Merge branch 'sched/urgent' into sched/core, to pick up fixes
author     Ingo Molnar <mingo@kernel.org>
           Mon, 27 Jun 2016 09:35:02 +0000 (11:35 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Mon, 27 Jun 2016 09:35:02 +0000 (11:35 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
466 files changed:
.mailmap
CREDITS
Documentation/ABI/testing/configfs-usb-gadget-uvc
Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
Documentation/leds/leds-class.txt
Documentation/scsi/scsi_eh.txt
MAINTAINERS
Makefile
arch/Kconfig
arch/alpha/include/asm/pgalloc.h
arch/arc/include/asm/pgalloc.h
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/am57xx-idk-common.dtsi
arch/arm/boot/dts/dm8148-evm.dts
arch/arm/boot/dts/dm8148-t410.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra74x.dtsi
arch/arm/boot/dts/exynos5250-snow-common.dtsi
arch/arm/boot/dts/exynos5420-peach-pit.dts
arch/arm/boot/dts/omap3-evm-37xx.dts
arch/arm/boot/dts/omap3-igep.dtsi
arch/arm/boot/dts/omap3-igep0020-common.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/omap3-zoom3.dts
arch/arm/boot/dts/omap5-board-common.dtsi
arch/arm/boot/dts/omap5-igep0050.dts
arch/arm/boot/dts/omap5-uevm.dts
arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
arch/arm/boot/dts/stih407-family.dtsi
arch/arm/boot/dts/sun6i-a31s-primo81.dts
arch/arm/boot/dts/sun6i-a31s-yones-toptech-bs1078-v2.dts
arch/arm/configs/exynos_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/pgalloc.h
arch/arm/include/asm/pgtable-2level.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/kernel/smp.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-imx/mach-imx6ul.c
arch/arm/mach-omap1/ams-delta-fiq-handler.S
arch/arm/mach-omap1/ams-delta-fiq.c
arch/arm/mach-omap1/include/mach/ams-delta-fiq.h
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/omap-secure.h
arch/arm/mach-omap2/omap-smp.c
arch/arm/mach-omap2/powerdomain.c
arch/arm/mach-omap2/powerdomains7xx_data.c
arch/arm/mach-omap2/timer.c
arch/arm/plat-samsung/devs.c
arch/arm64/Makefile
arch/arm64/boot/dts/lg/lg1312.dtsi
arch/arm64/boot/dts/rockchip/rk3399.dtsi
arch/arm64/include/asm/kgdb.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/spinlock.h
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/kgdb.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/traps.c
arch/arm64/mm/context.c
arch/arm64/mm/fault.c
arch/arm64/mm/flush.c
arch/avr32/include/asm/pgalloc.h
arch/cris/include/asm/pgalloc.h
arch/frv/mm/pgalloc.c
arch/hexagon/include/asm/pgalloc.h
arch/ia64/Kconfig
arch/ia64/include/asm/thread_info.h
arch/ia64/kernel/init_task.c
arch/m68k/include/asm/mcf_pgalloc.h
arch/m68k/include/asm/motorola_pgalloc.h
arch/m68k/include/asm/sun3_pgalloc.h
arch/metag/include/asm/pgalloc.h
arch/microblaze/include/asm/pgalloc.h
arch/microblaze/mm/pgtable.c
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/pgalloc.h
arch/mips/kvm/emulate.c
arch/mips/kvm/interrupt.h
arch/mips/kvm/locore.S
arch/mips/kvm/mips.c
arch/mn10300/include/asm/thread_info.h
arch/mn10300/kernel/kgdb.c
arch/mn10300/mm/pgtable.c
arch/nios2/include/asm/pgalloc.h
arch/openrisc/include/asm/pgalloc.h
arch/openrisc/mm/ioremap.c
arch/parisc/include/asm/pgalloc.h
arch/powerpc/Kconfig
arch/powerpc/include/asm/book3s/32/pgalloc.h
arch/powerpc/include/asm/book3s/64/mmu-hash.h
arch/powerpc/include/asm/book3s/64/pgalloc.h
arch/powerpc/include/asm/book3s/64/radix.h
arch/powerpc/include/asm/book3s/64/tlbflush-radix.h
arch/powerpc/include/asm/book3s/64/tlbflush.h
arch/powerpc/include/asm/book3s/pgalloc.h
arch/powerpc/include/asm/nohash/64/pgalloc.h
arch/powerpc/kernel/eeh_driver.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/mm/hash_native_64.c
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/mmu_context_book3s64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/pgtable_32.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/tlb-radix.c
arch/s390/include/asm/kvm_host.h
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kvm/intercept.c
arch/s390/kvm/kvm-s390.c
arch/s390/mm/pgalloc.c
arch/s390/mm/pgtable.c
arch/score/include/asm/pgalloc.h
arch/sh/include/asm/pgalloc.h
arch/sh/mm/pgtable.c
arch/sparc/include/asm/pgalloc_64.h
arch/sparc/mm/init_64.c
arch/tile/include/asm/thread_info.h
arch/tile/kernel/process.c
arch/tile/mm/pgtable.c
arch/um/kernel/mem.c
arch/unicore32/include/asm/pgalloc.h
arch/x86/Kconfig
arch/x86/include/asm/kprobes.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/pgalloc.h
arch/x86/include/asm/stacktrace.h
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_32.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/espfix_64.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/kprobes/core.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/mm/pgtable.c
arch/x86/platform/efi/efi_64.c
arch/x86/xen/mmu.c
arch/x86/xen/p2m.c
arch/xtensa/include/asm/pgalloc.h
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/nsparse.c
drivers/ata/libata-eh.c
drivers/base/Makefile
drivers/base/isa.c
drivers/base/module.c
drivers/base/power/opp/cpu.c
drivers/base/power/opp/of.c
drivers/base/power/opp/opp.h
drivers/block/aoe/aoecmd.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/pcc-cpufreq.c
drivers/devfreq/devfreq.c
drivers/devfreq/event/exynos-nocp.c
drivers/dma/at_xdmac.c
drivers/dma/mv_xor.c
drivers/extcon/extcon-palmas.c
drivers/gpio/Kconfig
drivers/gpio/gpio-104-idi-48.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/cik.c
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
drivers/gpu/drm/amd/amdgpu/vi.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/atombios.h
drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c
drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h
drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h
drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h
drivers/gpu/drm/amd/powerplay/inc/smu74.h
drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h
drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
drivers/gpu/drm/drm_atomic.c
drivers/gpu/drm/drm_crtc_helper.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/etnaviv/etnaviv_iommu.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp.c
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_g2d.c
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_dp_link_training.c
drivers/gpu/drm/i915/intel_dpll_mgr.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_fbc.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_vbt_defs.h
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/sun4i/Kconfig
drivers/gpu/drm/sun4i/sun4i_backend.c
drivers/gpu/drm/sun4i/sun4i_dotclock.c
drivers/gpu/drm/sun4i/sun4i_drv.c
drivers/gpu/drm/sun4i/sun4i_rgb.c
drivers/gpu/drm/sun4i/sun4i_tcon.c
drivers/hid/hid-elo.c
drivers/hid/hid-multitouch.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/dell-smm-hwmon.c
drivers/hwtracing/coresight/coresight-tmc-etr.c
drivers/hwtracing/coresight/coresight.c
drivers/iio/accel/st_accel_buffer.c
drivers/iio/accel/st_accel_core.c
drivers/iio/common/st_sensors/st_sensors_buffer.c
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/common/st_sensors/st_sensors_trigger.c
drivers/iio/dac/Kconfig
drivers/iio/dac/ad5592r-base.c
drivers/iio/gyro/st_gyro_buffer.c
drivers/iio/gyro/st_gyro_core.c
drivers/iio/humidity/am2315.c
drivers/iio/humidity/hdc100x.c
drivers/iio/imu/bmi160/bmi160_core.c
drivers/iio/industrialio-trigger.c
drivers/iio/light/apds9960.c
drivers/iio/light/bh1780.c
drivers/iio/light/max44000.c
drivers/iio/magnetometer/st_magn_buffer.c
drivers/iio/magnetometer/st_magn_core.c
drivers/iio/pressure/bmp280.c
drivers/iio/pressure/st_pressure_buffer.c
drivers/iio/pressure/st_pressure_core.c
drivers/iio/proximity/as3935.c
drivers/infiniband/core/cache.c
drivers/infiniband/core/cma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/hfi1/chip.c
drivers/infiniband/hw/hfi1/file_ops.c
drivers/infiniband/hw/hfi1/init.c
drivers/infiniband/hw/hfi1/mad.c
drivers/infiniband/hw/hfi1/mad.h
drivers/infiniband/hw/hfi1/pio.c
drivers/infiniband/hw/hfi1/qsfp.c
drivers/infiniband/hw/hfi1/verbs_txreq.c
drivers/infiniband/hw/hfi1/verbs_txreq.h
drivers/infiniband/hw/i40iw/i40iw.h
drivers/infiniband/hw/i40iw/i40iw_verbs.c
drivers/infiniband/hw/mlx4/ah.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/mr.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/sw/rdmavt/qp.c
drivers/infiniband/sw/rdmavt/vt.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/infiniband/ulp/srpt/ib_srpt.h
drivers/iommu/arm-smmu-v3.c
drivers/iommu/intel-iommu.c
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-mips-gic.c
drivers/leds/led-core.c
drivers/leds/trigger/ledtrig-heartbeat.c
drivers/mcb/mcb-core.c
drivers/media/v4l2-core/v4l2-mc.c
drivers/memory/omap-gpmc.c
drivers/misc/mei/client.c
drivers/mtd/ubi/build.c
drivers/mtd/ubi/eba.c
drivers/mtd/ubi/kapi.c
drivers/pci/vc.c
drivers/perf/arm_pmu.c
drivers/phy/phy-exynos-mipi-video.c
drivers/phy/phy-ti-pipe3.c
drivers/phy/phy-twl4030-usb.c
drivers/platform/x86/Kconfig
drivers/platform/x86/ideapad-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/pwm/core.c
drivers/pwm/pwm-atmel-hlcdc.c
drivers/pwm/sysfs.c
drivers/scsi/53c700.c
drivers/scsi/scsi_error.c
drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
drivers/staging/rtl8188eu/core/rtw_efuse.c
drivers/staging/rtl8188eu/hal/usb_halinit.c
drivers/usb/core/quirks.c
drivers/usb/dwc2/core.h
drivers/usb/dwc2/gadget.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-exynos.c
drivers/usb/dwc3/dwc3-st.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_printer.c
drivers/usb/gadget/function/f_tcm.c
drivers/usb/gadget/function/f_uac2.c
drivers/usb/gadget/function/storage_common.c
drivers/usb/gadget/legacy/inode.c
drivers/usb/gadget/udc/udc-core.c
drivers/usb/host/ehci-hcd.c
drivers/usb/host/ehci-hub.c
drivers/usb/host/ehci-msm.c
drivers/usb/host/ehci-tegra.c
drivers/usb/host/ohci-q.c
drivers/usb/host/xhci-pci.c
drivers/usb/host/xhci-plat.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_core.h
drivers/usb/musb/musb_gadget.c
drivers/usb/musb/musb_host.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/sunxi.c
drivers/usb/phy/phy-twl6030-usb.c
drivers/usb/serial/mos7720.c
drivers/usb/storage/uas.c
drivers/usb/usbip/vhci_hcd.c
drivers/watchdog/Kconfig
drivers/xen/balloon.c
drivers/xen/xen-pciback/conf_space.c
drivers/xen/xen-pciback/conf_space_header.c
fs/autofs4/autofs_i.h
fs/autofs4/expire.c
fs/autofs4/root.c
fs/autofs4/waitq.c
fs/btrfs/check-integrity.c
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-inode.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/file.c
fs/btrfs/inode.c
fs/btrfs/ordered-data.c
fs/btrfs/super.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/dcache.c
fs/debugfs/file.c
fs/internal.h
fs/jbd2/journal.c
fs/libfs.c
fs/namespace.c
fs/nfsd/blocklayout.c
fs/nfsd/nfs2acl.c
fs/nfsd/nfs3acl.c
fs/nfsd/nfs4acl.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/nilfs2/the_nilfs.c
fs/ocfs2/Makefile
fs/ocfs2/buffer_head_io.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/posix_acl.c
fs/reiserfs/super.c
fs/ubifs/file.c
fs/udf/partition.c
fs/udf/super.c
fs/udf/udf_sb.h
include/linux/dcache.h
include/linux/iio/common/st_sensors.h
include/linux/init_task.h
include/linux/isa.h
include/linux/jump_label.h
include/linux/kasan.h
include/linux/leds.h
include/linux/mlx5/qp.h
include/linux/mm.h
include/linux/pwm.h
include/linux/sched.h
include/linux/sunrpc/clnt.h
include/linux/sunrpc/svc_xprt.h
include/linux/sunrpc/xprt.h
include/linux/usb/gadget.h
include/linux/usb/musb.h
include/media/v4l2-mc.h
include/rdma/rdma_vt.h
init/main.c
kernel/fork.c
kernel/jump_label.c
kernel/kcov.c
kernel/locking/mutex-debug.c
kernel/locking/mutex-debug.h
kernel/locking/mutex.c
kernel/locking/mutex.h
kernel/power/process.c
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h
kernel/trace/trace_printk.c
mm/compaction.c
mm/filemap.c
mm/hugetlb.c
mm/internal.h
mm/kasan/kasan.c
mm/kmemleak.c
mm/memcontrol.c
mm/memory.c
mm/mempool.c
mm/migrate.c
mm/oom_kill.c
mm/page_owner.c
mm/shmem.c
mm/swap.c
net/sunrpc/clnt.c
net/sunrpc/svc_xprt.c
net/sunrpc/xprtsock.c
net/unix/af_unix.c
security/keys/key.c
sound/drivers/dummy.c
sound/hda/hdac_regmap.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_realtek.c
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist-mod.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-hist.tc
tools/testing/selftests/ftrace/test.d/trigger/trigger-multihist.tc
tools/testing/selftests/vm/compaction_test.c
tools/virtio/ringtest/Makefile
tools/virtio/ringtest/README
tools/virtio/ringtest/noring.c [new file with mode: 0644]
tools/virtio/ringtest/run-on-all.sh
tools/vm/slabinfo.c
virt/kvm/kvm_main.c

index 08b8042..52489f5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -21,6 +21,7 @@ Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
 Andrew Morton <akpm@linux-foundation.org>
 Andrew Vasquez <andrew.vasquez@qlogic.com>
 Andy Adamson <andros@citi.umich.edu>
+Antoine Tenart <antoine.tenart@free-electrons.com>
 Antonio Ospite <ao2@ao2.it> <ao2@amarulasolutions.com>
 Archit Taneja <archit@ti.com>
 Arnaud Patard <arnaud.patard@rtp-net.org>
@@ -30,6 +31,9 @@ Axel Lin <axel.lin@gmail.com>
 Ben Gardner <bgardner@wabtec.com>
 Ben M Cahill <ben.m.cahill@intel.com>
 Björn Steinbrink <B.Steinbrink@gmx.de>
+Boris Brezillon <boris.brezillon@free-electrons.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon.dev@gmail.com>
+Boris Brezillon <boris.brezillon@free-electrons.com> <b.brezillon@overkiz.com>
 Brian Avery <b.avery@hp.com>
 Brian King <brking@us.ibm.com>
 Christoph Hellwig <hch@lst.de>
@@ -89,6 +93,7 @@ Leonid I Ananiev <leonid.i.ananiev@intel.com>
 Linas Vepstas <linas@austin.ibm.com>
 Mark Brown <broonie@sirena.org.uk>
 Matthieu CASTET <castet.matthieu@free.fr>
+Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> <mchehab@infradead.org> <mchehab@redhat.com> <m.chehab@samsung.com> <mchehab@osg.samsung.com> <mchehab@s-opensource.com>
 Mayuresh Janorkar <mayur@ti.com>
 Michael Buesch <m@bues.ch>
 Michel Dänzer <michel@tungstengraphics.com>
@@ -122,6 +127,7 @@ Santosh Shilimkar <santosh.shilimkar@oracle.org>
 Sascha Hauer <s.hauer@pengutronix.de>
 S.Çağlar Onur <caglar@pardus.org.tr>
 Shiraz Hashim <shiraz.linux.kernel@gmail.com> <shiraz.hashim@st.com>
+Shuah Khan <shuah@kernel.org> <shuahkhan@gmail.com> <shuah.khan@hp.com> <shuahkh@osg.samsung.com> <shuah.kh@samsung.com>
 Simon Kelley <simon@thekelleys.org.uk>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
 Stephen Hemminger <shemminger@osdl.org>
diff --git a/CREDITS b/CREDITS
index 0f0bf22..2a3fbcd 100644
--- a/CREDITS
+++ b/CREDITS
@@ -649,6 +649,7 @@ D: Configure, Menuconfig, xconfig
 
 N: Mauro Carvalho Chehab
 E: m.chehab@samsung.org
+E: mchehab@osg.samsung.com
 E: mchehab@infradead.org
 D: Media subsystem (V4L/DVB) drivers and core
 D: EDAC drivers and EDAC 3.0 core rework
index 2f4a005..1ba0d0f 100644
--- a/Documentation/ABI/testing/configfs-usb-gadget-uvc
+++ b/Documentation/ABI/testing/configfs-usb-gadget-uvc
@@ -1,6 +1,6 @@
 What:          /config/usb-gadget/gadget/functions/uvc.name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   UVC function directory
 
                streaming_maxburst      - 0..15 (ss only)
@@ -9,37 +9,37 @@ Description:  UVC function directory
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Control descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class/ss
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Super speed control class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/class/fs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Full speed control class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Output terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/output/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default output terminal descriptors
 
                All attributes read only:
@@ -53,12 +53,12 @@ Description:        Default output terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Camera terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/terminal/camera/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default camera terminal descriptors
 
                All attributes read only:
@@ -75,12 +75,12 @@ Description:        Default camera terminal descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/processing
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Processing unit descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/processing/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default processing unit descriptors
 
                All attributes read only:
@@ -94,49 +94,49 @@ Description:        Default processing unit descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/header
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Control header descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/control/header/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific control header descriptors
 
 		dwClockFrequency
 		bcdUVC
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/ss
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Super speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/hs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   High speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/class/fs
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Full speed streaming class descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Color matching descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/color_matching/default
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Default color matching descriptors
 
                All attributes read only:
@@ -150,12 +150,12 @@ Description:      Default color matching descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   MJPEG format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific MJPEG format descriptors
 
                All attributes read only,
@@ -174,7 +174,7 @@ Description:        Specific MJPEG format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/mjpeg/name/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific MJPEG frame descriptors
 
                dwFrameInterval         - indicates how frame interval can be
@@ -196,12 +196,12 @@ Description:      Specific MJPEG frame descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Uncompressed format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific uncompressed format descriptors
 
                bmaControls             - this format's data for bmaControls in
@@ -221,7 +221,7 @@ Description:        Specific uncompressed format descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/uncompressed/name/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific uncompressed frame descriptors
 
                dwFrameInterval         - indicates how frame interval can be
@@ -243,12 +243,12 @@ Description:      Specific uncompressed frame descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/header
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Streaming header descriptors
 
 What:          /config/usb-gadget/gadget/functions/uvc.name/streaming/header/name
 Date:          Dec 2014
-KernelVersion: 3.20
+KernelVersion: 4.0
 Description:   Specific streaming header descriptors
 
                All attributes read only:
index 6708c5e..33e96f7 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
+++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935
@@ -1,4 +1,4 @@
-What           /sys/bus/iio/devices/iio:deviceX/in_proximity_raw
+What           /sys/bus/iio/devices/iio:deviceX/in_proximity_input
 Date:          March 2014
 KernelVersion: 3.15
 Contact:       Matt Ranostay <mranostay@gmail.com>
index d406d98..44f5e6b 100644
--- a/Documentation/leds/leds-class.txt
+++ b/Documentation/leds/leds-class.txt
@@ -74,8 +74,8 @@ blink_set() function (see <linux/leds.h>). To set an LED to blinking,
 however, it is better to use the API function led_blink_set(), as it
 will check and implement software fallback if necessary.
 
-To turn off blinking again, use the API function led_brightness_set()
-as that will not just set the LED brightness but also stop any software
+To turn off blinking, use the API function led_brightness_set()
+with brightness value LED_OFF, which should stop any software
 timers that may have been required for blinking.
 
 The blink_set() function should choose a user friendly blinking value
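
For reference, a minimal sketch of the documented usage, in the spirit of the
text above (it assumes a driver that already holds a valid struct led_classdev
*led_cdev; the function name is made up for illustration):

	#include <linux/leds.h>

	static void example_blink_then_stop(struct led_classdev *led_cdev)
	{
		unsigned long delay_on = 500;	/* ms */
		unsigned long delay_off = 500;	/* ms */

		/*
		 * Start blinking; the LED core falls back to a software
		 * timer when the driver provides no hardware blink_set().
		 */
		led_blink_set(led_cdev, &delay_on, &delay_off);

		/*
		 * Stop blinking: writing LED_OFF through led_brightness_set()
		 * also stops any software blink timer, as described above.
		 */
		led_brightness_set(led_cdev, LED_OFF);
	}
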
index 8638f61..37eca00 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -263,19 +263,23 @@ scmd->allowed.
 
  3. scmd recovered
     ACTION: scsi_eh_finish_cmd() is invoked to EH-finish scmd
-       - shost->host_failed--
        - clear scmd->eh_eflags
        - scsi_setup_cmd_retry()
        - move from local eh_work_q to local eh_done_q
     LOCKING: none
+    CONCURRENCY: at most one thread per separate eh_work_q to
+                keep queue manipulation lockless
 
  4. EH completes
     ACTION: scsi_eh_flush_done_q() retries scmds or notifies upper
-           layer of failure.
+           layer of failure. May be called concurrently but must have
+           a no more than one thread per separate eh_work_q to
+           manipulate the queue locklessly
        - scmd is removed from eh_done_q and scmd->eh_entry is cleared
        - if retry is necessary, scmd is requeued using
           scsi_queue_insert()
        - otherwise, scsi_finish_command() is invoked for scmd
+       - zero shost->host_failed
     LOCKING: queue or finish function performs appropriate locking
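
For reference, a hypothetical strategy-handler skeleton that follows the
ordering documented above (a sketch only, not the actual midlayer code; the
function name is made up, while the scsi_eh_* helpers are the real ones):

	#include <scsi/scsi_cmnd.h>
	#include <scsi/scsi_eh.h>
	#include <scsi/scsi_host.h>

	static void example_eh_strategy(struct Scsi_Host *shost)
	{
		LIST_HEAD(eh_work_q);
		LIST_HEAD(eh_done_q);
		struct scsi_cmnd *scmd, *next;

		spin_lock_irq(shost->host_lock);
		list_splice_init(&shost->eh_cmd_q, &eh_work_q);
		spin_unlock_irq(shost->host_lock);

		/* step 3: recover each scmd, then EH-finish it into eh_done_q */
		list_for_each_entry_safe(scmd, next, &eh_work_q, eh_entry)
			scsi_eh_finish_cmd(scmd, &eh_done_q);

		/* step 4: retry or fail the recovered commands */
		scsi_eh_flush_done_q(&eh_done_q);

		/* only now is host_failed cleared */
		shost->host_failed = 0;
	}
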
 
 
index 16700e4..952fd2a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1159,6 +1159,7 @@ F:        arch/arm/mach-footbridge/
 ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
 M:     Shawn Guo <shawnguo@kernel.org>
 M:     Sascha Hauer <kernel@pengutronix.de>
+R:     Fabio Estevam <fabio.estevam@nxp.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
@@ -2242,7 +2243,8 @@ F:        include/net/ax25.h
 F:     net/ax25/
 
 AZ6007 DVB DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2709,7 +2711,8 @@ F:        Documentation/filesystems/btrfs.txt
 F:     fs/btrfs/
 
 BTTV VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -2773,9 +2776,9 @@ F:        include/net/caif/
 F:     net/caif/
 
 CALGARY x86-64 IOMMU
-M:     Muli Ben-Yehuda <muli@il.ibm.com>
-M:     "Jon D. Mason" <jdmason@kudzu.us>
-L:     discuss@x86-64.org
+M:     Muli Ben-Yehuda <mulix@mulix.org>
+M:     Jon Mason <jdmason@kudzu.us>
+L:     iommu@lists.linux-foundation.org
 S:     Maintained
 F:     arch/x86/kernel/pci-calgary_64.c
 F:     arch/x86/kernel/tce_64.c
@@ -3344,7 +3347,8 @@ S:        Maintained
 F:     drivers/media/dvb-frontends/cx24120*
 
 CX88 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -3774,6 +3778,7 @@ Q:        https://patchwork.kernel.org/project/linux-dmaengine/list/
 S:     Maintained
 F:     drivers/dma/
 F:     include/linux/dmaengine.h
+F:     Documentation/devicetree/bindings/dma/
 F:     Documentation/dmaengine/
 T:     git git://git.infradead.org/users/vkoul/slave-dma.git
 
@@ -4291,7 +4296,8 @@ F:        fs/ecryptfs/
 EDAC-CORE
 M:     Doug Thompson <dougthompson@xmission.com>
 M:     Borislav Petkov <bp@alien8.de>
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-edac.git linux_next
@@ -4336,7 +4342,8 @@ S:        Maintained
 F:     drivers/edac/e7xxx_edac.c
 
 EDAC-GHES
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/ghes_edac.c
@@ -4360,19 +4367,22 @@ S:      Maintained
 F:     drivers/edac/i5000_edac.c
 
 EDAC-I5400
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/i5400_edac.c
 
 EDAC-I7300
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/i7300_edac.c
 
 EDAC-I7CORE
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/i7core_edac.c
@@ -4409,7 +4419,8 @@ S:        Maintained
 F:     drivers/edac/r82600_edac.c
 
 EDAC-SBRIDGE
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     drivers/edac/sb_edac.c
@@ -4468,7 +4479,8 @@ S:        Maintained
 F:     drivers/net/ethernet/ibm/ehea/
 
 EM28XX VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -6487,6 +6499,7 @@ F:        include/uapi/linux/sunrpc/
 
 KERNEL SELFTEST FRAMEWORK
 M:     Shuah Khan <shuahkh@osg.samsung.com>
+M:     Shuah Khan <shuah@kernel.org>
 L:     linux-kselftest@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/shuah/linux-kselftest
 S:     Maintained
@@ -7358,7 +7371,8 @@ S:        Supported
 F:     drivers/media/pci/netup_unidvb/*
 
 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 P:     LinuxTV.org Project
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
@@ -9852,7 +9866,8 @@ S:        Odd Fixes
 F:     drivers/media/i2c/saa6588*
 
 SAA7134 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -10371,7 +10386,8 @@ S:      Maintained
 F:     drivers/media/radio/si4713/radio-usb-si4713.c
 
 SIANO DVB DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -11137,7 +11153,8 @@ S:      Maintained
 F:     drivers/media/i2c/tda9840*
 
 TEA5761 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -11145,7 +11162,8 @@ S:      Odd fixes
 F:     drivers/media/tuners/tea5761.*
 
 TEA5767 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -11532,7 +11550,8 @@ F:      include/linux/shmem_fs.h
 F:     mm/shmem.c
 
 TM6000 VIDEO4LINUX DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
@@ -11886,7 +11905,8 @@ F:      drivers/usb/common/usb-otg-fsm.c
 
 USB OVER IP DRIVER
 M:     Valentina Manea <valentina.manea.m@gmail.com>
-M:     Shuah Khan <shuah.kh@samsung.com>
+M:     Shuah Khan <shuahkh@osg.samsung.com>
+M:     Shuah Khan <shuah@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/usb/usbip_protocol.txt
@@ -11957,6 +11977,7 @@ L:      linux-usb@vger.kernel.org
 W:     http://www.linux-usb.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb.git
 S:     Supported
+F:     Documentation/devicetree/bindings/usb/
 F:     Documentation/usb/
 F:     drivers/usb/
 F:     include/linux/usb.h
@@ -12130,6 +12151,7 @@ VIRTIO CORE, NET AND BLOCK DRIVERS
 M:     "Michael S. Tsirkin" <mst@redhat.com>
 L:     virtualization@lists.linux-foundation.org
 S:     Maintained
+F:     Documentation/devicetree/bindings/virtio/
 F:     drivers/virtio/
 F:     tools/virtio/
 F:     drivers/net/virtio_net.c
@@ -12518,7 +12540,8 @@ S:      Maintained
 F:     arch/x86/entry/vdso/
 
 XC2028/3028 TUNER DRIVER
-M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+M:     Mauro Carvalho Chehab <mchehab@s-opensource.com>
+M:     Mauro Carvalho Chehab <mchehab@kernel.org>
 L:     linux-media@vger.kernel.org
 W:     https://linuxtv.org
 T:     git git://linuxtv.org/media_tree.git
index b409076..6471f20 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 7
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Psychotic Stoned Sheep
 
 # *DOCUMENTATION*
index d794384..1599629 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -226,8 +226,8 @@ config ARCH_INIT_TASK
 config ARCH_TASK_STRUCT_ALLOCATOR
        bool
 
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
        bool
 
 # Select if arch wants to size task_struct dynamically via arch_task_struct_size:
@@ -606,6 +606,9 @@ config HAVE_ARCH_HASH
          file which provides platform-specific implementations of some
          functions in <linux/hash.h> or fs/namei.c.
 
+config ISA_BUS_API
+       def_bool ISA
+
 #
 # ABI hall of shame
 #
index aab14a0..c2ebb6f 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -40,7 +40,7 @@ pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pmd_t *
 pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pmd_t *ret = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return ret;
 }
 
@@ -53,7 +53,7 @@ pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
index 86ed671..3749234 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -95,7 +95,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO,
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                         __get_order_pte());
 
        return pte;
@@ -107,7 +107,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
        pgtable_t pte_pg;
        struct page *page;
 
-       pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL | __GFP_REPEAT, __get_order_pte());
+       pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
        if (!pte_pg)
                return 0;
        memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
index 06b6c2d..414b427 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -741,6 +741,7 @@ dtb-$(CONFIG_MACH_SUN7I) += \
        sun7i-a20-olimex-som-evb.dtb \
        sun7i-a20-olinuxino-lime.dtb \
        sun7i-a20-olinuxino-lime2.dtb \
+       sun7i-a20-olinuxino-lime2-emmc.dtb \
        sun7i-a20-olinuxino-micro.dtb \
        sun7i-a20-orangepi.dtb \
        sun7i-a20-orangepi-mini.dtb \
index d82dd6e..5687d6b 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
        status = "okay";
        pinctrl-names = "default";
        pinctrl-0 = <&i2c0_pins>;
-       clock-frequency = <400000>;
+       clock-frequency = <100000>;
 
        tps@24 {
                compatible = "ti,tps65218";
index b01a594..0e63b9d 100644
--- a/arch/arm/boot/dts/am57xx-idk-common.dtsi
+++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi
 
                tps659038_pmic {
                        compatible = "ti,tps659038-pmic";
+
+                       smps12-in-supply = <&vmain>;
+                       smps3-in-supply = <&vmain>;
+                       smps45-in-supply = <&vmain>;
+                       smps6-in-supply = <&vmain>;
+                       smps7-in-supply = <&vmain>;
+                       smps8-in-supply = <&vmain>;
+                       smps9-in-supply = <&vmain>;
+                       ldo1-in-supply = <&vmain>;
+                       ldo2-in-supply = <&vmain>;
+                       ldo3-in-supply = <&vmain>;
+                       ldo4-in-supply = <&vmain>;
+                       ldo9-in-supply = <&vmain>;
+                       ldoln-in-supply = <&vmain>;
+                       ldousb-in-supply = <&vmain>;
+                       ldortc-in-supply = <&vmain>;
+
                        regulators {
                                smps12_reg: smps12 {
                                        /* VDD_MPU */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps12";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
@@ -73,7 +89,6 @@
 
                                smps3_reg: smps3 {
                                        /* VDD_DDR EMIF1 EMIF2 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps3";
                                        regulator-min-microvolt = <1350000>;
                                        regulator-max-microvolt = <1350000>;
@@ -84,7 +99,6 @@
                                smps45_reg: smps45 {
                                        /* VDD_DSPEVE on AM572 */
                                        /* VDD_IVA + VDD_DSP on AM571 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps45";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
 
                                smps6_reg: smps6 {
                                        /* VDD_GPU */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps6";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1250000>;
 
                                smps7_reg: smps7 {
                                        /* VDD_CORE */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps7";
                                        regulator-min-microvolt = <850000>;
                                        regulator-max-microvolt = <1150000>;
                                smps8_reg: smps8 {
                                        /* 5728 - VDD_IVAHD */
                                        /* 5718 - N.C. test point */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps8";
                                };
 
                                smps9_reg: smps9 {
                                        /* VDD_3_3D */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "smps9";
                                        regulator-min-microvolt = <3300000>;
                                        regulator-max-microvolt = <3300000>;
                                ldo1_reg: ldo1 {
                                        /* VDDSHV8 - VSDMMC  */
                                        /* NOTE: on rev 1.3a, data supply */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo1";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <3300000>;
 
                                ldo2_reg: ldo2 {
                                        /* VDDSH18V */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo2";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
 
                                ldo3_reg: ldo3 {
                                        /* R1.3a 572x V1_8PHY_LDO3: USB, SATA */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo3";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
 
                                ldo4_reg: ldo4 {
                                        /* R1.3a 572x V1_8PHY_LDO4: PCIE, HDMI*/
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo4";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
 
                                ldo9_reg: ldo9 {
                                        /* VDD_RTC  */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldo9";
                                        regulator-min-microvolt = <840000>;
                                        regulator-max-microvolt = <1160000>;
 
                                ldoln_reg: ldoln {
                                        /* VDDA_1V8_PLL */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldoln";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
 
                                ldousb_reg: ldousb {
                                        /* VDDA_3V_USB: VDDA_USBHS33 */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldousb";
                                        regulator-min-microvolt = <3300000>;
                                        regulator-max-microvolt = <3300000>;
 
                                ldortc_reg: ldortc {
                                        /* VDDA_RTC  */
-                                       vin-supply = <&vmain>;
                                        regulator-name = "ldortc";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
index cbc17b0..4128fa9 100644
--- a/arch/arm/boot/dts/dm8148-evm.dts
+++ b/arch/arm/boot/dts/dm8148-evm.dts
        };
 };
 
+&mmc1 {
+        status = "disabled";
+};
+
 &mmc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd1_pins>;
        cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 };
 
+&mmc3 {
+        status = "disabled";
+};
+
 &pincntl {
        sd1_pins: pinmux_sd1_pins {
                pinctrl-single,pins = <
index 5d4313f..3f18486 100644
--- a/arch/arm/boot/dts/dm8148-t410.dts
+++ b/arch/arm/boot/dts/dm8148-t410.dts
        phy-mode = "rgmii";
 };
 
+&mmc1 {
+       status = "disabled";
+};
+
+&mmc2 {
+       status = "disabled";
+};
+
 &mmc3 {
        pinctrl-names = "default";
        pinctrl-0 = <&sd2_pins>;
@@ -53,6 +61,7 @@
        dmas = <&edma_xbar 8 0 1        /* use SDTXEVT1 instead of MCASP0TX */
                &edma_xbar 9 0 2>;      /* use SDRXEVT1 instead of MCASP0RX */
        dma-names = "tx", "rx";
+       non-removable;
 };
 
 &pincntl {
index e007401..3a8f397 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
                        ti,hwmods = "gpmc";
                        reg = <0x50000000 0x37c>;      /* device IO registers */
                        interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+                       dmas = <&edma_xbar 4 0>;
+                       dma-names = "rxtx";
                        gpmc,num-cs = <8>;
                        gpmc,num-waitpins = <2>;
                        #address-cells = <2>;
index 4220eef..5e06020 100644
--- a/arch/arm/boot/dts/dra74x.dtsi
+++ b/arch/arm/boot/dts/dra74x.dtsi
        reg = <0x58000000 0x80>,
              <0x58004054 0x4>,
              <0x58004300 0x20>,
-             <0x58005054 0x4>,
-             <0x58005300 0x20>;
+             <0x58009054 0x4>,
+             <0x58009300 0x20>;
        reg-names = "dss", "pll1_clkctrl", "pll1",
                    "pll2_clkctrl", "pll2";
 
index ddfe1f5..fa14f77 100644
--- a/arch/arm/boot/dts/exynos5250-snow-common.dtsi
+++ b/arch/arm/boot/dts/exynos5250-snow-common.dtsi
        hpd-gpios = <&gpx0 7 GPIO_ACTIVE_HIGH>;
 
        ports {
-               port0 {
+               port {
                        dp_out: endpoint {
                                remote-endpoint = <&bridge_in>;
                        };
                edid-emulation = <5>;
 
                ports {
-                       port0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+
                                bridge_out: endpoint {
                                        remote-endpoint = <&panel_in>;
                                };
                        };
 
-                       port1 {
+                       port@1 {
+                               reg = <1>;
+
                                bridge_in: endpoint {
                                        remote-endpoint = <&dp_out>;
                                };
index f9d2e4f..1de972d 100644
--- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
+++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
        hpd-gpios = <&gpx2 6 GPIO_ACTIVE_HIGH>;
 
        ports {
-               port0 {
+               port {
                        dp_out: endpoint {
                                remote-endpoint = <&bridge_in>;
                        };
                use-external-pwm;
 
                ports {
-                       port0 {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       port@0 {
+                               reg = <0>;
+
                                bridge_out: endpoint {
                                        remote-endpoint = <&panel_in>;
                                };
                        };
 
-                       port1 {
+                       port@1 {
+                               reg = <1>;
+
                                bridge_in: endpoint {
                                        remote-endpoint = <&dp_out>;
                                };
index 76056ba..ed44982 100644
--- a/arch/arm/boot/dts/omap3-evm-37xx.dts
+++ b/arch/arm/boot/dts/omap3-evm-37xx.dts
@@ -85,7 +85,7 @@
                        OMAP3_CORE1_IOPAD(0x2158, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_clk.sdmmc2_clk */
                        OMAP3_CORE1_IOPAD(0x215a, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_cmd.sdmmc2_cmd */
                        OMAP3_CORE1_IOPAD(0x215c, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat0.sdmmc2_dat0 */
-                       OMAP3_CORE1_IOPAD(0x215e, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
+                       OMAP3_CORE1_IOPAD(0x215e, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat1.sdmmc2_dat1 */
                        OMAP3_CORE1_IOPAD(0x2160, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat2.sdmmc2_dat2 */
                        OMAP3_CORE1_IOPAD(0x2162, PIN_INPUT_PULLUP | MUX_MODE0) /* sdmmc2_dat3.sdmmc2_dat3 */
                >;
index 41f5d38..f4f2ce4 100644
--- a/arch/arm/boot/dts/omap3-igep.dtsi
+++ b/arch/arm/boot/dts/omap3-igep.dtsi
        vmmc-supply = <&vmmc1>;
        vmmc_aux-supply = <&vsim>;
        bus-width = <4>;
+       cd-gpios = <&twl_gpio 0 GPIO_ACTIVE_LOW>;
 };
 
 &mmc3 {
index d6f839c..b697106 100644
--- a/arch/arm/boot/dts/omap3-igep0020-common.dtsi
+++ b/arch/arm/boot/dts/omap3-igep0020-common.dtsi
                        OMAP3630_CORE2_IOPAD(0x25f8, PIN_OUTPUT | MUX_MODE4) /* etk_d14.gpio_28 */
                >;
        };
+
+       mmc1_wp_pins: pinmux_mmc1_cd_pins {
+               pinctrl-single,pins = <
+                       OMAP3630_CORE2_IOPAD(0x25fa, PIN_INPUT | MUX_MODE4)   /* etk_d15.gpio_29 */
+               >;
+       };
 };
 
 &i2c3 {
                };
        };
 };
+
+&mmc1 {
+       pinctrl-0 = <&mmc1_pins &mmc1_wp_pins>;
+       wp-gpios = <&gpio1 29 GPIO_ACTIVE_LOW>; /* gpio_29 */
+};
index d9e2d9c..2b74a81 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
                pinctrl-single,pins = <
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1) /* ssi1_rdy_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1)               /* ssi1_flag_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)                /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1)               /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1)                /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1)                /* ssi1_flag_rx */
        modem_pins: pinmux_modem {
                pinctrl-single,pins = <
                        OMAP3_CORE1_IOPAD(0x20dc, PIN_OUTPUT | MUX_MODE4)               /* gpio 70 => cmt_apeslpx */
-                       OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio 72 => ape_rst_rq */
+                       OMAP3_CORE1_IOPAD(0x20e0, PIN_INPUT | MUX_MODE4)                /* gpio 72 => ape_rst_rq */
                        OMAP3_CORE1_IOPAD(0x20e2, PIN_OUTPUT | MUX_MODE4)               /* gpio 73 => cmt_rst_rq */
                        OMAP3_CORE1_IOPAD(0x20e4, PIN_OUTPUT | MUX_MODE4)               /* gpio 74 => cmt_en */
                        OMAP3_CORE1_IOPAD(0x20e6, PIN_OUTPUT | MUX_MODE4)               /* gpio 75 => cmt_rst */
index a00ca76..927b17f 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -97,7 +97,7 @@
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE1)            /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE1)            /* ssi1_flag_tx */
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLUP | MUX_MODE1)      /* ssi1_rdy_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)        /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE1)             /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE1)             /* ssi1_flag_rx */
                        OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE1)            /* ssi1_rdy_rx */
                        OMAP3_CORE1_IOPAD(0x217c, PIN_OUTPUT | MUX_MODE7)            /* ssi1_dat_tx */
                        OMAP3_CORE1_IOPAD(0x217e, PIN_OUTPUT | MUX_MODE7)            /* ssi1_flag_tx */
                        OMAP3_CORE1_IOPAD(0x2180, PIN_INPUT_PULLDOWN | MUX_MODE7)    /* ssi1_rdy_tx */
-                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* ssi1_wake_tx (cawake) */
+                       OMAP3_CORE1_IOPAD(0x2182, PIN_INPUT | MUX_MODE4)        /* ssi1_wake_tx (cawake) */
                        OMAP3_CORE1_IOPAD(0x2184, PIN_INPUT | MUX_MODE7)             /* ssi1_dat_rx */
                        OMAP3_CORE1_IOPAD(0x2186, PIN_INPUT | MUX_MODE7)             /* ssi1_flag_rx */
                        OMAP3_CORE1_IOPAD(0x2188, PIN_OUTPUT | MUX_MODE4)            /* ssi1_rdy_rx */
 
        modem_pins1: pinmux_modem_core1_pins {
                pinctrl-single,pins = <
-                       OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | WAKEUP_EN | MUX_MODE4) /* gpio_34 (ape_rst_rq) */
+                       OMAP3_CORE1_IOPAD(0x207a, PIN_INPUT | MUX_MODE4)        /* gpio_34 (ape_rst_rq) */
                        OMAP3_CORE1_IOPAD(0x2100, PIN_OUTPUT | MUX_MODE4)            /* gpio_88 (cmt_rst_rq) */
                        OMAP3_CORE1_IOPAD(0x210a, PIN_OUTPUT | MUX_MODE4)            /* gpio_93 (cmt_apeslpx) */
                >;
index f19170b..c29b41d 100644
--- a/arch/arm/boot/dts/omap3-zoom3.dts
+++ b/arch/arm/boot/dts/omap3-zoom3.dts
@@ -98,7 +98,7 @@
                pinctrl-single,pins = <
                         OMAP3_CORE1_IOPAD(0x2174, PIN_INPUT_PULLUP | MUX_MODE0)        /* uart2_cts.uart2_cts */
                         OMAP3_CORE1_IOPAD(0x2176, PIN_OUTPUT | MUX_MODE0)              /* uart2_rts.uart2_rts */
-                        OMAP3_CORE1_IOPAD(0x217a, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart2_rx.uart2_rx */
+                        OMAP3_CORE1_IOPAD(0x217a, PIN_INPUT | MUX_MODE0)               /* uart2_rx.uart2_rx */
                         OMAP3_CORE1_IOPAD(0x2178, PIN_OUTPUT | MUX_MODE0)              /* uart2_tx.uart2_tx */
                >;
        };
                pinctrl-single,pins = <
                         OMAP3_CORE1_IOPAD(0x219a, PIN_INPUT_PULLDOWN | MUX_MODE0)      /* uart3_cts_rctx.uart3_cts_rctx */
                         OMAP3_CORE1_IOPAD(0x219c, PIN_OUTPUT | MUX_MODE0)              /* uart3_rts_sd.uart3_rts_sd */
-                        OMAP3_CORE1_IOPAD(0x219e, WAKEUP_EN | PIN_INPUT | MUX_MODE0) /* uart3_rx_irrx.uart3_rx_irrx */
+                        OMAP3_CORE1_IOPAD(0x219e, PIN_INPUT | MUX_MODE0)               /* uart3_rx_irrx.uart3_rx_irrx */
                         OMAP3_CORE1_IOPAD(0x21a0, PIN_OUTPUT | MUX_MODE0)              /* uart3_tx_irtx.uart3_tx_irtx */
                >;
        };
                pinctrl-single,pins = <
                        OMAP3630_CORE2_IOPAD(0x25d8, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_clk.sdmmc3_clk */
                        OMAP3630_CORE2_IOPAD(0x25e4, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d4.sdmmc3_dat0 */
-                       OMAP3630_CORE2_IOPAD(0x25e6, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE2) /* etk_d5.sdmmc3_dat1 */
+                       OMAP3630_CORE2_IOPAD(0x25e6, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d5.sdmmc3_dat1 */
                        OMAP3630_CORE2_IOPAD(0x25e8, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d6.sdmmc3_dat2 */
                        OMAP3630_CORE2_IOPAD(0x25e2, PIN_INPUT_PULLUP | MUX_MODE2)      /* etk_d3.sdmmc3_dat3 */
                >;
index dc759a3..5d5b620 100644
--- a/arch/arm/boot/dts/omap5-board-common.dtsi
+++ b/arch/arm/boot/dts/omap5-board-common.dtsi
                display0 = &hdmi0;
        };
 
+       vmain: fixedregulator-vmain {
+               compatible = "regulator-fixed";
+               regulator-name = "vmain";
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       vsys_cobra: fixedregulator-vsys_cobra {
+               compatible = "regulator-fixed";
+               regulator-name = "vsys_cobra";
+               vin-supply = <&vmain>;
+               regulator-min-microvolt = <5000000>;
+               regulator-max-microvolt = <5000000>;
+       };
+
+       vdds_1v8_main: fixedregulator-vdds_1v8_main {
+               compatible = "regulator-fixed";
+               regulator-name = "vdds_1v8_main";
+               vin-supply = <&smps7_reg>;
+               regulator-min-microvolt = <1800000>;
+               regulator-max-microvolt = <1800000>;
+       };
+
        vmmcsd_fixed: fixedregulator-mmcsd {
                compatible = "regulator-fixed";
                regulator-name = "vmmcsd_fixed";
 
        wlcore_irq_pin: pinmux_wlcore_irq_pin {
                pinctrl-single,pins = <
-                       OMAP5_IOPAD(0x40, WAKEUP_EN | PIN_INPUT_PULLUP | MUX_MODE6)     /* llia_wakereqin.gpio1_wk14 */
+                       OMAP5_IOPAD(0x40, PIN_INPUT_PULLUP | MUX_MODE6) /* llia_wakereqin.gpio1_wk14 */
                >;
        };
 };
 
                        ti,ldo6-vibrator;
 
+                       smps123-in-supply = <&vsys_cobra>;
+                       smps45-in-supply = <&vsys_cobra>;
+                       smps6-in-supply = <&vsys_cobra>;
+                       smps7-in-supply = <&vsys_cobra>;
+                       smps8-in-supply = <&vsys_cobra>;
+                       smps9-in-supply = <&vsys_cobra>;
+                       smps10_out2-in-supply = <&vsys_cobra>;
+                       smps10_out1-in-supply = <&vsys_cobra>;
+                       ldo1-in-supply = <&vsys_cobra>;
+                       ldo2-in-supply = <&vsys_cobra>;
+                       ldo3-in-supply = <&vdds_1v8_main>;
+                       ldo4-in-supply = <&vdds_1v8_main>;
+                       ldo5-in-supply = <&vsys_cobra>;
+                       ldo6-in-supply = <&vdds_1v8_main>;
+                       ldo7-in-supply = <&vsys_cobra>;
+                       ldo8-in-supply = <&vsys_cobra>;
+                       ldo9-in-supply = <&vmmcsd_fixed>;
+                       ldoln-in-supply = <&vsys_cobra>;
+                       ldousb-in-supply = <&vsys_cobra>;
+
                        regulators {
                                smps123_reg: smps123 {
                                        /* VDD_OPP_MPU */
                pinctrl-0 = <&twl6040_pins>;
 
                interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */
-               ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>;  /* gpio line 141 */
+
+               /* audpwron gpio defined in the board specific dts */
 
                vio-supply = <&smps7_reg>;
                v2v1-supply = <&smps9_reg>;
index 46ecb1d..f75ce02 100644 (file)
        };
 };
 
+/* LDO4 is VPP1 - ball AD9 */
+&ldo4_reg {
+       regulator-min-microvolt = <2000000>;
+       regulator-max-microvolt = <2000000>;
+};
+
+/*
+ * LDO7 is used for HDMI: VDDA_DSIPORTA - ball AA33, VDDA_DSIPORTC - ball AE33,
+ * VDDA_HDMI - ball AN25
+ */
+&ldo7_reg {
+       status = "okay";
+       regulator-min-microvolt = <1800000>;
+       regulator-max-microvolt = <1800000>;
+};
+
 &omap5_pmx_core {
        i2c4_pins: pinmux_i2c4_pins {
                pinctrl-single,pins = <
                <&gpio7 3 0>;           /* 195, SDA */
 };
 
+&twl6040 {
+       ti,audpwron-gpio = <&gpio5 16 GPIO_ACTIVE_HIGH>;  /* gpio line 144 */
+};
+
+&twl6040_pins {
+       pinctrl-single,pins = <
+               OMAP5_IOPAD(0x1c4, PIN_OUTPUT | MUX_MODE6)      /* mcspi1_somi.gpio5_144 */
+               OMAP5_IOPAD(0x1ca, PIN_OUTPUT | MUX_MODE6)      /* perslimbus2_clock.gpio5_145 */
+       >;
+};
index 60b3fbb..a51e605 100644 (file)
                <&gpio9 1 GPIO_ACTIVE_HIGH>,    /* TCA6424A P00, LS OE */
                <&gpio7 1 GPIO_ACTIVE_HIGH>;    /* GPIO 193, HPD */
 };
+
+&twl6040 {
+       ti,audpwron-gpio = <&gpio5 13 GPIO_ACTIVE_HIGH>;  /* gpio line 141 */
+};
+
+&twl6040_pins {
+       pinctrl-single,pins = <
+               OMAP5_IOPAD(0x1be, PIN_OUTPUT | MUX_MODE6)      /* mcspi1_somi.gpio5_141 */
+       >;
+};
index a3601e4..b844473 100644 (file)
 &gmac1 {
        status = "okay";
        phy-mode = "rgmii";
+       phy-handle = <&phy1>;
 
        snps,reset-gpio = <&porta 0 GPIO_ACTIVE_LOW>;
        snps,reset-active-low;
index ad8ba10..d294e82 100644 (file)
                        compatible = "shared-dma-pool";
                        reg = <0x40000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                gp1_reserved: rproc@41000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x41000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                audio_reserved: rproc@42000000 {
                        compatible = "shared-dma-pool";
                        reg = <0x42000000 0x01000000>;
                        no-map;
+                       status = "disabled";
                };
 
                dmu_reserved: rproc@43000000 {
index 68b479b..73c133f 100644 (file)
 };
 
 &reg_dc1sw {
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
        regulator-name = "vcc-lcd";
 };
 
index 360adfb..d6ad619 100644 (file)
 
 &reg_dc1sw {
        regulator-name = "vcc-lcd-usb2";
-       regulator-min-microvolt = <3000000>;
-       regulator-max-microvolt = <3000000>;
 };
 
 &reg_dc5ldo {
index 10f49ab..47195e8 100644 (file)
@@ -82,6 +82,7 @@ CONFIG_TOUCHSCREEN_MMS114=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_MAX77693_HAPTIC=y
 CONFIG_INPUT_MAX8997_HAPTIC=y
+CONFIG_KEYBOARD_SAMSUNG=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_SAMSUNG=y
 CONFIG_SERIAL_SAMSUNG_CONSOLE=y
index 8f85756..8a5fff1 100644 (file)
@@ -264,6 +264,7 @@ CONFIG_KEYBOARD_TEGRA=y
 CONFIG_KEYBOARD_SPEAR=y
 CONFIG_KEYBOARD_ST_KEYSCAN=y
 CONFIG_KEYBOARD_CROS_EC=m
+CONFIG_KEYBOARD_SAMSUNG=m
 CONFIG_MOUSE_PS2_ELANTECH=y
 CONFIG_MOUSE_CYAPA=m
 CONFIG_MOUSE_ELAN_I2C=y
index 19cfab5..20febb3 100644 (file)
@@ -29,7 +29,7 @@
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+       return (pmd_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index aeddd28..92fd2c8 100644 (file)
@@ -193,6 +193,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 #define pmd_large(pmd)         (pmd_val(pmd) & 2)
 #define pmd_bad(pmd)           (pmd_val(pmd) & 2)
+#define pmd_present(pmd)       (pmd_val(pmd))
 
 #define copy_pmd(pmdpd,pmdps)          \
        do {                            \
index fa70db7..2a029bc 100644 (file)
@@ -211,6 +211,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
                                                : !!(pmd_val(pmd) & (val)))
 #define pmd_isclear(pmd, val)  (!(pmd_val(pmd) & (val)))
 
+#define pmd_present(pmd)       (pmd_isset((pmd), L_PMD_SECT_VALID))
 #define pmd_young(pmd)         (pmd_isset((pmd), PMD_SECT_AF))
 #define pte_special(pte)       (pte_isset((pte), L_PTE_SPECIAL))
 static inline pte_t pte_mkspecial(pte_t pte)
@@ -249,10 +250,10 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
 #define pfn_pmd(pfn,prot)      (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define mk_pmd(page,prot)      pfn_pmd(page_to_pfn(page),prot)
 
-/* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
+/* represent a notpresent pmd by faulting entry, this is used by pmdp_invalidate */
 static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 {
-       return __pmd(0);
+       return __pmd(pmd_val(pmd) & ~L_PMD_SECT_VALID);
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
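
The pgtable-3level.h change above stops encoding a not-present huge-page entry as all zeroes: pmd_mknotpresent() now only clears L_PMD_SECT_VALID, and pmd_present() tests that same bit, so pmdp_invalidate() can drop the valid bit while keeping the rest of the descriptor intact. The standalone model below is only an illustration of that encoding; the bit position and the descriptor value are invented and are not the real ARM LPAE layout.

#include <stdint.h>
#include <stdio.h>

#define MODEL_SECT_VALID (1ULL << 0)            /* stand-in for L_PMD_SECT_VALID */

static uint64_t model_mknotpresent(uint64_t pmd)
{
        return pmd & ~MODEL_SECT_VALID;         /* drop only the valid bit */
}

static int model_present(uint64_t pmd)
{
        return !!(pmd & MODEL_SECT_VALID);
}

int main(void)
{
        uint64_t pmd = 0x40000705ULL | MODEL_SECT_VALID;  /* made-up descriptor */
        uint64_t inv = model_mknotpresent(pmd);

        printf("present after invalidate: %d\n", model_present(inv));
        printf("other bits preserved:     %d\n",
               (inv & ~MODEL_SECT_VALID) == (pmd & ~MODEL_SECT_VALID));
        return 0;
}
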
index 348caab..d622040 100644 (file)
@@ -182,7 +182,6 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 #define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 
 #define pmd_none(pmd)          (!pmd_val(pmd))
-#define pmd_present(pmd)       (pmd_val(pmd))
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 {
index df90bc5..8615216 100644 (file)
@@ -486,7 +486,7 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-       trace_ipi_raise(target, ipi_types[ipinr]);
+       trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
        __smp_cross_call(target, ipinr);
 }
 
index e65aa7d..20dcf6e 100644 (file)
@@ -61,7 +61,6 @@ config ARCH_EXYNOS4
        select CLKSRC_SAMSUNG_PWM if CPU_EXYNOS4210
        select CPU_EXYNOS4210
        select GIC_NON_BANKED
-       select KEYBOARD_SAMSUNG if INPUT_KEYBOARD
        select MIGHT_HAVE_CACHE_L2X0
        help
          Samsung EXYNOS4 (Cortex-A9) SoC based systems
index a38b16b..b56de4b 100644 (file)
@@ -46,7 +46,7 @@ static int ksz8081_phy_fixup(struct phy_device *dev)
 static void __init imx6ul_enet_phy_init(void)
 {
        if (IS_BUILTIN(CONFIG_PHYLIB))
-               phy_register_fixup_for_uid(PHY_ID_KSZ8081, 0xffffffff,
+               phy_register_fixup_for_uid(PHY_ID_KSZ8081, MICREL_PHY_ID_MASK,
                                           ksz8081_phy_fixup);
 }
 
index 5d7fb59..bf60844 100644 (file)
@@ -43,8 +43,8 @@
 #define OTHERS_MASK                    (MODEM_IRQ_MASK | HOOK_SWITCH_MASK)
 
 /* IRQ handler register bitmasks */
-#define DEFERRED_FIQ_MASK              (0x1 << (INT_DEFERRED_FIQ % IH2_BASE))
-#define GPIO_BANK1_MASK                (0x1 << INT_GPIO_BANK1)
+#define DEFERRED_FIQ_MASK              OMAP_IRQ_BIT(INT_DEFERRED_FIQ)
+#define GPIO_BANK1_MASK                OMAP_IRQ_BIT(INT_GPIO_BANK1)
 
 /* Driver buffer byte offsets */
 #define BUF_MASK                       (FIQ_MASK * 4)
@@ -110,7 +110,7 @@ ENTRY(qwerty_fiqin_start)
        mov r8, #2                              @ reset FIQ agreement
        str r8, [r12, #IRQ_CONTROL_REG_OFFSET]
 
-       cmp r10, #INT_GPIO_BANK1                @ is it GPIO bank interrupt?
+       cmp r10, #(INT_GPIO_BANK1 - NR_IRQS_LEGACY)     @ is it GPIO interrupt?
        beq gpio                                @ yes - process it
 
        mov r8, #1
index d1f1209..ec760ae 100644 (file)
@@ -109,7 +109,8 @@ void __init ams_delta_init_fiq(void)
         * Since no set_type() method is provided by OMAP irq chip,
         * switch to edge triggered interrupt type manually.
         */
-       offset = IRQ_ILR0_REG_OFFSET + INT_DEFERRED_FIQ * 0x4;
+       offset = IRQ_ILR0_REG_OFFSET +
+                       ((INT_DEFERRED_FIQ - NR_IRQS_LEGACY) & 0x1f) * 0x4;
        val = omap_readl(DEFERRED_FIQ_IH_BASE + offset) & ~(1 << 1);
        omap_writel(val, DEFERRED_FIQ_IH_BASE + offset);
 
@@ -149,7 +150,7 @@ void __init ams_delta_init_fiq(void)
        /*
         * Redirect GPIO interrupts to FIQ
         */
-       offset = IRQ_ILR0_REG_OFFSET + INT_GPIO_BANK1 * 0x4;
+       offset = IRQ_ILR0_REG_OFFSET + (INT_GPIO_BANK1 - NR_IRQS_LEGACY) * 0x4;
        val = omap_readl(OMAP_IH1_BASE + offset) | 1;
        omap_writel(val, OMAP_IH1_BASE + offset);
 }
index adb5e76..6dfc3e1 100644 (file)
@@ -14,6 +14,8 @@
 #ifndef __AMS_DELTA_FIQ_H
 #define __AMS_DELTA_FIQ_H
 
+#include <mach/irqs.h>
+
 /*
  * Interrupt number used for passing control from FIQ to IRQ.
  * IRQ12, described as reserved, has been selected.
index 0517f0c..1a648e9 100644 (file)
@@ -17,6 +17,7 @@ config ARCH_OMAP3
        select PM_OPP if PM
        select PM if CPU_IDLE
        select SOC_HAS_OMAP2_SDRC
+       select ARM_ERRATA_430973
 
 config ARCH_OMAP4
        bool "TI OMAP4"
@@ -36,6 +37,7 @@ config ARCH_OMAP4
        select PM if CPU_IDLE
        select ARM_ERRATA_754322
        select ARM_ERRATA_775420
+       select OMAP_INTERCONNECT
 
 config SOC_OMAP5
        bool "TI OMAP5"
@@ -67,6 +69,8 @@ config SOC_AM43XX
        select HAVE_ARM_SCU
        select GENERIC_CLOCKEVENTS_BROADCAST
        select HAVE_ARM_TWD
+       select ARM_ERRATA_754322
+       select ARM_ERRATA_775420
 
 config SOC_DRA7XX
        bool "TI DRA7XX"
@@ -240,4 +244,12 @@ endmenu
 
 endif
 
+config OMAP5_ERRATA_801819
+       bool "Errata 801819: An eviction from L1 data cache might stall indefinitely"
+       depends on SOC_OMAP5 || SOC_DRA7XX
+       help
+         A livelock can occur in the L2 cache arbitration that might prevent
+         a snoop from completing. Under certain conditions this can cause the
+         system to deadlock.
+
 endmenu
index af2851f..bae263f 100644 (file)
@@ -46,6 +46,7 @@
 
 #define OMAP5_DRA7_MON_SET_CNTFRQ_INDEX        0x109
 #define OMAP5_MON_AMBA_IF_INDEX                0x108
+#define OMAP5_DRA7_MON_SET_ACR_INDEX   0x107
 
 /* Secure PPA(Primary Protected Application) APIs */
 #define OMAP4_PPA_L2_POR_INDEX         0x23
index c625cc1..8cd1de9 100644 (file)
@@ -50,6 +50,39 @@ void __iomem *omap4_get_scu_base(void)
        return scu_base;
 }
 
+#ifdef CONFIG_OMAP5_ERRATA_801819
+void omap5_erratum_workaround_801819(void)
+{
+       u32 acr, revidr;
+       u32 acr_mask;
+
+       /* REVIDR[3] indicates erratum fix available on silicon */
+       asm volatile ("mrc p15, 0, %0, c0, c0, 6" : "=r" (revidr));
+       if (revidr & (0x1 << 3))
+               return;
+
+       asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+       /*
+        * BIT(27) - Disables streaming. All write-allocate lines allocate in
+        * the L1 or L2 cache.
+        * BIT(25) - Disables streaming. All write-allocate lines allocate in
+        * the L1 cache.
+        */
+       acr_mask = (0x3 << 25) | (0x3 << 27);
+       /* do we already have it done.. if yes, skip expensive smc */
+       if ((acr & acr_mask) == acr_mask)
+               return;
+
+       acr |= acr_mask;
+       omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+       pr_debug("%s: ARM erratum workaround 801819 applied on CPU%d\n",
+                __func__, smp_processor_id());
+}
+#else
+static inline void omap5_erratum_workaround_801819(void) { }
+#endif
+
 static void omap4_secondary_init(unsigned int cpu)
 {
        /*
@@ -64,12 +97,15 @@ static void omap4_secondary_init(unsigned int cpu)
                omap_secure_dispatcher(OMAP4_PPA_CPU_ACTRL_SMP_INDEX,
                                                        4, 0, 0, 0, 0, 0);
 
-       /*
-        * Configure the CNTFRQ register for the secondary cpu's which
-        * indicates the frequency of the cpu local timers.
-        */
-       if (soc_is_omap54xx() || soc_is_dra7xx())
+       if (soc_is_omap54xx() || soc_is_dra7xx()) {
+               /*
+                * Configure the CNTFRQ register for the secondary CPUs, which
+                * indicates the frequency of the CPU local timers.
+                */
                set_cntfreq();
+               /* Configure ACR to disable streaming WA for 801819 */
+               omap5_erratum_workaround_801819();
+       }
 
        /*
         * Synchronise with the boot thread.
@@ -218,6 +254,8 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
 
        if (cpu_is_omap446x())
                startup_addr = omap4460_secondary_startup;
+       if (soc_is_dra74x() || soc_is_omap54xx())
+               omap5_erratum_workaround_801819();
 
        /*
         * Write the address of secondary startup routine into the
index 78af6d8..daf2753 100644 (file)
@@ -186,8 +186,9 @@ static int _pwrdm_state_switch(struct powerdomain *pwrdm, int flag)
                        trace_state = (PWRDM_TRACE_STATES_FLAG |
                                       ((next & OMAP_POWERSTATE_MASK) << 8) |
                                       ((prev & OMAP_POWERSTATE_MASK) << 0));
-                       trace_power_domain_target(pwrdm->name, trace_state,
-                                                 smp_processor_id());
+                       trace_power_domain_target_rcuidle(pwrdm->name,
+                                                         trace_state,
+                                                         smp_processor_id());
                }
                break;
        default:
@@ -523,8 +524,8 @@ int pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst)
 
        if (arch_pwrdm && arch_pwrdm->pwrdm_set_next_pwrst) {
                /* Trace the pwrdm desired target state */
-               trace_power_domain_target(pwrdm->name, pwrst,
-                                         smp_processor_id());
+               trace_power_domain_target_rcuidle(pwrdm->name, pwrst,
+                                                 smp_processor_id());
                /* Program the pwrdm desired target state */
                ret = arch_pwrdm->pwrdm_set_next_pwrst(pwrdm, pwrst);
        }
index 0ec2d00..eb350a6 100644 (file)
@@ -36,14 +36,7 @@ static struct powerdomain iva_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_IVA_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 4,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* hwa_mem */
-               [1] = PWRSTS_OFF_RET,   /* sl2_mem */
-               [2] = PWRSTS_OFF_RET,   /* tcm1_mem */
-               [3] = PWRSTS_OFF_RET,   /* tcm2_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* hwa_mem */
                [1] = PWRSTS_ON,        /* sl2_mem */
@@ -76,12 +69,7 @@ static struct powerdomain ipu_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_IPU_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 2,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* aessmem */
-               [1] = PWRSTS_OFF_RET,   /* periphmem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* aessmem */
                [1] = PWRSTS_ON,        /* periphmem */
@@ -95,11 +83,7 @@ static struct powerdomain dss_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_DSS_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dss_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dss_mem */
        },
@@ -111,13 +95,8 @@ static struct powerdomain l4per_7xx_pwrdm = {
        .name             = "l4per_pwrdm",
        .prcm_offs        = DRA7XX_PRM_L4PER_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
-       .pwrsts           = PWRSTS_RET_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
+       .pwrsts           = PWRSTS_ON,
        .banks            = 2,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* nonretained_bank */
-               [1] = PWRSTS_OFF_RET,   /* retained_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* nonretained_bank */
                [1] = PWRSTS_ON,        /* retained_bank */
@@ -132,9 +111,6 @@ static struct powerdomain gpu_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* gpu_mem */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* gpu_mem */
        },
@@ -148,8 +124,6 @@ static struct powerdomain wkupaon_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* wkup_bank */
        },
@@ -161,15 +135,7 @@ static struct powerdomain core_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_CORE_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
        .banks            = 5,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* core_nret_bank */
-               [1] = PWRSTS_OFF_RET,   /* core_ocmram */
-               [2] = PWRSTS_OFF_RET,   /* core_other_bank */
-               [3] = PWRSTS_OFF_RET,   /* ipu_l2ram */
-               [4] = PWRSTS_OFF_RET,   /* ipu_unicache */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* core_nret_bank */
                [1] = PWRSTS_ON,        /* core_ocmram */
@@ -226,11 +192,7 @@ static struct powerdomain vpe_7xx_pwrdm = {
        .prcm_offs        = DRA7XX_PRM_VPE_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
-       .pwrsts_logic_ret = PWRSTS_OFF,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* vpe_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* vpe_bank */
        },
@@ -260,14 +222,8 @@ static struct powerdomain l3init_7xx_pwrdm = {
        .name             = "l3init_pwrdm",
        .prcm_offs        = DRA7XX_PRM_L3INIT_INST,
        .prcm_partition   = DRA7XX_PRM_PARTITION,
-       .pwrsts           = PWRSTS_RET_ON,
-       .pwrsts_logic_ret = PWRSTS_RET,
+       .pwrsts           = PWRSTS_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* gmac_bank */
-               [1] = PWRSTS_OFF_RET,   /* l3init_bank1 */
-               [2] = PWRSTS_OFF_RET,   /* l3init_bank2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* gmac_bank */
                [1] = PWRSTS_ON,        /* l3init_bank1 */
@@ -283,9 +239,6 @@ static struct powerdomain eve3_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve3_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve3_bank */
        },
@@ -299,9 +252,6 @@ static struct powerdomain emu_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* emu_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* emu_bank */
        },
@@ -314,11 +264,6 @@ static struct powerdomain dsp2_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dsp2_edma */
-               [1] = PWRSTS_OFF_RET,   /* dsp2_l1 */
-               [2] = PWRSTS_OFF_RET,   /* dsp2_l2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dsp2_edma */
                [1] = PWRSTS_ON,        /* dsp2_l1 */
@@ -334,11 +279,6 @@ static struct powerdomain dsp1_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 3,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* dsp1_edma */
-               [1] = PWRSTS_OFF_RET,   /* dsp1_l1 */
-               [2] = PWRSTS_OFF_RET,   /* dsp1_l2 */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* dsp1_edma */
                [1] = PWRSTS_ON,        /* dsp1_l1 */
@@ -354,9 +294,6 @@ static struct powerdomain cam_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* vip_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* vip_bank */
        },
@@ -370,9 +307,6 @@ static struct powerdomain eve4_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve4_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve4_bank */
        },
@@ -386,9 +320,6 @@ static struct powerdomain eve2_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve2_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve2_bank */
        },
@@ -402,9 +333,6 @@ static struct powerdomain eve1_7xx_pwrdm = {
        .prcm_partition   = DRA7XX_PRM_PARTITION,
        .pwrsts           = PWRSTS_OFF_ON,
        .banks            = 1,
-       .pwrsts_mem_ret = {
-               [0] = PWRSTS_OFF_RET,   /* eve1_bank */
-       },
        .pwrsts_mem_on  = {
                [0] = PWRSTS_ON,        /* eve1_bank */
        },
index 5b385bb..cb9497a 100644 (file)
@@ -496,8 +496,7 @@ void __init omap_init_time(void)
        __omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
                        2, "timer_sys_ck", NULL, false);
 
-       if (of_have_populated_dt())
-               clocksource_probe();
+       clocksource_probe();
 }
 
 #if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -505,6 +504,8 @@ void __init omap3_secure_sync32k_timer_init(void)
 {
        __omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
                        2, "timer_sys_ck", NULL, false);
+
+       clocksource_probe();
 }
 #endif /* CONFIG_ARCH_OMAP3 */
 
@@ -513,6 +514,8 @@ void __init omap3_gptimer_timer_init(void)
 {
        __omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
                        1, "timer_sys_ck", "ti,timer-alwon", true);
+
+       clocksource_probe();
 }
 #endif
 
index 84baa16..e93aa67 100644 (file)
@@ -68,7 +68,7 @@
 #include <linux/platform_data/asoc-s3c.h>
 #include <linux/platform_data/spi-s3c64xx.h>
 
-static u64 samsung_device_dma_mask = DMA_BIT_MASK(32);
+#define samsung_device_dma_mask (*((u64[]) { DMA_BIT_MASK(32) }))
 
 /* AC97 */
 #ifdef CONFIG_CPU_S3C2440
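
The devs.c hunk above replaces one shared u64 with a macro built from a C99 compound literal, so every expansion of samsung_device_dma_mask names its own anonymous, writable u64 object (and no object is emitted at all when the macro goes unused). A minimal user-space sketch of the same trick, with made-up names rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/* Each use of EXAMPLE_DMA_MASK expands to a distinct, addressable uint64_t. */
#define EXAMPLE_DMA_MASK (*((uint64_t[]) { 0xffffffffULL }))

static uint64_t *mask_a = &EXAMPLE_DMA_MASK;
static uint64_t *mask_b = &EXAMPLE_DMA_MASK;

int main(void)
{
        *mask_a = 0xffffffffffULL;              /* widen one mask ... */
        printf("a=%llx b=%llx distinct=%d\n",
               (unsigned long long)*mask_a,
               (unsigned long long)*mask_b,     /* ... the other stays 32-bit */
               mask_a != mask_b);
        return 0;
}
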
index 7085e32..648a32c 100644 (file)
@@ -95,7 +95,7 @@ boot := arch/arm64/boot
 Image: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
-Image.%: vmlinux
+Image.%: Image
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
 zinstall install:
index 3a4e9a2..fbafa24 100644 (file)
                #size-cells = <1>;
                #interrupts-cells = <3>;
 
-               compatible = "arm,amba-bus";
+               compatible = "simple-bus";
                interrupt-parent = <&gic>;
                ranges;
 
index 46f325a..d7f8e06 100644 (file)
        };
 
        amba {
-               compatible = "arm,amba-bus";
+               compatible = "simple-bus";
                #address-cells = <2>;
                #size-cells = <2>;
                ranges;
index f69f69c..da84645 100644 (file)
@@ -38,25 +38,54 @@ extern int kgdb_fault_expected;
 #endif /* !__ASSEMBLY__ */
 
 /*
- * gdb is expecting the following registers layout.
+ * gdb remote protocol (well most versions of it) expects the following
+ * register layout.
  *
  * General purpose regs:
  *     r0-r30: 64 bit
  *     sp,pc : 64 bit
- *     pstate  : 64 bit
- *     Total: 34
+ *     pstate  : 32 bit
+ *     Total: 33 + 1
  * FPU regs:
  *     f0-f31: 128 bit
- *     Total: 32
- * Extra regs
  *     fpsr & fpcr: 32 bit
- *     Total: 2
+ *     Total: 32 + 2
  *
+ * To expand a little on the "most versions of it"... when the gdb remote
+ * protocol for AArch64 was developed it depended on a statement in the
+ * Architecture Reference Manual that claimed "SPSR_ELx is a 32-bit register",
+ * and, as a result, allocated only 32-bits for the PSTATE in the remote
+ * protocol. In fact this statement is still present in ARM DDI 0487A.i.
+ *
+ * Unfortunately "is a 32-bit register" has a very special meaning for
+ * system registers. It means that "the upper bits, bits[63:32], are
+ * RES0.". RES0 is heavily used in the ARM architecture documents as a
+ * way to leave space for future architecture changes. So to translate a
+ * little for people who don't spend their spare time reading ARM architecture
+ * manuals, what "is a 32-bit register" actually means in this context is
+ * "is a 64-bit register but one with no meaning allocated to any of the
+ * upper 32-bits... *yet*".
+ *
+ * Perhaps then we should not be surprised that this has led to some
+ * confusion. Specifically a patch, influenced by the above translation,
+ * that extended PSTATE to 64-bit was accepted into gdb-7.7 but the patch
+ * was reverted in gdb-7.8.1 and all later releases, when this was
+ * discovered to be an undocumented protocol change.
+ *
+ * So... it is *not* wrong for us to only allocate 32-bits to PSTATE
+ * here even though the kernel itself allocates 64-bits for the same
+ * state. That is because this bit of code tells the kernel how the gdb
+ * remote protocol (well most versions of it) describes the register state.
+ *
+ * Note that if you are using one of the versions of gdb that supports
+ * the gdb-7.7 version of the protocol you cannot use kgdb directly
+ * without providing a custom register description (gdb can load new
+ * protocol descriptions at runtime).
  */
 
-#define _GP_REGS               34
+#define _GP_REGS               33
 #define _FP_REGS               32
-#define _EXTRA_REGS            2
+#define _EXTRA_REGS            3
 /*
  * general purpose registers size in bytes.
  * pstate is only 4 bytes. subtract 4 bytes
index ff98585..d25f4f1 100644 (file)
@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()              do { } while (0)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 #define PGD_SIZE       (PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
index 433e504..0226447 100644 (file)
@@ -124,6 +124,18 @@ static inline void cpu_panic_kernel(void)
        cpu_park_loop();
 }
 
+/*
+ * If a secondary CPU enters the kernel but fails to come online
+ * (e.g. due to mismatched features), and cannot exit the kernel,
+ * we increment cpus_stuck_in_kernel and leave the CPU in a
+ * quiescent loop within the kernel text. The memory containing
+ * this loop must not be re-used for anything else as the 'stuck'
+ * core is executing it.
+ *
+ * This function is used to inhibit features like kexec and hibernate.
+ */
+bool cpus_are_stuck_in_kernel(void);
+
 #endif /* ifndef __ASSEMBLY__ */
 
 #endif /* ifndef __ASM_SMP_H */
index fc9682b..e875a5a 100644 (file)
@@ -30,22 +30,53 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
        unsigned int tmp;
        arch_spinlock_t lockval;
+       u32 owner;
+
+       /*
+        * Ensure prior spin_lock operations to other locks have completed
+        * on this CPU before we test whether "lock" is locked.
+        */
+       smp_mb();
+       owner = READ_ONCE(lock->owner) << 16;
 
        asm volatile(
 "      sevl\n"
 "1:    wfe\n"
 "2:    ldaxr   %w0, %2\n"
+       /* Is the lock free? */
 "      eor     %w1, %w0, %w0, ror #16\n"
-"      cbnz    %w1, 1b\n"
+"      cbz     %w1, 3f\n"
+       /* Lock taken -- has there been a subsequent unlock->lock transition? */
+"      eor     %w1, %w3, %w0, lsl #16\n"
+"      cbz     %w1, 1b\n"
+       /*
+        * The owner has been updated, so there was an unlock->lock
+        * transition that we missed. That means we can rely on the
+        * store-release of the unlock operation paired with the
+        * load-acquire of the lock operation to publish any of our
+        * previous stores to the new lock owner and therefore don't
+        * need to bother with the writeback below.
+        */
+"      b       4f\n"
+"3:\n"
+       /*
+        * Serialise against any concurrent lockers by writing back the
+        * unlocked lock value
+        */
        ARM64_LSE_ATOMIC_INSN(
        /* LL/SC */
 "      stxr    %w1, %w0, %2\n"
-"      cbnz    %w1, 2b\n", /* Serialise against any concurrent lockers */
-       /* LSE atomics */
 "      nop\n"
-"      nop\n")
+"      nop\n",
+       /* LSE atomics */
+"      mov     %w1, %w0\n"
+"      cas     %w0, %w0, %2\n"
+"      eor     %w1, %w1, %w0\n")
+       /* Somebody else wrote to the lock, GOTO 10 and reload the value */
+"      cbnz    %w1, 2b\n"
+"4:"
        : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
-       :
+       : "r" (owner)
        : "memory");
 }
 
@@ -148,6 +179,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+       smp_mb(); /* See arch_spin_unlock_wait */
        return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
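
The reworked arch_spin_unlock_wait() above snapshots the owner half of the ticket lock and then waits until it either observes the lock free or sees the owner change, which proves an unlock->lock hand-over has already completed. The C below is only a rough user-space model of that decision logic; it leaves out the wfe/sevl waiting, the serialising write-back and the acquire/release ordering that the real assembly provides.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Toy ticket lock: owner in bits [15:0], next ticket in bits [31:16]. */
struct toy_ticket_lock {
        _Atomic uint32_t val;
};

static void toy_unlock_wait(struct toy_ticket_lock *lock)
{
        uint32_t v = atomic_load(&lock->val);
        uint16_t owner = (uint16_t)v;           /* snapshot the current owner */

        for (;;) {
                v = atomic_load(&lock->val);
                if ((uint16_t)v == (uint16_t)(v >> 16))
                        break;                  /* lock observed free */
                if ((uint16_t)v != owner)
                        break;                  /* owner moved on: an unlock->lock
                                                   hand-over already happened */
                /* the real code waits on wfe here instead of spinning */
        }
}

int main(void)
{
        struct toy_ticket_lock lock = { .val = 0 };     /* owner == next: unlocked */

        toy_unlock_wait(&lock);                         /* returns immediately */
        printf("lock observed free\n");
        return 0;
}
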
 
index f8df75d..21ab5df 100644 (file)
@@ -33,6 +33,7 @@
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
 #include <asm/sections.h>
+#include <asm/smp.h>
 #include <asm/suspend.h>
 #include <asm/virt.h>
 
@@ -236,6 +237,11 @@ int swsusp_arch_suspend(void)
        unsigned long flags;
        struct sleep_stack_data state;
 
+       if (cpus_are_stuck_in_kernel()) {
+               pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
+               return -EBUSY;
+       }
+
        local_dbg_save(flags);
 
        if (__cpu_suspend_enter(&state)) {
index b67531a..b5f063e 100644 (file)
@@ -58,7 +58,17 @@ struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
        { "x30", 8, offsetof(struct pt_regs, regs[30])},
        { "sp", 8, offsetof(struct pt_regs, sp)},
        { "pc", 8, offsetof(struct pt_regs, pc)},
-       { "pstate", 8, offsetof(struct pt_regs, pstate)},
+       /*
+        * struct pt_regs thinks PSTATE is 64-bits wide but gdb remote
+        * protocol disagrees. Therefore we must extract only the lower
+        * 32-bits. Look for the big comment in asm/kgdb.h for more
+        * detail.
+        */
+       { "pstate", 4, offsetof(struct pt_regs, pstate)
+#ifdef CONFIG_CPU_BIG_ENDIAN
+                                                       + 4
+#endif
+       },
        { "v0", 16, -1 },
        { "v1", 16, -1 },
        { "v2", 16, -1 },
@@ -128,6 +138,8 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
        memset((char *)gdb_regs, 0, NUMREGBYTES);
        thread_regs = task_pt_regs(task);
        memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+       /* Special case for PSTATE (check comments in asm/kgdb.h for details) */
+       dbg_get_reg(33, gdb_regs + GP_REG_BYTES, thread_regs);
 }
 
 void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
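
The dbg_reg_def entry above describes pstate to gdb as 4 bytes even though struct pt_regs stores 8, and the +4 added on big-endian is there because the low 32 bits of a 64-bit field start at byte offset 4 in that case. A small host-side demonstration of that offset arithmetic, using an arbitrary example value:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint64_t pstate = 0x00000000600003c5ULL;        /* made-up example value */
        uint32_t low;
        size_t off = 0;

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        off = 4;                                        /* low half sits 4 bytes in */
#endif
        memcpy(&low, (const unsigned char *)&pstate + off, sizeof(low));
        printf("low 32 bits of pstate: %#x\n", (unsigned int)low);
        return 0;
}
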
index 678e084..62ff3c0 100644 (file)
@@ -909,3 +909,21 @@ int setup_profiling_timer(unsigned int multiplier)
 {
        return -EINVAL;
 }
+
+static bool have_cpu_die(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+       int any_cpu = raw_smp_processor_id();
+
+       if (cpu_ops[any_cpu]->cpu_die)
+               return true;
+#endif
+       return false;
+}
+
+bool cpus_are_stuck_in_kernel(void)
+{
+       bool smp_spin_tables = (num_possible_cpus() > 1 && !have_cpu_die());
+
+       return !!cpus_stuck_in_kernel || smp_spin_tables;
+}
index f7cf463..2a43012 100644 (file)
@@ -64,8 +64,7 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 
        /*
         * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
+        * to safely read from kernel space.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);
@@ -111,21 +110,12 @@ static void dump_backtrace_entry(unsigned long where)
        print_ip_sym(where);
 }
 
-static void dump_instr(const char *lvl, struct pt_regs *regs)
+static void __dump_instr(const char *lvl, struct pt_regs *regs)
 {
        unsigned long addr = instruction_pointer(regs);
-       mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;
 
-       /*
-        * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
-        */
-       fs = get_fs();
-       set_fs(KERNEL_DS);
-
        for (i = -4; i < 1; i++) {
                unsigned int val, bad;
 
@@ -139,8 +129,18 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
                }
        }
        printk("%sCode: %s\n", lvl, str);
+}
 
-       set_fs(fs);
+static void dump_instr(const char *lvl, struct pt_regs *regs)
+{
+       if (!user_mode(regs)) {
+               mm_segment_t fs = get_fs();
+               set_fs(KERNEL_DS);
+               __dump_instr(lvl, regs);
+               set_fs(fs);
+       } else {
+               __dump_instr(lvl, regs);
+       }
 }
 
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
index b7b3978..efcf1f7 100644 (file)
@@ -179,7 +179,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
                                                 &asid_generation);
        flush_context(cpu);
 
-       /* We have at least 1 ASID per CPU, so this will always succeed */
+       /* We have more ASIDs than CPUs, so this will always succeed */
        asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 
 set_asid:
@@ -227,8 +227,11 @@ switch_mm_fastpath:
 static int asids_init(void)
 {
        asid_bits = get_cpu_asid_bits();
-       /* If we end up with more CPUs than ASIDs, expect things to crash */
-       WARN_ON(NUM_USER_ASIDS < num_possible_cpus());
+       /*
+        * Expect allocation after rollover to fail if we don't have at least
+        * one more ASID than CPUs. ASID #0 is reserved for init_mm.
+        */
+       WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
        atomic64_set(&asid_generation, ASID_FIRST_VERSION);
        asid_map = kzalloc(BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(*asid_map),
                           GFP_KERNEL);
index ba3fc12..013e2cb 100644 (file)
@@ -441,7 +441,7 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
        return 1;
 }
 
-static struct fault_info {
+static const struct fault_info {
        int     (*fn)(unsigned long addr, unsigned int esr, struct pt_regs *regs);
        int     sig;
        int     code;
index dbd12ea..43a76b0 100644 (file)
@@ -71,10 +71,6 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
 {
        struct page *page = pte_page(pte);
 
-       /* no flushing needed for anonymous pages */
-       if (!page_mapping(page))
-               return;
-
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
                sync_icache_aliases(page_address(page),
                                    PAGE_SIZE << compound_order(page));
index 1aba19d..db039cb 100644 (file)
@@ -43,7 +43,7 @@ static inline void pgd_ctor(void *x)
  */
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-       return quicklist_alloc(QUICK_PGD, GFP_KERNEL | __GFP_REPEAT, pgd_ctor);
+       return quicklist_alloc(QUICK_PGD, GFP_KERNEL, pgd_ctor);
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -54,7 +54,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -63,7 +63,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
        struct page *page;
        void *pg;
 
-       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
        if (!pg)
                return NULL;
 
index 235ece4..42f1aff 100644 (file)
@@ -24,14 +24,14 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+       pte = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 41907d2..c9ed14f 100644 (file)
@@ -22,7 +22,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
        if (pte)
                clear_page(pte);
        return pte;
@@ -33,9 +33,9 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *page;
 
 #ifdef CONFIG_HIGHPTE
-       page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+       page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-       page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       page = alloc_pages(GFP_KERNEL, 0);
 #endif
        if (!page)
                return NULL;
index 77da3b0..eeebf86 100644 (file)
@@ -64,7 +64,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
@@ -78,7 +78,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       gfp_t flags =  GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+       gfp_t flags =  GFP_KERNEL | __GFP_ZERO;
        return (pte_t *) __get_free_page(flags);
 }
 
index f80758c..e109ee9 100644 (file)
@@ -45,7 +45,7 @@ config IA64
        select GENERIC_SMP_IDLE_THREAD
        select ARCH_INIT_TASK
        select ARCH_TASK_STRUCT_ALLOCATOR
-       select ARCH_THREAD_INFO_ALLOCATOR
+       select ARCH_THREAD_STACK_ALLOCATOR
        select ARCH_CLOCKSOURCE_DATA
        select GENERIC_TIME_VSYSCALL_OLD
        select SYSCTL_ARCH_UNALIGN_NO_WARN
index aa995b6..d1212b8 100644 (file)
@@ -48,15 +48,15 @@ struct thread_info {
 #ifndef ASM_OFFSETS_C
 /* how to get the thread information struct from C */
 #define current_thread_info()  ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node)      \
-               ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node)     \
+               ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
 #define task_thread_info(tsk)  ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()  ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node)      ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node)     ((unsigned long *) 0)
 #define task_thread_info(tsk)  ((struct thread_info *) 0)
 #endif
-#define free_thread_info(ti)   /* nothing */
+#define free_thread_stack(ti)  /* nothing */
 #define task_stack_page(tsk)   ((void *)(tsk))
 
 #define __HAVE_THREAD_FUNCTIONS
index f9efe97..0eaa89f 100644 (file)
@@ -26,6 +26,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
  * handled. This is done by having a special ".data..init_task" section...
  */
 #define init_thread_info       init_task_mem.s.thread_info
+#define init_stack             init_task_mem.stack
 
 union {
        struct {
index f9924fb..fb95aed 100644 (file)
@@ -14,7 +14,7 @@ extern const char bad_pmd_string[];
 extern inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
        unsigned long address)
 {
-       unsigned long page = __get_free_page(GFP_DMA|__GFP_REPEAT);
+       unsigned long page = __get_free_page(GFP_DMA);
 
        if (!page)
                return NULL;
@@ -51,7 +51,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
        unsigned long address)
 {
-       struct page *page = alloc_pages(GFP_DMA|__GFP_REPEAT, 0);
+       struct page *page = alloc_pages(GFP_DMA, 0);
        pte_t *pte;
 
        if (!page)
index 24bcba4..c895b98 100644 (file)
@@ -11,7 +11,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long ad
 {
        pte_t *pte;
 
-       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (pte) {
                __flush_page_to_ram(pte);
                flush_tlb_kernel_page(pte);
@@ -32,7 +32,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addres
        struct page *page;
        pte_t *pte;
 
-       page = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+       page = alloc_pages(GFP_KERNEL|__GFP_ZERO, 0);
        if(!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
index 0931388..1901f61 100644 (file)
@@ -37,7 +37,7 @@ do {                                                  \
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       unsigned long page = __get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       unsigned long page = __get_free_page(GFP_KERNEL);
 
        if (!page)
                return NULL;
@@ -49,7 +49,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                        unsigned long address)
 {
-        struct page *page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+        struct page *page = alloc_pages(GFP_KERNEL, 0);
 
        if (page == NULL)
                return NULL;
index 3104df0..c2caa1e 100644 (file)
@@ -42,8 +42,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT |
-                                             __GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        return pte;
 }
 
@@ -51,7 +50,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
                                      unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+       pte = alloc_pages(GFP_KERNEL  | __GFP_ZERO, 0);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 61436d6..7c89390 100644 (file)
@@ -116,9 +116,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
        struct page *ptepage;
 
 #ifdef CONFIG_HIGHPTE
-       int flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_REPEAT;
+       int flags = GFP_KERNEL | __GFP_HIGHMEM;
 #else
-       int flags = GFP_KERNEL | __GFP_REPEAT;
+       int flags = GFP_KERNEL;
 #endif
 
        ptepage = alloc_pages(flags, 0);
index 4f4520e..eb99fcc 100644 (file)
@@ -239,8 +239,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
        if (mem_init_done) {
-               pte = (pte_t *)__get_free_page(GFP_KERNEL |
-                                       __GFP_REPEAT | __GFP_ZERO);
+               pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
        } else {
                pte = (pte_t *)early_get_page();
                if (pte)
index 6733ac5..36a391d 100644 (file)
@@ -74,7 +74,7 @@
 #define KVM_GUEST_KUSEG                        0x00000000UL
 #define KVM_GUEST_KSEG0                        0x40000000UL
 #define KVM_GUEST_KSEG23               0x60000000UL
-#define KVM_GUEST_KSEGX(a)             ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_KSEGX(a)             ((_ACAST32_(a)) & 0xe0000000)
 #define KVM_GUEST_CPHYSADDR(a)         ((_ACAST32_(a)) & 0x1fffffff)
 
 #define KVM_GUEST_CKSEG0ADDR(a)                (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
@@ -338,6 +338,7 @@ struct kvm_mips_tlb {
 #define KVM_MIPS_GUEST_TLB_SIZE        64
 struct kvm_vcpu_arch {
        void *host_ebase, *guest_ebase;
+       int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        unsigned long host_stack;
        unsigned long host_gp;
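
Two details in the kvm_host.h hunk above: KVM_GUEST_KSEGX() now masks with 0xe0000000 so that bit 31 takes part in the guest segment match, and the vcpu gains a vcpu_run function pointer, which the mips.c hunk further down points at a relocated copy of the run loop for module builds. The mask change is plain arithmetic; the address below is just an illustrative value above 2 GiB:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t addr = 0xc0000000;     /* example address with bit 31 set */

        printf("old mask: %#x -> %#x\n", (unsigned int)addr,
               (unsigned int)(addr & 0x60000000));
        printf("new mask: %#x -> %#x\n", (unsigned int)addr,
               (unsigned int)(addr & 0xe0000000));
        /*
         * With the old 0x60000000 mask the result is 0x40000000 and the
         * address would wrongly look like KVM_GUEST_KSEG0; with 0xe0000000
         * bit 31 is kept and the address matches none of the guest segments.
         */
        return 0;
}
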
 
index b336037..93c079a 100644 (file)
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (!pte)
                return NULL;
        clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        pmd_t *pmd;
 
-       pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+       pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
        if (pmd)
                pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
        return pmd;
index 396df6e..645c8a1 100644 (file)
@@ -1636,6 +1636,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                if (index < 0) {
                        vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
                        vcpu->arch.host_cp0_badvaddr = va;
+                       vcpu->arch.pc = curr_pc;
                        er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
                                                         vcpu);
                        preempt_enable();
@@ -1647,6 +1648,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                         * invalid exception to the guest
                         */
                        if (!TLB_IS_VALID(*tlb, va)) {
+                               vcpu->arch.host_cp0_badvaddr = va;
+                               vcpu->arch.pc = curr_pc;
                                er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
                                                                run, vcpu);
                                preempt_enable();
@@ -1666,7 +1669,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
                preempt_enable();
-               goto dont_update_pc;
+               goto done;
 
        }
 
@@ -1694,16 +1697,20 @@ skip_fault:
                kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
-               preempt_enable();
-               goto dont_update_pc;
        }
 
        preempt_enable();
+done:
+       /* Rollback PC only if emulation was unsuccessful */
+       if (er == EMULATE_FAIL)
+               vcpu->arch.pc = curr_pc;
 
 dont_update_pc:
-       /* Rollback PC */
-       vcpu->arch.pc = curr_pc;
-done:
+       /*
+        * This is for exceptions whose emulation updates the PC, so do not
+        * overwrite the PC under any circumstances
+        */
+
        return er;
 }
 
index 4ab4bdf..2143884 100644 (file)
@@ -28,6 +28,7 @@
 #define MIPS_EXC_MAX                12
 /* XXXSL More to follow */
 
+extern char __kvm_mips_vcpu_run_end[];
 extern char mips32_exception[], mips32_exceptionEnd[];
 extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
 
index 3ef0300..828fcfc 100644 (file)
@@ -202,6 +202,7 @@ FEXPORT(__kvm_mips_load_k0k1)
 
        /* Jump to guest */
        eret
+EXPORT(__kvm_mips_vcpu_run_end)
 
 VECTOR(MIPSX(exception), unknown)
 /* Find out what mode we came from and jump to the proper handler. */
index dc052fb..44da525 100644 (file)
@@ -315,6 +315,15 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
        memcpy(gebase + offset, mips32_GuestException,
               mips32_GuestExceptionEnd - mips32_GuestException);
 
+#ifdef MODULE
+       offset += mips32_GuestExceptionEnd - mips32_GuestException;
+       memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
+              __kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
+       vcpu->arch.vcpu_run = gebase + offset;
+#else
+       vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
+#endif
+
        /* Invalidate the icache for these ranges */
        local_flush_icache_range((unsigned long)gebase,
                                (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
@@ -404,7 +413,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        /* Disable hardware page table walking while in guest */
        htw_stop();
 
-       r = __kvm_mips_vcpu_run(run, vcpu);
+       r = vcpu->arch.vcpu_run(run, vcpu);
 
        /* Re-enable HTW before enabling interrupts */
        htw_start();
index 4861a78..f5f90bb 100644 (file)
@@ -115,7 +115,7 @@ static inline unsigned long current_stack_pointer(void)
 }
 
 #ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
 #endif
 #define get_thread_info(ti)    get_task_struct((ti)->task)
 #define put_thread_info(ti)    put_task_struct((ti)->task)
index 9977082..2d7986c 100644 (file)
@@ -397,8 +397,9 @@ static bool kgdb_arch_undo_singlestep(struct pt_regs *regs)
  * single-step state is cleared.  At this point the breakpoints should have
  * been removed by __switch_to().
  */
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
 {
+       struct thread_info *ti = (void *)stack;
        if (kgdb_sstep_thread == ti) {
                kgdb_sstep_thread = NULL;
 
index e77a7c7..9577cf7 100644 (file)
@@ -63,7 +63,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL);
        if (pte)
                clear_page(pte);
        return pte;
@@ -74,9 +74,9 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
        struct page *pte;
 
 #ifdef CONFIG_HIGHPTE
-       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM, 0);
 #else
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL, 0);
 #endif
        if (!pte)
                return NULL;
index 6e2985e..bb47d08 100644 (file)
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-                                       PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -53,7 +52,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (pte) {
                if (!pgtable_page_ctor(pte)) {
                        __free_page(pte);
index 21484e5..87eebd1 100644 (file)
@@ -77,7 +77,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                         unsigned long address)
 {
        struct page *pte;
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
+       pte = alloc_pages(GFP_KERNEL, 0);
        if (!pte)
                return NULL;
        clear_page(page_address(pte));
index 62b08ef..5b2a951 100644 (file)
@@ -122,7 +122,7 @@ pte_t __init_refok *pte_alloc_one_kernel(struct mm_struct *mm,
        pte_t *pte;
 
        if (likely(mem_init_done)) {
-               pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT);
+               pte = (pte_t *) __get_free_page(GFP_KERNEL);
        } else {
                pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 #if 0
index f2fd327..f08dda3 100644 (file)
@@ -63,8 +63,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
-                                              PMD_ORDER);
+       pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL, PMD_ORDER);
        if (pmd)
                memset(pmd, 0, PAGE_SIZE<<PMD_ORDER);
        return pmd;
@@ -124,7 +123,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 static inline pgtable_t
 pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
@@ -137,7 +136,7 @@ pte_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
 {
-       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
index 01f7464..0a9d439 100644 (file)
@@ -128,7 +128,7 @@ config PPC
        select IRQ_FORCED_THREADING
        select HAVE_RCU_TABLE_FREE if SMP
        select HAVE_SYSCALL_TRACEPOINTS
-       select HAVE_CBPF_JIT
+       select HAVE_CBPF_JIT if CPU_BIG_ENDIAN
        select HAVE_ARCH_JUMP_LABEL
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
        select ARCH_HAS_GCOV_PROFILE_ALL
index a235019..8e21bb4 100644 (file)
@@ -102,7 +102,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       tlb_flush_pgtable(tlb, address);
        pgtable_page_dtor(table);
        pgtable_free_tlb(tlb, page_address(table), 0);
 }
index 290157e..74839f2 100644 (file)
@@ -88,6 +88,7 @@
 #define HPTE_R_RPN_SHIFT       12
 #define HPTE_R_RPN             ASM_CONST(0x0ffffffffffff000)
 #define HPTE_R_PP              ASM_CONST(0x0000000000000003)
+#define HPTE_R_PPP             ASM_CONST(0x8000000000000003)
 #define HPTE_R_N               ASM_CONST(0x0000000000000004)
 #define HPTE_R_G               ASM_CONST(0x0000000000000008)
 #define HPTE_R_M               ASM_CONST(0x0000000000000010)
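
To see what the new mask adds relative to the existing one (it is what the native_hpte_updatepp()/native_hpte_updateboltedpp() hunks further down switch to), here is a tiny user-space check; the constant values are copied from the defines above, calling the top bit "pp0" is my gloss on the ISA naming, and a 64-bit unsigned long is assumed.

    #include <assert.h>
    #include <stdio.h>

    /* User-space stand-ins for the ASM_CONST() values above (illustration only). */
    #define HPTE_R_PP   0x0000000000000003UL
    #define HPTE_R_PPP  0x8000000000000003UL

    int main(void)
    {
            assert(HPTE_R_PPP == (HPTE_R_PP | (1UL << 63)));
            printf("HPTE_R_PPP = HPTE_R_PP plus the top (pp0) protection bit\n");
            return 0;
    }
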
index 488279e..cd5e7aa 100644 (file)
@@ -41,7 +41,7 @@ extern struct kmem_cache *pgtable_cache[];
                        pgtable_cache[(shift) - 1];     \
                })
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 extern pte_t *pte_fragment_alloc(struct mm_struct *, unsigned long, int);
 extern void pte_fragment_free(unsigned long *, int);
@@ -56,7 +56,7 @@ static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
        return (pgd_t *)__get_free_page(PGALLOC_GFP);
 #else
        struct page *page;
-       page = alloc_pages(PGALLOC_GFP, 4);
+       page = alloc_pages(PGALLOC_GFP | __GFP_REPEAT, 4);
        if (!page)
                return NULL;
        return (pgd_t *) page_address(page);
@@ -93,8 +93,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -110,13 +109,17 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long address)
 {
+       /*
+        * By now all the pud entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
         pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
@@ -127,6 +130,11 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
                                   unsigned long address)
 {
+       /*
+        * By now all the pmd entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
         return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
 }
 
@@ -151,7 +159,7 @@ static inline pgtable_t pmd_pgtable(pmd_t pmd)
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -198,7 +206,11 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
                                  unsigned long address)
 {
-       tlb_flush_pgtable(tlb, address);
+       /*
+        * By now all the pte entries should be none entries. So go
+        * ahead and flush the page walk cache
+        */
+       flush_tlb_pgtable(tlb, address);
        pgtable_free_tlb(tlb, table, 0);
 }
 
index 937d4e2..df29422 100644 (file)
@@ -228,5 +228,20 @@ extern void radix__vmemmap_remove_mapping(unsigned long start,
 
 extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
                                 pgprot_t flags, unsigned int psz);
+
+static inline unsigned long radix__get_tree_size(void)
+{
+       unsigned long rts_field;
+       /*
+        * we support 52 bits, hence 52-31 = 21, 0b10101
+        * RTS encoding details
+        * bits 0 - 3 of rts -> bits 6 - 8 unsigned long
+        * bits 4 - 5 of rts -> bits 62 - 63 of unsigned long
+        */
+       rts_field = (0x5UL << 5); /* 6 - 8 bits */
+       rts_field |= (0x2UL << 61);
+
+       return rts_field;
+}
 #endif /* __ASSEMBLY__ */
 #endif
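
The helper added above packs a 5-bit radix tree size into two separate fields. As a sanity check on the arithmetic in its comment, a small standalone C program (not kernel code) rebuilds the same value: 52 - 31 = 21 = 0b10101, whose low three bits (0x5) and high two bits (0x2) are the constants used in the shifts.

    #include <stdio.h>

    int main(void)
    {
            unsigned long rts = 52 - 31;                /* 21 = 0b10101 */
            unsigned long low = rts & 0x7;              /* 0b101 = 0x5  */
            unsigned long high = (rts >> 3) & 0x3;      /* 0b10  = 0x2  */
            unsigned long rts_field = (low << 5) | (high << 61);

            printf("rts_field = 0x%lx\n", rts_field);   /* same value the helper returns */
            return 0;
    }
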
index 13ef388..3fa94fc 100644 (file)
@@ -18,16 +18,19 @@ extern void radix__local_flush_tlb_mm(struct mm_struct *mm);
 extern void radix__local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                                    unsigned long ap, int nid);
+extern void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 extern void radix__tlb_flush(struct mmu_gather *tlb);
 #ifdef CONFIG_SMP
 extern void radix__flush_tlb_mm(struct mm_struct *mm);
 extern void radix__flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
 extern void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                              unsigned long ap, int nid);
+extern void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr);
 #else
 #define radix__flush_tlb_mm(mm)                radix__local_flush_tlb_mm(mm)
 #define radix__flush_tlb_page(vma,addr)        radix__local_flush_tlb_page(vma,addr)
 #define radix___flush_tlb_page(mm,addr,p,i)    radix___local_flush_tlb_page(mm,addr,p,i)
+#define radix__flush_tlb_pwc(tlb, addr)        radix__local_flush_tlb_pwc(tlb, addr)
 #endif
 
 #endif
index d98424a..96e5769 100644 (file)
@@ -72,5 +72,19 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
 #define flush_tlb_mm(mm)               local_flush_tlb_mm(mm)
 #define flush_tlb_page(vma, addr)      local_flush_tlb_page(vma, addr)
 #endif /* CONFIG_SMP */
+/*
+ * flush the page walk cache for the address
+ */
+static inline void flush_tlb_pgtable(struct mmu_gather *tlb, unsigned long address)
+{
+       /*
+        * Flush the page table walk cache on freeing a page table. The
+        * upper/higher level page table entry has already been marked none
+        * by now, so it is safe to flush the PWC here.
+        */
+       if (!radix_enabled())
+               return;
 
+       radix__flush_tlb_pwc(tlb, address);
+}
 #endif /*  _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */
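
To make the ordering that the comment above relies on explicit, a minimal sketch (all names hypothetical, not kernel code) of the free path it describes: the upper-level entry is marked none first, the page-walk cache is flushed next, and only then is the table's memory reused.

    #include <stdlib.h>

    struct pt_free_ctx {
            unsigned long *upper_entry;      /* entry that pointed at the table */
    };

    static void flush_page_walk_cache(void)
    {
            /* stands in for a tlbie/tlbiel with the PWC invalidation type */
    }

    static void free_page_table(struct pt_free_ctx *ctx, void *table)
    {
            *ctx->upper_entry = 0;           /* 1. mark the upper-level entry none */
            flush_page_walk_cache();         /* 2. drop cached walks through it    */
            free(table);                     /* 3. memory may now be reused safely */
    }
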
index 54f591e..c0a69ae 100644 (file)
@@ -4,11 +4,6 @@
 #include <linux/mm.h>
 
 extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
-static inline void tlb_flush_pgtable(struct mmu_gather *tlb,
-                                    unsigned long address)
-{
-
-}
 
 #ifdef CONFIG_PPC64
 #include <asm/book3s/64/pgalloc.h>
index 069369f..897d2e1 100644 (file)
@@ -57,8 +57,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -88,7 +87,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
+       return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -190,8 +189,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX),
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index 2714a3b..b5f73cb 100644 (file)
@@ -642,7 +642,6 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                if (pe->type & EEH_PE_VF) {
                        eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
                } else {
-                       eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                        pci_lock_rescan_remove();
                        pci_hp_remove_devices(bus);
                        pci_unlock_rescan_remove();
@@ -692,10 +691,12 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
                 */
                edev = list_first_entry(&pe->edevs, struct eeh_dev, list);
                eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
-               if (pe->type & EEH_PE_VF)
+               if (pe->type & EEH_PE_VF) {
                        eeh_add_virt_device(edev, NULL);
-               else
+               } else {
+                       eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
                        pci_hp_add_devices(bus);
+               }
        } else if (frozen_bus && rmv_data->removed) {
                pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
                ssleep(5);
index 4c94406..8bcc1b4 100644 (file)
@@ -1399,11 +1399,12 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_RADIX)
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
 
        mtlr    r10
-BEGIN_MMU_FTR_SECTION
-       b       2f
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_RADIX)
        andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
+BEGIN_MMU_FTR_SECTION
        beq-    2f
+FTR_SECTION_ELSE
+       b       2f
+ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_RADIX)
 
 .machine       push
 .machine       "power4"
index 40e05e7..f8a871a 100644 (file)
@@ -316,8 +316,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                        DBG_LOW(" -> hit\n");
                        /* Update the HPTE */
                        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-                                               ~(HPTE_R_PP | HPTE_R_N)) |
-                                              (newpp & (HPTE_R_PP | HPTE_R_N |
+                                               ~(HPTE_R_PPP | HPTE_R_N)) |
+                                              (newpp & (HPTE_R_PPP | HPTE_R_N |
                                                         HPTE_R_C)));
                }
                native_unlock_hpte(hptep);
@@ -385,8 +385,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 
        /* Update the HPTE */
        hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
-                       ~(HPTE_R_PP | HPTE_R_N)) |
-               (newpp & (HPTE_R_PP | HPTE_R_N)));
+                               ~(HPTE_R_PPP | HPTE_R_N)) |
+                              (newpp & (HPTE_R_PPP | HPTE_R_N)));
        /*
         * Ensure it is out of the tlb too. Bolted entries base and
         * actual page size will be same.
index b2740c6..5b22ba0 100644 (file)
@@ -201,9 +201,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
        /*
         * We can't allow hardware to update hpte bits. Hence always
         * set 'R' bit and set 'C' if it is a write fault
-        * Memory coherence is always enabled
         */
-       rflags |=  HPTE_R_R | HPTE_R_M;
+       rflags |=  HPTE_R_R;
 
        if (pteflags & _PAGE_DIRTY)
                rflags |= HPTE_R_C;
@@ -213,10 +212,15 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
 
        if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_TOLERANT)
                rflags |= HPTE_R_I;
-       if ((pteflags & _PAGE_CACHE_CTL ) == _PAGE_NON_IDEMPOTENT)
+       else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
                rflags |= (HPTE_R_I | HPTE_R_G);
-       if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
-               rflags |= (HPTE_R_I | HPTE_R_W);
+       else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+               rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
+       else
+               /*
+                * Add memory coherence if cache inhibited is not set
+                */
+               rflags |= HPTE_R_M;
 
        return rflags;
 }
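
A compact restatement of what the rewritten if/else chain above produces for the four cache-control cases (R is always set earlier, C only on a write fault). The sketch below is ordinary user-space C with made-up bit values standing in for the HPTE_R_* constants; only the shape of the mapping is taken from the hunk.

    #include <stdio.h>

    enum { R_I = 1, R_G = 2, R_W = 4, R_M = 8 };   /* hypothetical stand-ins */
    enum mode { TOLERANT, NON_IDEMPOTENT, SAO, NORMAL };

    static int cache_bits(enum mode m)
    {
            switch (m) {
            case TOLERANT:       return R_I;              /* cache inhibited        */
            case NON_IDEMPOTENT: return R_I | R_G;        /* inhibited + guarded    */
            case SAO:            return R_W | R_I | R_M;  /* strong access ordering */
            default:             return R_M;              /* plain memory: coherent */
            }
    }

    int main(void)
    {
            for (int m = TOLERANT; m <= NORMAL; m++)
                    printf("mode %d -> 0x%x\n", m, cache_bits(m));
            return 0;
    }
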
index 5aac1a3..119d186 100644 (file)
@@ -73,7 +73,7 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
        cachep = PGT_CACHE(pdshift - pshift);
 #endif
 
-       new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);
+       new = kmem_cache_zalloc(cachep, GFP_KERNEL);
 
        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
index 227b2a6..1962222 100644 (file)
@@ -65,7 +65,7 @@ static int radix__init_new_context(struct mm_struct *mm, int index)
        /*
         * set the process table entry,
         */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+       rts_field = radix__get_tree_size();
        process_tb[index].prtb0 = cpu_to_be64(rts_field | __pa(mm->pgd) | RADIX_PGD_INDEX_SIZE);
        return 0;
 }
index c939e6e..e58707d 100644 (file)
@@ -160,9 +160,8 @@ redo:
        process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT);
        /*
         * Fill in the process table.
-        * we support 52 bits, hence 52-28 = 24, 11000
         */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+       rts_field = radix__get_tree_size();
        process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
        /*
         * Fill in the partition table. We are supposed to use effective address
@@ -176,10 +175,8 @@ redo:
 static void __init radix_init_partition_table(void)
 {
        unsigned long rts_field;
-       /*
-        * we support 52 bits, hence 52-28 = 24, 11000
-        */
-       rts_field = 3ull << PPC_BITLSHIFT(2);
+
+       rts_field = radix__get_tree_size();
 
        BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
        partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
index bf7bf32..7f922f5 100644 (file)
@@ -84,7 +84,7 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
        pte_t *pte;
 
        if (slab_is_available()) {
-               pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+               pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        } else {
                pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
                if (pte)
@@ -97,7 +97,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *ptepage;
 
-       gfp_t flags = GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO;
+       gfp_t flags = GFP_KERNEL | __GFP_ZERO;
 
        ptepage = alloc_pages(flags, 0);
        if (!ptepage)
index e009e06..f5e8d4e 100644 (file)
@@ -350,8 +350,7 @@ static pte_t *get_from_cache(struct mm_struct *mm)
 static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
 {
        void *ret = NULL;
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!kernel && !pgtable_page_ctor(page)) {
index 54efba2..ab2f60e 100644 (file)
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
-static inline void __tlbiel_pid(unsigned long pid, int set)
+#define RIC_FLUSH_TLB 0
+#define RIC_FLUSH_PWC 1
+#define RIC_FLUSH_ALL 2
+
+static inline void __tlbiel_pid(unsigned long pid, int set,
+                               unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = PPC_BIT(53); /* IS = 1 */
        rb |= set << PPC_BITLSHIFT(51);
        rs = ((unsigned long)pid) << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 2;  /* invalidate all the caches */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -39,25 +43,24 @@ static inline void __tlbiel_pid(unsigned long pid, int set)
 /*
  * We use 128 set in radix mode and 256 set in hpt mode.
  */
-static inline void _tlbiel_pid(unsigned long pid)
+static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)
 {
        int set;
 
        for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
-               __tlbiel_pid(pid, set);
+               __tlbiel_pid(pid, set, ric);
        }
        return;
 }
 
-static inline void _tlbie_pid(unsigned long pid)
+static inline void _tlbie_pid(unsigned long pid, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = PPC_BIT(53); /* IS = 1 */
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 2;  /* invalidate all the caches */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -67,16 +70,15 @@ static inline void _tlbie_pid(unsigned long pid)
 }
 
 static inline void _tlbiel_va(unsigned long va, unsigned long pid,
-                             unsigned long ap)
+                             unsigned long ap, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 0;  /* no cluster flush yet */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000224 | (%0 << 11) | (%1 << 16) |"
@@ -86,16 +88,15 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,
 }
 
 static inline void _tlbie_va(unsigned long va, unsigned long pid,
-                            unsigned long ap)
+                            unsigned long ap, unsigned long ric)
 {
-       unsigned long rb,rs,ric,prs,r;
+       unsigned long rb,rs,prs,r;
 
        rb = va & ~(PPC_BITMASK(52, 63));
        rb |= ap << PPC_BITLSHIFT(58);
        rs = pid << PPC_BITLSHIFT(31);
        prs = 1; /* process scoped */
        r = 1;   /* radix format */
-       ric = 0;  /* no cluster flush yet */
 
        asm volatile("ptesync": : :"memory");
        asm volatile(".long 0x7c000264 | (%0 << 11) | (%1 << 16) |"
@@ -122,11 +123,26 @@ void radix__local_flush_tlb_mm(struct mm_struct *mm)
        preempt_disable();
        pid = mm->context.id;
        if (pid != MMU_NO_CONTEXT)
-               _tlbiel_pid(pid);
+               _tlbiel_pid(pid, RIC_FLUSH_ALL);
        preempt_enable();
 }
 EXPORT_SYMBOL(radix__local_flush_tlb_mm);
 
+void radix__local_flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+       unsigned long pid;
+       struct mm_struct *mm = tlb->mm;
+
+       preempt_disable();
+
+       pid = mm->context.id;
+       if (pid != MMU_NO_CONTEXT)
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__local_flush_tlb_pwc);
+
 void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                            unsigned long ap, int nid)
 {
@@ -135,7 +151,7 @@ void radix___local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
        preempt_disable();
        pid = mm ? mm->context.id : 0;
        if (pid != MMU_NO_CONTEXT)
-               _tlbiel_va(vmaddr, pid, ap);
+               _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
        preempt_enable();
 }
 
@@ -172,16 +188,42 @@ void radix__flush_tlb_mm(struct mm_struct *mm)
 
                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
-               _tlbie_pid(pid);
+               _tlbie_pid(pid, RIC_FLUSH_ALL);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
-               _tlbiel_pid(pid);
+               _tlbiel_pid(pid, RIC_FLUSH_ALL);
 no_context:
        preempt_enable();
 }
 EXPORT_SYMBOL(radix__flush_tlb_mm);
 
+void radix__flush_tlb_pwc(struct mmu_gather *tlb, unsigned long addr)
+{
+       unsigned long pid;
+       struct mm_struct *mm = tlb->mm;
+
+       preempt_disable();
+
+       pid = mm->context.id;
+       if (unlikely(pid == MMU_NO_CONTEXT))
+               goto no_context;
+
+       if (!mm_is_core_local(mm)) {
+               int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
+
+               if (lock_tlbie)
+                       raw_spin_lock(&native_tlbie_lock);
+               _tlbie_pid(pid, RIC_FLUSH_PWC);
+               if (lock_tlbie)
+                       raw_spin_unlock(&native_tlbie_lock);
+       } else
+               _tlbiel_pid(pid, RIC_FLUSH_PWC);
+no_context:
+       preempt_enable();
+}
+EXPORT_SYMBOL(radix__flush_tlb_pwc);
+
 void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
                       unsigned long ap, int nid)
 {
@@ -196,11 +238,11 @@ void radix___flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
 
                if (lock_tlbie)
                        raw_spin_lock(&native_tlbie_lock);
-               _tlbie_va(vmaddr, pid, ap);
+               _tlbie_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
                if (lock_tlbie)
                        raw_spin_unlock(&native_tlbie_lock);
        } else
-               _tlbiel_va(vmaddr, pid, ap);
+               _tlbiel_va(vmaddr, pid, ap, RIC_FLUSH_TLB);
 bail:
        preempt_enable();
 }
@@ -224,7 +266,7 @@ void radix__flush_tlb_kernel_range(unsigned long start, unsigned long end)
 
        if (lock_tlbie)
                raw_spin_lock(&native_tlbie_lock);
-       _tlbie_pid(0);
+       _tlbie_pid(0, RIC_FLUSH_ALL);
        if (lock_tlbie)
                raw_spin_unlock(&native_tlbie_lock);
 }
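
For context on the new ric argument threaded through the helpers above, a small user-space sketch (hypothetical helper, not the kernel implementation) of what each value selects, using the constants defined at the top of this file's hunks: 0 invalidates TLB entries only, 1 the page-walk cache only, and 2 both, which is what the old hard-coded ric = 2 did.

    #include <stdio.h>

    #define RIC_FLUSH_TLB 0
    #define RIC_FLUSH_PWC 1
    #define RIC_FLUSH_ALL 2

    static const char *ric_name(unsigned long ric)
    {
            switch (ric) {
            case RIC_FLUSH_TLB: return "TLB entries only";
            case RIC_FLUSH_PWC: return "page-walk cache only";
            case RIC_FLUSH_ALL: return "TLB entries and page-walk cache";
            default:            return "not used here";
            }
    }

    int main(void)
    {
            for (unsigned long ric = 0; ric <= 2; ric++)
                    printf("RIC=%lu -> %s\n", ric, ric_name(ric));
            return 0;
    }
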
index 37b9017..ac82e8e 100644 (file)
@@ -245,6 +245,7 @@ struct kvm_vcpu_stat {
        u32 exit_stop_request;
        u32 exit_validity;
        u32 exit_instruction;
+       u32 exit_pei;
        u32 halt_successful_poll;
        u32 halt_attempted_poll;
        u32 halt_poll_invalid;
index 59215c5..7ec63b1 100644 (file)
@@ -649,6 +649,8 @@ static int cpumf_pmu_commit_txn(struct pmu *pmu)
 
 /* Performance monitoring unit for s390x */
 static struct pmu cpumf_pmu = {
+       .task_ctx_nr  = perf_sw_context,
+       .capabilities = PERF_PMU_CAP_NO_INTERRUPT,
        .pmu_enable   = cpumf_pmu_enable,
        .pmu_disable  = cpumf_pmu_disable,
        .event_init   = cpumf_pmu_event_init,
@@ -708,12 +710,6 @@ static int __init cpumf_pmu_init(void)
                goto out;
        }
 
-       /* The CPU measurement counter facility does not have overflow
-        * interrupts to do sampling.  Sampling must be provided by
-        * external means, for example, by timers.
-        */
-       cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
-
        cpumf_pmu.attr_groups = cpumf_cf_event_group();
        rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
        if (rc) {
index 2e6b54e..2521571 100644 (file)
@@ -341,6 +341,8 @@ static int handle_mvpg_pei(struct kvm_vcpu *vcpu)
 
 static int handle_partial_execution(struct kvm_vcpu *vcpu)
 {
+       vcpu->stat.exit_pei++;
+
        if (vcpu->arch.sie_block->ipa == 0xb254)        /* MVPG */
                return handle_mvpg_pei(vcpu);
        if (vcpu->arch.sie_block->ipa >> 8 == 0xae)     /* SIGP */
index 6d8ec3a..43f2a2b 100644 (file)
@@ -61,6 +61,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
+       { "exit_pei", VCPU_STAT(exit_pei) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
@@ -657,7 +658,7 @@ static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
                kvm->arch.model.cpuid = proc->cpuid;
                lowest_ibc = sclp.ibc >> 16 & 0xfff;
                unblocked_ibc = sclp.ibc & 0xfff;
-               if (lowest_ibc) {
+               if (lowest_ibc && proc->ibc) {
                        if (proc->ibc > unblocked_ibc)
                                kvm->arch.model.ibc = unblocked_ibc;
                        else if (proc->ibc < lowest_ibc)
index e8b5962..e2565d2 100644 (file)
@@ -169,7 +169,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
                        return table;
        }
        /* Allocate a fresh page */
-       page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
+       page = alloc_page(GFP_KERNEL);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
index 4324b87..9f0ce0e 100644 (file)
@@ -437,7 +437,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        pte = *ptep;
-       if (pte_swap(pte) &&
+       if (!reset && pte_swap(pte) &&
            ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
             (pgstev & _PGSTE_GPS_ZERO))) {
                ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
index 2e06765..49b012d 100644 (file)
@@ -42,8 +42,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 {
        pte_t *pte;
 
-       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO,
-                                       PTE_ORDER);
+       pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
 
        return pte;
 }
@@ -53,7 +52,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
 {
        struct page *pte;
 
-       pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+       pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
        if (!pte)
                return NULL;
        clear_highpage(pte);
index a33673b..f3f42c8 100644 (file)
@@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
 {
-       return quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       return quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
 }
 
 static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
@@ -43,7 +43,7 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
        struct page *page;
        void *pg;
 
-       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL | __GFP_REPEAT, NULL);
+       pg = quicklist_alloc(QUICK_PT, GFP_KERNEL, NULL);
        if (!pg)
                return NULL;
        page = virt_to_page(pg);
index 26e03a1..a62bd86 100644 (file)
@@ -1,7 +1,7 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_ZERO
 
 static struct kmem_cache *pgd_cachep;
 #if PAGETABLE_LEVELS > 2
index 5e31871..3529f13 100644 (file)
@@ -41,8 +41,7 @@ static inline void __pud_populate(pud_t *pud, pmd_t *pmd)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(pgtable_cache,
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
@@ -52,8 +51,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return kmem_cache_alloc(pgtable_cache,
-                               GFP_KERNEL|__GFP_REPEAT);
+       return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
index 14bb0d5..aec508e 100644 (file)
@@ -2704,8 +2704,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                            unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        pte_t *pte = NULL;
 
        if (page)
@@ -2717,8 +2716,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
                        unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK |
-                                      __GFP_REPEAT | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
index 4b7cef9..c1467ac 100644 (file)
@@ -78,7 +78,7 @@ struct thread_info {
 
 #ifndef __ASSEMBLY__
 
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
 
 /* How to get the thread information struct from C. */
 register unsigned long stack_pointer __asm__("sp");
index 6b705cc..a465d83 100644 (file)
@@ -73,8 +73,9 @@ void arch_cpu_idle(void)
 /*
  * Release a thread_info structure
  */
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
 {
+       struct thread_info *info = (void *)stack;
        struct single_step_state *step_state = info->step_state;
 
        if (step_state) {
index 7bf2491..c4d5bf8 100644 (file)
@@ -231,7 +231,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
                               int order)
 {
-       gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
+       gfp_t flags = GFP_KERNEL|__GFP_ZERO;
        struct page *p;
        int i;
 
index b2a2dff..e7437ec 100644 (file)
@@ -204,7 +204,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
        pte_t *pte;
 
-       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
        return pte;
 }
 
@@ -212,7 +212,7 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *pte;
 
-       pte = alloc_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
+       pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
        if (!pte)
                return NULL;
        if (!pgtable_page_ctor(pte)) {
index 2e02d13..2677579 100644 (file)
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)                  get_pgd_slow(mm)
 #define pgd_free(mm, pgd)              free_pgd_slow(mm, pgd)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /*
  * Allocate one PTE table.
index 0a7b885..d9a94da 100644 (file)
@@ -2439,6 +2439,15 @@ config PCI_CNB20LE_QUIRK
 
 source "drivers/pci/Kconfig"
 
+config ISA_BUS
+       bool "ISA-style bus support on modern systems" if EXPERT
+       select ISA_BUS_API
+       help
+         Enables ISA-style drivers on modern systems. This is necessary to
+         support PC/104 devices on X86_64 platforms.
+
+         If unsure, say N.
+
 # x86_64 have no ISA slots, but can have ISA-style DMA.
 config ISA_DMA_API
        bool "ISA-style DMA support" if (X86_64 && EXPERT)
index 4421b5d..d1d1e50 100644 (file)
@@ -38,12 +38,11 @@ typedef u8 kprobe_opcode_t;
 #define RELATIVECALL_OPCODE 0xe8
 #define RELATIVE_ADDR_SIZE 4
 #define MAX_STACK_SIZE 64
-#define MIN_STACK_SIZE(ADDR)                                          \
-       (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
-                             THREAD_SIZE - (unsigned long)(ADDR)))    \
-        ? (MAX_STACK_SIZE)                                            \
-        : (((unsigned long)current_thread_info()) +                   \
-           THREAD_SIZE - (unsigned long)(ADDR)))
+#define CUR_STACK_SIZE(ADDR) \
+       (current_top_of_stack() - (unsigned long)(ADDR))
+#define MIN_STACK_SIZE(ADDR)                           \
+       (MAX_STACK_SIZE < CUR_STACK_SIZE(ADDR) ?        \
+        MAX_STACK_SIZE : CUR_STACK_SIZE(ADDR))
 
 #define flush_insn_slot(p)     do { } while (0)
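
A quick worked example (plain user-space C, with the stack top passed in explicitly instead of the kernel's current_top_of_stack()) of how the rewritten macro clamps the number of bytes saved: never more than MAX_STACK_SIZE, and never past the top of the current stack.

    #include <stdio.h>

    #define MAX_STACK_SIZE 64
    #define CUR_STACK_SIZE(top, addr)  ((top) - (addr))
    #define MIN_STACK_SIZE(top, addr) \
            (MAX_STACK_SIZE < CUR_STACK_SIZE(top, addr) ? \
             MAX_STACK_SIZE : CUR_STACK_SIZE(top, addr))

    int main(void)
    {
            unsigned long top = 0x1000;                      /* pretend top of stack   */
            printf("%lu\n", MIN_STACK_SIZE(top, 0x0fd0UL));  /* 48: limited by the top */
            printf("%lu\n", MIN_STACK_SIZE(top, 0x0e00UL));  /* 64: clamped to MAX     */
            return 0;
    }
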
 
index e0fbe7e..69e6286 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/irqbypass.h>
 #include <linux/hyperv.h>
 
+#include <asm/apic.h>
 #include <asm/pvclock-abi.h>
 #include <asm/desc.h>
 #include <asm/mtrr.h>
@@ -1368,4 +1369,14 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
 
+static inline int kvm_cpu_get_apicid(int mps_cpu)
+{
+#ifdef CONFIG_X86_LOCAL_APIC
+       return __default_cpu_present_to_apicid(mps_cpu);
+#else
+       WARN_ON_ONCE(1);
+       return BAD_APICID;
+#endif
+}
+
 #endif /* _ASM_X86_KVM_HOST_H */
index bf7f8b5..574c23c 100644 (file)
@@ -81,7 +81,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
        struct page *page;
-       page = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);
+       page = alloc_pages(GFP_KERNEL |  __GFP_ZERO, 0);
        if (!page)
                return NULL;
        if (!pgtable_pmd_page_ctor(page)) {
@@ -125,7 +125,7 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-       return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+       return (pud_t *)get_zeroed_page(GFP_KERNEL);
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
index 7c247e7..0944218 100644 (file)
@@ -14,7 +14,7 @@ extern int kstack_depth_to_print;
 struct thread_info;
 struct stacktrace_ops;
 
-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
+typedef unsigned long (*walk_stack_t)(struct task_struct *task,
                                      unsigned long *stack,
                                      unsigned long bp,
                                      const struct stacktrace_ops *ops,
@@ -23,13 +23,13 @@ typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
                                      int *graph);
 
 extern unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
                    unsigned long *stack, unsigned long bp,
                    const struct stacktrace_ops *ops, void *data,
                    unsigned long *end, int *graph);
 
 extern unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
                       unsigned long *stack, unsigned long bp,
                       const struct stacktrace_ops *ops, void *data,
                       unsigned long *end, int *graph);
index 2bb25c3..ef8017c 100644 (file)
@@ -42,16 +42,14 @@ void printk_address(unsigned long address)
 static void
 print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
-                       struct thread_info *tinfo, int *graph)
+                       struct task_struct *task, int *graph)
 {
-       struct task_struct *task;
        unsigned long ret_addr;
        int index;
 
        if (addr != (unsigned long)return_to_handler)
                return;
 
-       task = tinfo->task;
        index = task->curr_ret_stack;
 
        if (!task->ret_stack || index < *graph)
@@ -68,7 +66,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
 static inline void
 print_ftrace_graph_addr(unsigned long addr, void *data,
                        const struct stacktrace_ops *ops,
-                       struct thread_info *tinfo, int *graph)
+                       struct task_struct *task, int *graph)
 { }
 #endif
 
@@ -79,10 +77,10 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
  */
 
-static inline int valid_stack_ptr(struct thread_info *tinfo,
+static inline int valid_stack_ptr(struct task_struct *task,
                        void *p, unsigned int size, void *end)
 {
-       void *t = tinfo;
+       void *t = task_stack_page(task);
        if (end) {
                if (p < end && p >= (end-THREAD_SIZE))
                        return 1;
@@ -93,14 +91,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
 }
 
 unsigned long
-print_context_stack(struct thread_info *tinfo,
+print_context_stack(struct task_struct *task,
                unsigned long *stack, unsigned long bp,
                const struct stacktrace_ops *ops, void *data,
                unsigned long *end, int *graph)
 {
        struct stack_frame *frame = (struct stack_frame *)bp;
 
-       while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
+       while (valid_stack_ptr(task, stack, sizeof(*stack), end)) {
                unsigned long addr;
 
                addr = *stack;
@@ -112,7 +110,7 @@ print_context_stack(struct thread_info *tinfo,
                        } else {
                                ops->address(data, addr, 0);
                        }
-                       print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+                       print_ftrace_graph_addr(addr, data, ops, task, graph);
                }
                stack++;
        }
@@ -121,7 +119,7 @@ print_context_stack(struct thread_info *tinfo,
 EXPORT_SYMBOL_GPL(print_context_stack);
 
 unsigned long
-print_context_stack_bp(struct thread_info *tinfo,
+print_context_stack_bp(struct task_struct *task,
                       unsigned long *stack, unsigned long bp,
                       const struct stacktrace_ops *ops, void *data,
                       unsigned long *end, int *graph)
@@ -129,7 +127,7 @@ print_context_stack_bp(struct thread_info *tinfo,
        struct stack_frame *frame = (struct stack_frame *)bp;
        unsigned long *ret_addr = &frame->return_address;
 
-       while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
+       while (valid_stack_ptr(task, ret_addr, sizeof(*ret_addr), end)) {
                unsigned long addr = *ret_addr;
 
                if (!__kernel_text_address(addr))
@@ -139,7 +137,7 @@ print_context_stack_bp(struct thread_info *tinfo,
                        break;
                frame = frame->next_frame;
                ret_addr = &frame->return_address;
-               print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
+               print_ftrace_graph_addr(addr, data, ops, task, graph);
        }
 
        return (unsigned long)frame;
index 464ffd6..fef917e 100644 (file)
@@ -61,15 +61,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                bp = stack_frame(task, regs);
 
        for (;;) {
-               struct thread_info *context;
                void *end_stack;
 
                end_stack = is_hardirq_stack(stack, cpu);
                if (!end_stack)
                        end_stack = is_softirq_stack(stack, cpu);
 
-               context = task_thread_info(task);
-               bp = ops->walk_stack(context, stack, bp, ops, data,
+               bp = ops->walk_stack(task, stack, bp, ops, data,
                                     end_stack, &graph);
 
                /* Stop if not on irq stack */
index 5f1c626..d558a8a 100644 (file)
@@ -153,7 +153,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                const struct stacktrace_ops *ops, void *data)
 {
        const unsigned cpu = get_cpu();
-       struct thread_info *tinfo;
        unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu);
        unsigned long dummy;
        unsigned used = 0;
@@ -179,7 +178,6 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
         * current stack address. If the stacks consist of nested
         * exceptions
         */
-       tinfo = task_thread_info(task);
        while (!done) {
                unsigned long *stack_end;
                enum stack_type stype;
@@ -202,7 +200,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
                        if (ops->stack(data, id) < 0)
                                break;
 
-                       bp = ops->walk_stack(tinfo, stack, bp, ops,
+                       bp = ops->walk_stack(task, stack, bp, ops,
                                             data, stack_end, &graph);
                        ops->stack(data, "<EOE>");
                        /*
@@ -218,7 +216,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
 
                        if (ops->stack(data, "IRQ") < 0)
                                break;
-                       bp = ops->walk_stack(tinfo, stack, bp,
+                       bp = ops->walk_stack(task, stack, bp,
                                     ops, data, stack_end, &graph);
                        /*
                         * We link to the next stack (which would be
@@ -240,7 +238,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
        /*
         * This handles the process stack:
         */
-       bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
+       bp = ops->walk_stack(task, stack, bp, ops, data, NULL, &graph);
        put_cpu();
 }
 EXPORT_SYMBOL(dump_trace);
index 4d38416..04f89ca 100644 (file)
@@ -57,7 +57,7 @@
 # error "Need more than one PGD for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
index 38da8f2..c627bf8 100644 (file)
@@ -130,11 +130,9 @@ void irq_ctx_init(int cpu)
 
 void do_softirq_own_stack(void)
 {
-       struct thread_info *curstk;
        struct irq_stack *irqstk;
        u32 *isp, *prev_esp;
 
-       curstk = current_stack();
        irqstk = __this_cpu_read(softirq_stack);
 
        /* build the stack frame on the softirq stack */
index 38cf7a7..7847e5c 100644 (file)
@@ -961,7 +961,19 @@ int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
                 * normal page fault.
                 */
                regs->ip = (unsigned long)cur->addr;
+               /*
+                * Trap flag (TF) has been set here because this fault
+                * happened where the single stepping will be done.
+                * So clear it by resetting the current kprobe:
+                */
+               regs->flags &= ~X86_EFLAGS_TF;
+
+               /*
+                * If the TF flag was set before the kprobe hit,
+                * don't touch it:
+                */
                regs->flags |= kcb->kprobe_old_flags;
+
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
                else
index 1163e81..16ef31b 100644 (file)
@@ -238,7 +238,9 @@ module_param(nested, int, S_IRUGO);
 
 /* enable / disable AVIC */
 static int avic;
+#ifdef CONFIG_X86_LOCAL_APIC
 module_param(avic, int, S_IRUGO);
+#endif
 
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
@@ -981,11 +983,14 @@ static __init int svm_hardware_setup(void)
        } else
                kvm_disable_tdp();
 
-       if (avic && (!npt_enabled || !boot_cpu_has(X86_FEATURE_AVIC)))
-               avic = false;
-
-       if (avic)
-               pr_info("AVIC enabled\n");
+       if (avic) {
+               if (!npt_enabled ||
+                   !boot_cpu_has(X86_FEATURE_AVIC) ||
+                   !IS_ENABLED(CONFIG_X86_LOCAL_APIC))
+                       avic = false;
+               else
+                       pr_info("AVIC enabled\n");
+       }
 
        return 0;
 
@@ -1324,7 +1329,7 @@ free_avic:
 static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
 {
        u64 entry;
-       int h_physical_id = __default_cpu_present_to_apicid(vcpu->cpu);
+       int h_physical_id = kvm_cpu_get_apicid(vcpu->cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -1349,7 +1354,7 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        u64 entry;
        /* ID = 0xff (broadcast), ID > 0xff (reserved) */
-       int h_physical_id = __default_cpu_present_to_apicid(cpu);
+       int h_physical_id = kvm_cpu_get_apicid(cpu);
        struct vcpu_svm *svm = to_svm(vcpu);
 
        if (!kvm_vcpu_apicv_active(vcpu))
@@ -4236,7 +4241,7 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
 
        if (avic_vcpu_is_running(vcpu))
                wrmsrl(SVM_AVIC_DOORBELL,
-                      __default_cpu_present_to_apicid(vcpu->cpu));
+                      kvm_cpu_get_apicid(vcpu->cpu));
        else
                kvm_vcpu_wake_up(vcpu);
 }
index fb93010..003618e 100644 (file)
@@ -2072,7 +2072,8 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
        unsigned int dest;
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        do {
@@ -2180,7 +2181,8 @@ static void vmx_vcpu_pi_put(struct kvm_vcpu *vcpu)
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        /* Set SN when the vCPU is preempted */
@@ -10714,7 +10716,8 @@ static int vmx_pre_block(struct kvm_vcpu *vcpu)
        struct pi_desc *pi_desc = vcpu_to_pi_desc(vcpu);
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+               !kvm_vcpu_apicv_active(vcpu))
                return 0;
 
        vcpu->pre_pcpu = vcpu->cpu;
@@ -10780,7 +10783,8 @@ static void vmx_post_block(struct kvm_vcpu *vcpu)
        unsigned long flags;
 
        if (!kvm_arch_has_assigned_device(vcpu->kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP)  ||
+               !kvm_vcpu_apicv_active(vcpu))
                return;
 
        do {
@@ -10833,7 +10837,8 @@ static int vmx_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
        int idx, ret = -EINVAL;
 
        if (!kvm_arch_has_assigned_device(kvm) ||
-               !irq_remapping_cap(IRQ_POSTING_CAP))
+               !irq_remapping_cap(IRQ_POSTING_CAP) ||
+               !kvm_vcpu_apicv_active(kvm->vcpus[0]))
                return 0;
 
        idx = srcu_read_lock(&kvm->irq_srcu);
index 4eb287e..aa0ff4b 100644 (file)
@@ -6,7 +6,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
-#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO
+#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO
 
 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
index 6e7242b..b226b3f 100644 (file)
@@ -139,7 +139,7 @@ int __init efi_alloc_page_tables(void)
        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;
 
-       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO;
+       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
        efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
        if (!efi_pgd)
                return -ENOMEM;
index 478a2de..6743371 100644 (file)
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
        /* NOTE: The loop is more greedy than the cleanup_highmap variant.
         * We include the PMD passed in on _both_ boundaries. */
-       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
                        pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
@@ -1551,41 +1551,6 @@ static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
 #endif
 }
 
-#ifdef CONFIG_X86_32
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
-       if (pte_val_ma(*ptep) & _PAGE_PRESENT)
-               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
-                              pte_val_ma(pte));
-
-       return pte;
-}
-#else /* CONFIG_X86_64 */
-static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
-{
-       unsigned long pfn;
-
-       if (xen_feature(XENFEAT_writable_page_tables) ||
-           xen_feature(XENFEAT_auto_translated_physmap) ||
-           xen_start_info->mfn_list >= __START_KERNEL_map)
-               return pte;
-
-       /*
-        * Pages belonging to the initial p2m list mapped outside the default
-        * address range must be mapped read-only. This region contains the
-        * page tables for mapping the p2m list, too, and page tables MUST be
-        * mapped read-only.
-        */
-       pfn = pte_pfn(pte);
-       if (pfn >= xen_start_info->first_p2m_pfn &&
-           pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
-               pte = __pte_ma(pte_val_ma(pte) & ~_PAGE_RW);
-
-       return pte;
-}
-#endif /* CONFIG_X86_64 */
-
 /*
  * Init-time set_pte while constructing initial pagetables, which
  * doesn't allow RO page table pages to be remapped RW.
@@ -1600,13 +1565,37 @@ static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte)
  * so always write the PTE directly and rely on Xen trapping and
  * emulating any updates as necessary.
  */
-static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+__visible pte_t xen_make_pte_init(pteval_t pte)
 {
-       if (pte_mfn(pte) != INVALID_P2M_ENTRY)
-               pte = mask_rw_pte(ptep, pte);
-       else
-               pte = __pte_ma(0);
+#ifdef CONFIG_X86_64
+       unsigned long pfn;
+
+       /*
+        * Pages belonging to the initial p2m list mapped outside the default
+        * address range must be mapped read-only. This region contains the
+        * page tables for mapping the p2m list, too, and page tables MUST be
+        * mapped read-only.
+        */
+       pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
+       if (xen_start_info->mfn_list < __START_KERNEL_map &&
+           pfn >= xen_start_info->first_p2m_pfn &&
+           pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
+               pte &= ~_PAGE_RW;
+#endif
+       pte = pte_pfn_to_mfn(pte);
+       return native_make_pte(pte);
+}
+PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);
 
+static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
+{
+#ifdef CONFIG_X86_32
+       /* If there's an existing pte, then don't allow _PAGE_RW to be set */
+       if (pte_mfn(pte) != INVALID_P2M_ENTRY
+           && pte_val_ma(*ptep) & _PAGE_PRESENT)
+               pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
+                              pte_val_ma(pte));
+#endif
        native_set_pte(ptep, pte);
 }
 
@@ -2407,6 +2396,7 @@ static void __init xen_post_allocator_init(void)
        pv_mmu_ops.alloc_pud = xen_alloc_pud;
        pv_mmu_ops.release_pud = xen_release_pud;
 #endif
+       pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte);
 
 #ifdef CONFIG_X86_64
        pv_mmu_ops.write_cr3 = &xen_write_cr3;
@@ -2455,7 +2445,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
        .pte_val = PV_CALLEE_SAVE(xen_pte_val),
        .pgd_val = PV_CALLEE_SAVE(xen_pgd_val),
 
-       .make_pte = PV_CALLEE_SAVE(xen_make_pte),
+       .make_pte = PV_CALLEE_SAVE(xen_make_pte_init),
        .make_pgd = PV_CALLEE_SAVE(xen_make_pgd),
 
 #ifdef CONFIG_X86_PAE
index cab9f76..dd2a49a 100644 (file)
@@ -182,7 +182,7 @@ static void * __ref alloc_p2m_page(void)
        if (unlikely(!slab_is_available()))
                return alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
 
-       return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
+       return (void *)__get_free_page(GFP_KERNEL);
 }
 
 static void __ref free_p2m_page(void *p)
index d38eb92..1065bc8 100644 (file)
@@ -44,7 +44,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
        pte_t *ptep;
        int i;
 
-       ptep = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
+       ptep = (pte_t *)__get_free_page(GFP_KERNEL);
        if (!ptep)
                return NULL;
        for (i = 0; i < 1024; i++)
index a1d177d..21932d6 100644 (file)
@@ -108,7 +108,9 @@ acpi_ex_add_table(u32 table_index,
 
        /* Add the table to the namespace */
 
+       acpi_ex_exit_interpreter();
        status = acpi_ns_load_table(table_index, parent_node);
+       acpi_ex_enter_interpreter();
        if (ACPI_FAILURE(status)) {
                acpi_ut_remove_reference(obj_desc);
                *ddb_handle = NULL;
index daceb80..3b7fb99 100644 (file)
@@ -306,12 +306,6 @@ acpi_status acpi_hw_read(u32 *value, struct acpi_generic_address *reg)
 acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
 {
        u64 address;
-       u8 access_width;
-       u32 bit_width;
-       u8 bit_offset;
-       u64 value64;
-       u32 new_value32, old_value32;
-       u8 index;
        acpi_status status;
 
        ACPI_FUNCTION_NAME(hw_write);
@@ -323,145 +317,23 @@ acpi_status acpi_hw_write(u32 value, struct acpi_generic_address *reg)
                return (status);
        }
 
-       /* Convert access_width into number of bits based */
-
-       access_width = acpi_hw_get_access_bit_width(reg, 32);
-       bit_width = reg->bit_offset + reg->bit_width;
-       bit_offset = reg->bit_offset;
-
        /*
         * Two address spaces supported: Memory or IO. PCI_Config is
         * not supported here because the GAS structure is insufficient
         */
-       index = 0;
-       while (bit_width) {
-               /*
-                * Use offset style bit reads because "Index * AccessWidth" is
-                * ensured to be less than 32-bits by acpi_hw_validate_register().
-                */
-               new_value32 = ACPI_GET_BITS(&value, index * access_width,
-                                           ACPI_MASK_BITS_ABOVE_32
-                                           (access_width));
-
-               if (bit_offset >= access_width) {
-                       bit_offset -= access_width;
-               } else {
-                       /*
-                        * Use offset style bit masks because access_width is ensured
-                        * to be less than 32-bits by acpi_hw_validate_register() and
-                        * bit_offset/bit_width is less than access_width here.
-                        */
-                       if (bit_offset) {
-                               new_value32 &= ACPI_MASK_BITS_BELOW(bit_offset);
-                       }
-                       if (bit_width < access_width) {
-                               new_value32 &= ACPI_MASK_BITS_ABOVE(bit_width);
-                       }
-
-                       if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
-                               if (bit_offset || bit_width < access_width) {
-                                       /*
-                                        * Read old values in order not to modify the bits that
-                                        * are beyond the register bit_width/bit_offset setting.
-                                        */
-                                       status =
-                                           acpi_os_read_memory((acpi_physical_address)
-                                                               address +
-                                                               index *
-                                                               ACPI_DIV_8
-                                                               (access_width),
-                                                               &value64,
-                                                               access_width);
-                                       old_value32 = (u32)value64;
-
-                                       /*
-                                        * Use offset style bit masks because access_width is
-                                        * ensured to be less than 32-bits by
-                                        * acpi_hw_validate_register() and bit_offset/bit_width is
-                                        * less than access_width here.
-                                        */
-                                       if (bit_offset) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_ABOVE
-                                                   (bit_offset);
-                                               bit_offset = 0;
-                                       }
-                                       if (bit_width < access_width) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_BELOW
-                                                   (bit_width);
-                                       }
-
-                                       new_value32 |= old_value32;
-                               }
-
-                               value64 = (u64)new_value32;
-                               status =
-                                   acpi_os_write_memory((acpi_physical_address)
-                                                        address +
-                                                        index *
-                                                        ACPI_DIV_8
-                                                        (access_width),
-                                                        value64, access_width);
-                       } else {        /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
-
-                               if (bit_offset || bit_width < access_width) {
-                                       /*
-                                        * Read old values in order not to modify the bits that
-                                        * are beyond the register bit_width/bit_offset setting.
-                                        */
-                                       status =
-                                           acpi_hw_read_port((acpi_io_address)
-                                                             address +
-                                                             index *
-                                                             ACPI_DIV_8
-                                                             (access_width),
-                                                             &old_value32,
-                                                             access_width);
-
-                                       /*
-                                        * Use offset style bit masks because access_width is
-                                        * ensured to be less than 32-bits by
-                                        * acpi_hw_validate_register() and bit_offset/bit_width is
-                                        * less than access_width here.
-                                        */
-                                       if (bit_offset) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_ABOVE
-                                                   (bit_offset);
-                                               bit_offset = 0;
-                                       }
-                                       if (bit_width < access_width) {
-                                               old_value32 &=
-                                                   ACPI_MASK_BITS_BELOW
-                                                   (bit_width);
-                                       }
-
-                                       new_value32 |= old_value32;
-                               }
-
-                               status = acpi_hw_write_port((acpi_io_address)
-                                                           address +
-                                                           index *
-                                                           ACPI_DIV_8
-                                                           (access_width),
-                                                           new_value32,
-                                                           access_width);
-                       }
-               }
-
-               /*
-                * Index * access_width is ensured to be less than 32-bits by
-                * acpi_hw_validate_register().
-                */
-               bit_width -=
-                   bit_width > access_width ? access_width : bit_width;
-               index++;
+       if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+               status = acpi_os_write_memory((acpi_physical_address)
+                                             address, (u64)value,
+                                             reg->bit_width);
+       } else {                /* ACPI_ADR_SPACE_SYSTEM_IO, validated earlier */
+
+               status = acpi_hw_write_port((acpi_io_address)
+                                           address, value, reg->bit_width);
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_IO,
                          "Wrote: %8.8X width %2d   to %8.8X%8.8X (%s)\n",
-                         value, access_width, ACPI_FORMAT_UINT64(address),
+                         value, reg->bit_width, ACPI_FORMAT_UINT64(address),
                          acpi_ut_get_region_name(reg->space_id)));
 
        return (status);
index f631a47..1783cd7 100644 (file)
@@ -47,6 +47,7 @@
 #include "acparser.h"
 #include "acdispat.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_NAMESPACE
 ACPI_MODULE_NAME("nsparse")
@@ -170,6 +171,8 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
 
        ACPI_FUNCTION_TRACE(ns_parse_table);
 
+       acpi_ex_enter_interpreter();
+
        /*
         * AML Parse, pass 1
         *
@@ -185,7 +188,7 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
        status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS1,
                                            table_index, start_node);
        if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
+               goto error_exit;
        }
 
        /*
@@ -201,8 +204,10 @@ acpi_ns_parse_table(u32 table_index, struct acpi_namespace_node *start_node)
        status = acpi_ns_one_complete_parse(ACPI_IMODE_LOAD_PASS2,
                                            table_index, start_node);
        if (ACPI_FAILURE(status)) {
-               return_ACPI_STATUS(status);
+               goto error_exit;
        }
 
+error_exit:
+       acpi_ex_exit_interpreter();
        return_ACPI_STATUS(status);
 }
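
The hunk above converts the early returns into a single error_exit path so that acpi_ex_exit_interpreter() is always paired with the acpi_ex_enter_interpreter() taken at the top of the function. A hedged, stand-alone sketch of that shape (the example_* names are placeholders, not ACPICA functions):

static void example_enter(void) { }
static void example_exit(void) { }
static int example_parse_pass(int pass) { return 0; }

static int example_parse_table(void)
{
        int status;

        example_enter();                /* stands in for acpi_ex_enter_interpreter() */

        status = example_parse_pass(1);
        if (status)
                goto error_exit;

        status = example_parse_pass(2);

error_exit:
        example_exit();                 /* runs on both the success and error paths */
        return status;
}
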
index 61dc7a9..c6f0174 100644 (file)
@@ -606,7 +606,7 @@ void ata_scsi_error(struct Scsi_Host *host)
        ata_scsi_port_error_handler(host, ap);
 
        /* finish or retry handled scmd's and clean up */
-       WARN_ON(host->host_failed || !list_empty(&eh_work_q));
+       WARN_ON(!list_empty(&eh_work_q));
 
        DPRINTK("EXIT\n");
 }
index 6b2a84e..2609ba2 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y                  += power/
 obj-$(CONFIG_HAS_DMA)  += dma-mapping.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
-obj-$(CONFIG_ISA)      += isa.o
+obj-$(CONFIG_ISA_BUS_API)      += isa.o
 obj-$(CONFIG_FW_LOADER)        += firmware_class.o
 obj-$(CONFIG_NUMA)     += node.o
 obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
index 91dba65..cd6ccdc 100644 (file)
@@ -180,4 +180,4 @@ static int __init isa_bus_init(void)
        return error;
 }
 
-device_initcall(isa_bus_init);
+postcore_initcall(isa_bus_init);
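
Context for the initcall change above: postcore_initcall() runs well before device_initcall(), so the ISA bus type is registered before any driver selecting ISA_BUS_API probes at the later device level. A minimal sketch of that ordering, using hypothetical example_* names rather than the real isa.c code:

/* Hypothetical module illustrating initcall ordering (not part of this diff). */
#include <linux/init.h>
#include <linux/printk.h>

static int __init example_bus_init(void)
{
        pr_debug("bus registered at postcore level\n");
        return 0;
}
postcore_initcall(example_bus_init);    /* runs before all device_initcall() users */

static int __init example_driver_init(void)
{
        pr_debug("driver registered at device level, bus already present\n");
        return 0;
}
device_initcall(example_driver_init);
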
index db930d3..2a21578 100644 (file)
@@ -24,10 +24,12 @@ static char *make_driver_name(struct device_driver *drv)
 
 static void module_create_drivers_dir(struct module_kobject *mk)
 {
-       if (!mk || mk->drivers_dir)
-               return;
+       static DEFINE_MUTEX(drivers_dir_mutex);
 
-       mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+       mutex_lock(&drivers_dir_mutex);
+       if (mk && !mk->drivers_dir)
+               mk->drivers_dir = kobject_create_and_add("drivers", &mk->kobj);
+       mutex_unlock(&drivers_dir_mutex);
 }
 
 void module_add_driver(struct module *mod, struct device_driver *drv)
index 83d6e7b..8c3434b 100644 (file)
@@ -211,7 +211,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
                }
 
                /* Mark opp-table as multiple CPUs are sharing it now */
-               opp_table->shared_opp = true;
+               opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
        }
 unlock:
        mutex_unlock(&opp_table_lock);
@@ -227,7 +227,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
  *
  * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
  *
- * Returns -ENODEV if OPP table isn't already present.
+ * Returns -ENODEV if the OPP table isn't present yet, and -EINVAL if the OPP
+ * table's access status is still unknown.
  *
  * Locking: The internal opp_table and opp structures are RCU protected.
  * Hence this function internally uses RCU updater strategy with mutex locks
@@ -249,9 +250,14 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
                goto unlock;
        }
 
+       if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) {
+               ret = -EINVAL;
+               goto unlock;
+       }
+
        cpumask_clear(cpumask);
 
-       if (opp_table->shared_opp) {
+       if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) {
                list_for_each_entry(opp_dev, &opp_table->dev_list, node)
                        cpumask_set_cpu(opp_dev->dev->id, cpumask);
        } else {
index 94d2010..1dfd3dd 100644 (file)
@@ -34,7 +34,10 @@ static struct opp_table *_managed_opp(const struct device_node *np)
                         * But the OPPs will be considered as shared only if the
                         * OPP table contains a "opp-shared" property.
                         */
-                       return opp_table->shared_opp ? opp_table : NULL;
+                       if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED)
+                               return opp_table;
+
+                       return NULL;
                }
        }
 
@@ -353,7 +356,10 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
        }
 
        opp_table->np = opp_np;
-       opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
+       if (of_property_read_bool(opp_np, "opp-shared"))
+               opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED;
+       else
+               opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE;
 
        mutex_unlock(&opp_table_lock);
 
index 20f3be2..fabd5ca 100644 (file)
@@ -119,6 +119,12 @@ struct opp_device {
 #endif
 };
 
+enum opp_table_access {
+       OPP_TABLE_ACCESS_UNKNOWN = 0,
+       OPP_TABLE_ACCESS_EXCLUSIVE = 1,
+       OPP_TABLE_ACCESS_SHARED = 2,
+};
+
 /**
  * struct opp_table - Device opp structure
  * @node:      table node - contains the devices with OPPs that
@@ -166,7 +172,7 @@ struct opp_table {
        /* For backward compatibility with v1 bindings */
        unsigned int voltage_tolerance_v1;
 
-       bool shared_opp;
+       enum opp_table_access shared_opp;
        struct dev_pm_opp *suspend_opp;
 
        unsigned int *supported_hw;
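
As a hedged illustration of why the tri-state enum above is useful: callers can now refuse to answer sharing queries before the devicetree bindings have been parsed, instead of silently treating "unknown" as "exclusive". A hypothetical consumer (example_sharing_state() is illustrative only and assumes the enum introduced above):

#include <linux/errno.h>

static int example_sharing_state(enum opp_table_access access)
{
        switch (access) {
        case OPP_TABLE_ACCESS_UNKNOWN:
                return -EINVAL;         /* bindings not parsed yet, don't guess */
        case OPP_TABLE_ACCESS_SHARED:
                return 1;               /* OPPs shared by several CPUs */
        case OPP_TABLE_ACCESS_EXCLUSIVE:
        default:
                return 0;               /* per-device OPP table */
        }
}
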
index d597e43..ab19adb 100644 (file)
@@ -1750,7 +1750,7 @@ aoecmd_init(void)
        int ret;
 
        /* get_zeroed_page returns page with ref count 1 */
-       p = (void *) get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+       p = (void *) get_zeroed_page(GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        empty_page = virt_to_page(p);
index 94fb407..44b1bd6 100644 (file)
@@ -3820,6 +3820,7 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
        while (!list_empty(&intf->waiting_rcv_msgs)) {
                smi_msg = list_entry(intf->waiting_rcv_msgs.next,
                                     struct ipmi_smi_msg, link);
+               list_del(&smi_msg->link);
                if (!run_to_completion)
                        spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
                                               flags);
@@ -3829,11 +3830,14 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
                if (rv > 0) {
                        /*
                         * To preserve message order, quit if we
-                        * can't handle a message.
+                        * can't handle a message.  Add the message
+                        * back at the head; this is safe because this
+                        * tasklet is the only thing that pulls
+                        * messages off the list.
                         */
+                       list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
                        break;
                } else {
-                       list_del(&smi_msg->link);
                        if (rv == 0)
                                /* Message handled */
                                ipmi_free_smi_msg(smi_msg);
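
The ordering fix above follows a common pattern: take the message off the list while holding the lock, drop the lock to process it, and if processing must be deferred, put it back at the head so ordering is preserved. A self-contained sketch of that pattern with hypothetical names:

#include <linux/list.h>
#include <linux/spinlock.h>

struct example_msg {
        struct list_head link;
};

/* handle() returns 0 when the message was consumed, >0 to ask us to stop. */
static int example_drain(struct list_head *queue, spinlock_t *lock,
                         int (*handle)(struct example_msg *))
{
        struct example_msg *msg;
        int rv = 0;

        spin_lock(lock);
        while (!list_empty(queue)) {
                msg = list_entry(queue->next, struct example_msg, link);
                list_del(&msg->link);           /* own the message... */
                spin_unlock(lock);
                rv = handle(msg);
                spin_lock(lock);
                if (rv > 0) {
                        /* ...and put it back at the head to keep ordering */
                        list_add(&msg->link, queue);
                        break;
                }
        }
        spin_unlock(lock);
        return rv;
}
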
index ee367e9..fe9dc17 100644 (file)
@@ -372,26 +372,9 @@ static bool intel_pstate_get_ppc_enable_status(void)
        return acpi_ppc;
 }
 
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-       return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
-       int turbo_pss_ctl;
        int ret;
        int i;
 
@@ -441,11 +424,10 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
         * max frequency, which will cause a reduced performance as
         * this driver uses real max turbo frequency as the max
         * frequency. So correct this frequency in _PSS table to
-        * correct max turbo frequency based on the turbo ratio.
+        * correct max turbo frequency based on the turbo state.
         * Also need to convert to MHz as _PSS freq is in MHz.
         */
-       turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-       if (turbo_pss_ctl > cpu->pstate.max_pstate)
+       if (!limits->turbo_disabled)
                cpu->acpi_perf_data.states[0].core_frequency =
                                        policy->cpuinfo.max_freq / 1000;
        cpu->valid_pss_table = true;
index 808a320..a7ecb9a 100644 (file)
@@ -487,7 +487,7 @@ static int __init pcc_cpufreq_probe(void)
        doorbell.space_id = reg_resource->space_id;
        doorbell.bit_width = reg_resource->bit_width;
        doorbell.bit_offset = reg_resource->bit_offset;
-       doorbell.access_width = 64;
+       doorbell.access_width = 4;
        doorbell.address = reg_resource->address;
 
        pr_debug("probe: doorbell: space_id is %d, bit_width is %d, "
index 1d6c803..e92418f 100644 (file)
@@ -268,8 +268,11 @@ int update_devfreq(struct devfreq *devfreq)
        devfreq_notify_transition(devfreq, &freqs, DEVFREQ_PRECHANGE);
 
        err = devfreq->profile->target(devfreq->dev.parent, &freq, flags);
-       if (err)
+       if (err) {
+               freqs.new = cur_freq;
+               devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
                return err;
+       }
 
        freqs.new = freq;
        devfreq_notify_transition(devfreq, &freqs, DEVFREQ_POSTCHANGE);
@@ -552,6 +555,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
        devfreq->profile = profile;
        strncpy(devfreq->governor_name, governor_name, DEVFREQ_NAME_LEN);
        devfreq->previous_freq = profile->initial_freq;
+       devfreq->last_status.current_frequency = profile->initial_freq;
        devfreq->data = data;
        devfreq->nb.notifier_call = devfreq_notifier_call;
 
@@ -561,23 +565,22 @@ struct devfreq *devfreq_add_device(struct device *dev,
                mutex_lock(&devfreq->lock);
        }
 
-       devfreq->trans_table =  devm_kzalloc(dev, sizeof(unsigned int) *
-                                               devfreq->profile->max_state *
-                                               devfreq->profile->max_state,
-                                               GFP_KERNEL);
-       devfreq->time_in_state = devm_kzalloc(dev, sizeof(unsigned long) *
-                                               devfreq->profile->max_state,
-                                               GFP_KERNEL);
-       devfreq->last_stat_updated = jiffies;
-
        dev_set_name(&devfreq->dev, "%s", dev_name(dev));
        err = device_register(&devfreq->dev);
        if (err) {
-               put_device(&devfreq->dev);
                mutex_unlock(&devfreq->lock);
                goto err_out;
        }
 
+       devfreq->trans_table =  devm_kzalloc(&devfreq->dev, sizeof(unsigned int) *
+                                               devfreq->profile->max_state *
+                                               devfreq->profile->max_state,
+                                               GFP_KERNEL);
+       devfreq->time_in_state = devm_kzalloc(&devfreq->dev, sizeof(unsigned long) *
+                                               devfreq->profile->max_state,
+                                               GFP_KERNEL);
+       devfreq->last_stat_updated = jiffies;
+
        srcu_init_notifier_head(&devfreq->transition_notifier_list);
 
        mutex_unlock(&devfreq->lock);
@@ -603,7 +606,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
 err_init:
        list_del(&devfreq->node);
        device_unregister(&devfreq->dev);
-       kfree(devfreq);
 err_out:
        return ERR_PTR(err);
 }
@@ -621,7 +623,6 @@ int devfreq_remove_device(struct devfreq *devfreq)
                return -EINVAL;
 
        device_unregister(&devfreq->dev);
-       put_device(&devfreq->dev);
 
        return 0;
 }
index 6b6a5f3..a584140 100644 (file)
@@ -220,9 +220,6 @@ static int exynos_nocp_parse_dt(struct platform_device *pdev,
 
        /* Maps the memory mapped IO to control nocp register */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (IS_ERR(res))
-               return PTR_ERR(res);
-
        base = devm_ioremap_resource(dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);
index 8e304b1..75bd662 100644 (file)
@@ -242,7 +242,7 @@ struct at_xdmac_lld {
        u32             mbr_dus;        /* Destination Microblock Stride Register */
 };
 
-
+/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
 struct at_xdmac_desc {
        struct at_xdmac_lld             lld;
        enum dma_transfer_direction     direction;
@@ -253,7 +253,7 @@ struct at_xdmac_desc {
        unsigned int                    xfer_size;
        struct list_head                descs_list;
        struct list_head                xfer_node;
-};
+} __aligned(sizeof(u64));
 
 static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
 {
@@ -1400,6 +1400,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        u32                     cur_nda, check_nda, cur_ubc, mask, value;
        u8                      dwidth = 0;
        unsigned long           flags;
+       bool                    initd;
 
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
@@ -1424,7 +1425,16 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        residue = desc->xfer_size;
        /*
         * Flush FIFO: only relevant when the transfer is source peripheral
-        * synchronized.
+        * synchronized. The flush is needed before reading CUBC because
+        * data sitting in the FIFO are not accounted for by CUBC, so
+        * reporting a residue equal to the transfer length while data are
+        * still in the FIFO would be misleading.
+        * Use case: the Atmel USART raises a timeout when characters have
+        * been received but no new character arrives for a while. On that
+        * timeout it queries the residue. If the data are still in the DMA
+        * FIFO, we would report a residue equal to the transfer length,
+        * i.e. no data received, and an application waiting for that data
+        * would hang, since no further USART timeout occurs without new data.
         */
        mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
        value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
@@ -1435,34 +1445,43 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        }
 
        /*
-        * When processing the residue, we need to read two registers but we
-        * can't do it in an atomic way. AT_XDMAC_CNDA is used to find where
-        * we stand in the descriptor list and AT_XDMAC_CUBC is used
-        * to know how many data are remaining for the current descriptor.
-        * Since the dma channel is not paused to not loose data, between the
-        * AT_XDMAC_CNDA and AT_XDMAC_CUBC read, we may have change of
-        * descriptor.
-        * For that reason, after reading AT_XDMAC_CUBC, we check if we are
-        * still using the same descriptor by reading a second time
-        * AT_XDMAC_CNDA. If AT_XDMAC_CNDA has changed, it means we have to
-        * read again AT_XDMAC_CUBC.
+        * The easiest way to compute the residue would be to pause the
+        * DMA, but doing so can lose data on devices that have no FIFO.
+        * Several registers have to be read because:
+        * - the DMA is still running, so a descriptor change can happen
+        * while these registers are being read;
+        * - when a block transfer completes, the CUBC register is reset
+        * to its initial value until the next descriptor is fetched.
+        * That transient value would corrupt the residue calculation,
+        * so it has to be skipped.
+        * The waveforms below show how INITD, NDA and CUBC evolve:
+        *
+        * INITD --------                    ------------
+        *              |____________________|
+        *       _______________________  _______________
+        * NDA       @desc2             \/   @desc3
+        *       _______________________/\_______________
+        *       __________  ___________  _______________
+        * CUBC       0    \/ MAX desc1 \/  MAX desc2
+        *       __________/\___________/\_______________
+        *
+        * Since descriptors are aligned on 64 bits, we can assume that
+        * the update of NDA and CUBC is atomic.
         * Memory barriers are used to ensure the read order of the registers.
-        * A max number of retries is set because unlikely it can never ends if
-        * we are transferring a lot of data with small buffers.
+        * A maximum number of retries bounds the loop so it cannot spin forever.
         */
-       cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-       rmb();
-       cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
        for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
-               rmb();
                check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
-
-               if (likely(cur_nda == check_nda))
-                       break;
-
-               cur_nda = check_nda;
+               rmb();
+               initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
                rmb();
                cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
+               rmb();
+               cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
+               rmb();
+
+               if ((check_nda == cur_nda) && initd)
+                       break;
        }
 
        if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
@@ -1470,6 +1489,19 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
                goto spin_unlock;
        }
 
+       /*
+        * Flush FIFO: only relevant when the transfer is source peripheral
+        * synchronized. Another flush is needed here because CUBC is updated
+        * when the controller issues the data write command, so it may
+        * account for data that have not yet reached memory or the device.
+        * The FIFO flush ensures the data have really been written.
+        */
+       if ((desc->lld.mbr_cfg & mask) == value) {
+               at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
+               while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
+                       cpu_relax();
+       }
+
        /*
         * Remove size of all microblocks already transferred and the current
         * one. Then add the remaining size to transfer of the current
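
For reference, the retry loop above reduces to a generic "sample registers, order the reads, accept only a self-consistent snapshot" idiom. A hedged sketch with stubbed register readers (the example_* helpers are placeholders, not the at_xdmac API):

#include <linux/types.h>
#include <linux/errno.h>
#include <asm/barrier.h>

#define EXAMPLE_MAX_RETRIES     5

/* Placeholder readers; real code would read the CNDA/CUBC/CC registers. */
static u32 example_read_nda(void)    { return 0; }
static u32 example_read_ubc(void)    { return 0; }
static bool example_read_initd(void) { return true; }

static int example_sample(u32 *nda, u32 *ubc)
{
        u32 check_nda;
        bool initd;
        int retry;

        for (retry = 0; retry < EXAMPLE_MAX_RETRIES; retry++) {
                check_nda = example_read_nda();
                rmb();                  /* keep the register reads ordered */
                initd = example_read_initd();
                rmb();
                *ubc = example_read_ubc();
                rmb();
                *nda = example_read_nda();
                rmb();
                /* accept the snapshot only if we stayed on one descriptor */
                if (check_nda == *nda && initd)
                        return 0;
        }
        return -ETIMEDOUT;
}
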
index 25d1dad..d0446a7 100644 (file)
@@ -703,8 +703,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
                goto free_resources;
        }
 
-       src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
-                                PAGE_SIZE, DMA_TO_DEVICE);
+       src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src),
+                              (size_t)src & ~PAGE_MASK, PAGE_SIZE,
+                              DMA_TO_DEVICE);
        unmap->addr[0] = src_dma;
 
        ret = dma_mapping_error(dma_chan->device->dev, src_dma);
@@ -714,8 +715,9 @@ static int mv_chan_memcpy_self_test(struct mv_xor_chan *mv_chan)
        }
        unmap->to_cnt = 1;
 
-       dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
-                                 PAGE_SIZE, DMA_FROM_DEVICE);
+       dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest),
+                               (size_t)dest & ~PAGE_MASK, PAGE_SIZE,
+                               DMA_FROM_DEVICE);
        unmap->addr[1] = dest_dma;
 
        ret = dma_mapping_error(dma_chan->device->dev, dest_dma);
index 8b3226d..caff46c 100644 (file)
@@ -360,6 +360,8 @@ static int palmas_usb_probe(struct platform_device *pdev)
 
        palmas_enable_irq(palmas_usb);
        /* perform initial detection */
+       if (palmas_usb->enable_gpio_vbus_detection)
+               palmas_vbus_irq_handler(palmas_usb->gpio_vbus_irq, palmas_usb);
        palmas_gpio_id_detect(&palmas_usb->wq_detectid.work);
        device_set_wakeup_capable(&pdev->dev, true);
        return 0;
index a116609..cebcb40 100644 (file)
@@ -531,7 +531,7 @@ menu "Port-mapped I/O GPIO drivers"
 
 config GPIO_104_DIO_48E
        tristate "ACCES 104-DIO-48E GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-DIO-48E series (104-DIO-48E,
@@ -541,7 +541,7 @@ config GPIO_104_DIO_48E
 
 config GPIO_104_IDIO_16
        tristate "ACCES 104-IDIO-16 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-IDIO-16 family (104-IDIO-16,
@@ -552,7 +552,7 @@ config GPIO_104_IDIO_16
 
 config GPIO_104_IDI_48
        tristate "ACCES 104-IDI-48 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the ACCES 104-IDI-48 family (104-IDI-48A,
@@ -628,7 +628,7 @@ config GPIO_TS5500
 
 config GPIO_WS16C48
        tristate "WinSystems WS16C48 GPIO support"
-       depends on ISA
+       depends on ISA_BUS_API
        select GPIOLIB_IRQCHIP
        help
          Enables GPIO support for the WinSystems WS16C48. The base port
index 6c75c83..2d2763e 100644 (file)
@@ -247,6 +247,7 @@ static int idi_48_probe(struct device *dev, unsigned int id)
        idi48gpio->irq = irq[id];
 
        spin_lock_init(&idi48gpio->lock);
+       spin_lock_init(&idi48gpio->ack_lock);
 
        dev_set_drvdata(dev, idi48gpio);
 
index 58d822d..570771e 100644 (file)
@@ -1373,8 +1373,12 @@ done:
 #define VALIDATE_DESC(desc) do { \
        if (!desc) \
                return 0; \
+       if (IS_ERR(desc)) {                                             \
+               pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+               return PTR_ERR(desc); \
+       } \
        if (!desc->gdev) { \
-               pr_warn("%s: invalid GPIO\n", __func__); \
+               pr_warn("%s: invalid GPIO (no device)\n", __func__); \
                return -EINVAL; \
        } \
        if ( !desc->gdev->chip ) { \
@@ -1386,8 +1390,12 @@ done:
 #define VALIDATE_DESC_VOID(desc) do { \
        if (!desc) \
                return; \
+       if (IS_ERR(desc)) {                                             \
+               pr_warn("%s: invalid GPIO (errorpointer)\n", __func__); \
+               return; \
+       } \
        if (!desc->gdev) { \
-               pr_warn("%s: invalid GPIO\n", __func__); \
+               pr_warn("%s: invalid GPIO (no device)\n", __func__); \
                return; \
        } \
        if (!desc->gdev->chip) { \
@@ -2056,7 +2064,14 @@ int gpiod_to_irq(const struct gpio_desc *desc)
        struct gpio_chip *chip;
        int offset;
 
-       VALIDATE_DESC(desc);
+       /*
+        * Cannot VALIDATE_DESC() here as gpiod_to_irq() consumer semantics
+        * requires this function to not return zero on an invalid descriptor
+        * but rather a negative error number.
+        */
+       if (!desc || IS_ERR(desc) || !desc->gdev || !desc->gdev->chip)
+               return -EINVAL;
+
        chip = desc->gdev->chip;
        offset = gpio_chip_hwgpio(desc);
        if (chip->to_irq) {
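
The macros above now distinguish three invalid cases: a NULL descriptor, an error pointer returned by gpiod_get(), and a descriptor whose device or chip has gone away. A small hedged sketch of the error-pointer check in open-coded form (example_validate() is illustrative only):

#include <linux/err.h>

static int example_validate(const void *desc)
{
        if (!desc)
                return 0;               /* NULL is tolerated as "no GPIO" */
        if (IS_ERR(desc))
                return PTR_ERR(desc);   /* propagate the encoded error code */
        return 1;                       /* plausible descriptor, caller checks further */
}
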
index 01c36b8..e055d5b 100644 (file)
@@ -799,7 +799,6 @@ struct amdgpu_ring {
        unsigned                cond_exe_offs;
        u64                             cond_exe_gpu_addr;
        volatile u32    *cond_exe_cpu_addr;
-       int                     vmid;
 };
 
 /*
@@ -937,8 +936,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
                    unsigned vm_id, uint64_t pd_addr,
                    uint32_t gds_base, uint32_t gds_size,
                    uint32_t gws_base, uint32_t gws_size,
-                   uint32_t oa_base, uint32_t oa_size,
-                   bool vmid_switch);
+                   uint32_t oa_base, uint32_t oa_size);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1822,6 +1820,8 @@ struct amdgpu_asic_funcs {
        /* MM block clocks */
        int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
        int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+       /* query virtual capabilities */
+       u32 (*get_virtual_caps)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1916,8 +1916,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 
 
 /* GPU virtualization */
+#define AMDGPU_VIRT_CAPS_SRIOV_EN       (1 << 0)
+#define AMDGPU_VIRT_CAPS_IS_VF          (1 << 1)
 struct amdgpu_virtualization {
        bool supports_sr_iov;
+       bool is_virtual;
+       u32 caps;
 };
 
 /*
@@ -2206,6 +2210,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
+#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
 #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
index 8943099..cf6f49f 100644 (file)
@@ -909,7 +909,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
        struct cgs_acpi_method_argument *argument = NULL;
        uint32_t i, count;
        acpi_status status;
-       int result;
+       int result = 0;
        uint32_t func_no = 0xFFFFFFFF;
 
        handle = ACPI_HANDLE(&adev->pdev->dev);
index 964f314..6e92008 100644 (file)
@@ -1385,6 +1385,15 @@ static int amdgpu_resume(struct amdgpu_device *adev)
        return 0;
 }
 
+static bool amdgpu_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+       return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+       return false;
+#endif
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -1519,8 +1528,14 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        adev->virtualization.supports_sr_iov =
                amdgpu_atombios_has_gpu_virtualization_table(adev);
 
+       /* Check if we are executing in a virtualized environment */
+       adev->virtualization.is_virtual = amdgpu_device_is_virtual();
+       adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+
        /* Post card if necessary */
-       if (!amdgpu_card_posted(adev)) {
+       if (!amdgpu_card_posted(adev) ||
+           (adev->virtualization.is_virtual &&
+            !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
                if (!adev->bios) {
                        dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
                        return -EINVAL;
index 7a0b1e5..34e3542 100644 (file)
@@ -122,7 +122,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        bool skip_preamble, need_ctx_switch;
        unsigned patch_offset = ~0;
        struct amdgpu_vm *vm;
-       int vmid = 0, old_vmid = ring->vmid;
        struct fence *hwf;
        uint64_t ctx;
 
@@ -136,11 +135,9 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        if (job) {
                vm = job->vm;
                ctx = job->ctx;
-               vmid = job->vm_id;
        } else {
                vm = NULL;
                ctx = 0;
-               vmid = 0;
        }
 
        if (!ring->ready) {
@@ -166,8 +163,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
                                    job->gds_base, job->gds_size,
                                    job->gws_base, job->gws_size,
-                                   job->oa_base, job->oa_size,
-                                   (ring->current_ctx == ctx) && (old_vmid != vmid));
+                                   job->oa_base, job->oa_size);
                if (r) {
                        amdgpu_ring_undo(ring);
                        return r;
@@ -184,6 +180,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
        need_ctx_switch = ring->current_ctx != ctx;
        for (i = 0; i < num_ibs; ++i) {
                ib = &ibs[i];
+
                /* drop preamble IBs if we don't have a context switch */
                if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && skip_preamble)
                        continue;
@@ -191,7 +188,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                amdgpu_ring_emit_ib(ring, ib, job ? job->vm_id : 0,
                                    need_ctx_switch);
                need_ctx_switch = false;
-               ring->vmid = vmid;
        }
 
        if (ring->funcs->emit_hdp_invalidate)
@@ -202,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
                dev_err(adev->dev, "failed to emit fence (%d)\n", r);
                if (job && job->vm_id)
                        amdgpu_vm_reset_id(adev, job->vm_id);
-               ring->vmid = old_vmid;
                amdgpu_ring_undo(ring);
                return r;
        }
index 40a2370..d851ea1 100644 (file)
@@ -447,7 +447,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                        dev_info.max_memory_clock = adev->pm.default_mclk * 10;
                }
                dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
-               dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
+               dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
+                       adev->gfx.config.max_shader_engines;
                dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
                dev_info._pad = 0;
                dev_info.ids_flags = 0;
index 589b36e..0e13d80 100644 (file)
@@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
        struct drm_device *ddev = dev_get_drvdata(dev);
        struct amdgpu_device *adev = ddev->dev_private;
        enum amd_pm_state_type state = 0;
-       long idx;
+       unsigned long idx;
        int ret;
 
        if (strlen(buf) == 1)
                adev->pp_force_state_enabled = false;
-       else {
-               ret = kstrtol(buf, 0, &idx);
+       else if (adev->pp_enabled) {
+               struct pp_states_info data;
 
-               if (ret) {
+               ret = kstrtoul(buf, 0, &idx);
+               if (ret || idx >= ARRAY_SIZE(data.states)) {
                        count = -EINVAL;
                        goto fail;
                }
 
-               if (adev->pp_enabled) {
-                       struct pp_states_info data;
-                       amdgpu_dpm_get_pp_num_states(adev, &data);
-                       state = data.states[idx];
-                       /* only set user selected power states */
-                       if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
-                               state != POWER_STATE_TYPE_DEFAULT) {
-                               amdgpu_dpm_dispatch_task(adev,
-                                               AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
-                               adev->pp_force_state_enabled = true;
-                       }
+               amdgpu_dpm_get_pp_num_states(adev, &data);
+               state = data.states[idx];
+               /* only set user selected power states */
+               if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+                   state != POWER_STATE_TYPE_DEFAULT) {
+                       amdgpu_dpm_dispatch_task(adev,
+                                       AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+                       adev->pp_force_state_enabled = true;
                }
        }
 fail:
index 62a4c12..9f36ed3 100644 (file)
@@ -298,8 +298,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
                    unsigned vm_id, uint64_t pd_addr,
                    uint32_t gds_base, uint32_t gds_size,
                    uint32_t gws_base, uint32_t gws_size,
-                   uint32_t oa_base, uint32_t oa_size,
-                   bool vmid_switch)
+                   uint32_t oa_base, uint32_t oa_size)
 {
        struct amdgpu_device *adev = ring->adev;
        struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id];
@@ -313,7 +312,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
        int r;
 
        if (ring->funcs->emit_pipeline_sync && (
-           pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || vmid_switch))
+           pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed ||
+                   ring->type == AMDGPU_RING_TYPE_COMPUTE))
                amdgpu_ring_emit_pipeline_sync(ring);
 
        if (ring->funcs->emit_vm_flush &&
index 07bc795..9104318 100644 (file)
@@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
+static u32 cik_get_virtual_caps(struct amdgpu_device *adev)
+{
+       /* CIK does not support SR-IOV */
+       return 0;
+}
+
 static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = {
        {mmGRBM_STATUS, false},
        {mmGB_ADDR_CONFIG, false},
@@ -2007,6 +2013,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
        .get_xclk = &cik_get_xclk,
        .set_uvd_clocks = &cik_set_uvd_clocks,
        .set_vce_clocks = &cik_set_vce_clocks,
+       .get_virtual_caps = &cik_get_virtual_caps,
        /* these should be moved to their own ip modules */
        .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
        .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
index 8c6ad1e..fc8ff4d 100644 (file)
@@ -4833,7 +4833,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
        case 2:
                for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                        ring = &adev->gfx.compute_ring[i];
-                       if ((ring->me == me_id) & (ring->pipe == pipe_id))
+                       if ((ring->me == me_id) && (ring->pipe == pipe_id))
                                amdgpu_fence_process(ring);
                }
                break;
index 9f6f866..1a5cbaf 100644 (file)
@@ -297,7 +297,8 @@ static const u32 polaris11_golden_common_all[] =
 static const u32 golden_settings_polaris10_a11[] =
 {
        mmATC_MISC_CG, 0x000c0fc0, 0x000c0200,
-       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208,
+       mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208,
+       mmCB_HW_CONTROL_2, 0, 0x0f000000,
        mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040,
        mmDB_DEBUG2, 0xf00fffff, 0x00000400,
        mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
index 2c88d0b..a65c960 100644 (file)
@@ -421,6 +421,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev,
        return true;
 }
 
+static u32 vi_get_virtual_caps(struct amdgpu_device *adev)
+{
+       u32 caps = 0;
+       u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
+
+       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE))
+               caps |= AMDGPU_VIRT_CAPS_SRIOV_EN;
+
+       if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER))
+               caps |= AMDGPU_VIRT_CAPS_IS_VF;
+
+       return caps;
+}
+
 static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = {
        {mmGB_MACROTILE_MODE7, true},
 };
@@ -1118,6 +1132,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
        .get_xclk = &vi_get_xclk,
        .set_uvd_clocks = &vi_set_uvd_clocks,
        .set_vce_clocks = &vi_set_vce_clocks,
+       .get_virtual_caps = &vi_get_virtual_caps,
        /* these should be moved to their own ip modules */
        .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
        .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
index ac00579..7708d90 100644 (file)
@@ -242,13 +242,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
        pqm_uninit(&p->pqm);
 
        /* Iterate over all process device data structure and check
-        * if we should reset all wavefronts */
-       list_for_each_entry(pdd, &p->per_device_data, per_device_list)
+        * if we should delete debug managers and reset all wavefronts
+        */
+       list_for_each_entry(pdd, &p->per_device_data, per_device_list) {
+               if ((pdd->dev->dbgmgr) &&
+                               (pdd->dev->dbgmgr->pasid == p->pasid))
+                       kfd_dbgmgr_destroy(pdd->dev->dbgmgr);
+
                if (pdd->reset_wavefronts) {
                        pr_warn("amdkfd: Resetting all wave fronts\n");
                        dbgdev_wave_reset_wavefronts(pdd->dev, p);
                        pdd->reset_wavefronts = false;
                }
+       }
 
        mutex_unlock(&p->mutex);
 
@@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
 
        idx = srcu_read_lock(&kfd_processes_srcu);
 
+       /*
+        * Look for the process that matches the pasid. If there is no such
+        * process, it was either already released in amdkfd's own notifier,
+        * or there is a bug. Unfortunately, there is no way to tell which...
+        */
        hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
-               if (p->pasid == pasid)
-                       break;
+               if (p->pasid == pasid) {
 
-       srcu_read_unlock(&kfd_processes_srcu, idx);
+                       srcu_read_unlock(&kfd_processes_srcu, idx);
 
-       BUG_ON(p->pasid != pasid);
+                       pr_debug("Unbinding process %d from IOMMU\n", pasid);
 
-       mutex_lock(&p->mutex);
+                       mutex_lock(&p->mutex);
 
-       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
-               kfd_dbgmgr_destroy(dev->dbgmgr);
+                       if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
+                               kfd_dbgmgr_destroy(dev->dbgmgr);
 
-       pqm_uninit(&p->pqm);
+                       pqm_uninit(&p->pqm);
 
-       pdd = kfd_get_process_device_data(dev, p);
+                       pdd = kfd_get_process_device_data(dev, p);
 
-       if (!pdd) {
-               mutex_unlock(&p->mutex);
-               return;
-       }
+                       if (!pdd) {
+                               mutex_unlock(&p->mutex);
+                               return;
+                       }
 
-       if (pdd->reset_wavefronts) {
-               dbgdev_wave_reset_wavefronts(pdd->dev, p);
-               pdd->reset_wavefronts = false;
-       }
+                       if (pdd->reset_wavefronts) {
+                               dbgdev_wave_reset_wavefronts(pdd->dev, p);
+                               pdd->reset_wavefronts = false;
+                       }
 
-       /*
-        * Just mark pdd as unbound, because we still need it to call
-        * amd_iommu_unbind_pasid() in when the process exits.
-        * We don't call amd_iommu_unbind_pasid() here
-        * because the IOMMU called us.
-        */
-       pdd->bound = false;
+                       /*
+                        * Just mark pdd as unbound, because we still need it
+                        * to call amd_iommu_unbind_pasid() when the
+                        * process exits.
+                        * We don't call amd_iommu_unbind_pasid() here
+                        * because the IOMMU called us.
+                        */
+                       pdd->bound = false;
 
-       mutex_unlock(&p->mutex);
+                       mutex_unlock(&p->mutex);
+
+                       return;
+               }
+
+       srcu_read_unlock(&kfd_processes_srcu, idx);
 }
 
 struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
index 74909e7..884c96f 100644 (file)
@@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->node_props.simd_count);
 
        if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
-               pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
+               pr_info_once("kfd: mem_banks_count truncated from %d to %d\n",
                                dev->node_props.mem_banks_count,
                                dev->mem_bank_count);
                sysfs_show_32bit_prop(buffer, "mem_banks_count",
index 32f3e34..3493da5 100644 (file)
@@ -5538,6 +5538,78 @@ typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_5
   ULONG  ulReserved[12];
 }ATOM_ASIC_PROFILING_INFO_V3_5;
 
+/* for Polars10/11 AVFS parameters */
+typedef struct  _ATOM_ASIC_PROFILING_INFO_V3_6
+{
+  ATOM_COMMON_TABLE_HEADER         asHeader;
+  ULONG  ulMaxVddc;
+  ULONG  ulMinVddc;
+  USHORT usLkgEuseIndex;
+  UCHAR  ucLkgEfuseBitLSB;
+  UCHAR  ucLkgEfuseLength;
+  ULONG  ulLkgEncodeLn_MaxDivMin;
+  ULONG  ulLkgEncodeMax;
+  ULONG  ulLkgEncodeMin;
+  EFUSE_LINEAR_FUNC_PARAM sRoFuse;
+  ULONG  ulEvvDefaultVddc;
+  ULONG  ulEvvNoCalcVddc;
+  ULONG  ulSpeed_Model;
+  ULONG  ulSM_A0;
+  ULONG  ulSM_A1;
+  ULONG  ulSM_A2;
+  ULONG  ulSM_A3;
+  ULONG  ulSM_A4;
+  ULONG  ulSM_A5;
+  ULONG  ulSM_A6;
+  ULONG  ulSM_A7;
+  UCHAR  ucSM_A0_sign;
+  UCHAR  ucSM_A1_sign;
+  UCHAR  ucSM_A2_sign;
+  UCHAR  ucSM_A3_sign;
+  UCHAR  ucSM_A4_sign;
+  UCHAR  ucSM_A5_sign;
+  UCHAR  ucSM_A6_sign;
+  UCHAR  ucSM_A7_sign;
+  ULONG  ulMargin_RO_a;
+  ULONG  ulMargin_RO_b;
+  ULONG  ulMargin_RO_c;
+  ULONG  ulMargin_fixed;
+  ULONG  ulMargin_Fmax_mean;
+  ULONG  ulMargin_plat_mean;
+  ULONG  ulMargin_Fmax_sigma;
+  ULONG  ulMargin_plat_sigma;
+  ULONG  ulMargin_DC_sigma;
+  ULONG  ulLoadLineSlop;
+  ULONG  ulaTDClimitPerDPM[8];
+  ULONG  ulaNoCalcVddcPerDPM[8];
+  ULONG  ulAVFS_meanNsigma_Acontant0;
+  ULONG  ulAVFS_meanNsigma_Acontant1;
+  ULONG  ulAVFS_meanNsigma_Acontant2;
+  USHORT usAVFS_meanNsigma_DC_tol_sigma;
+  USHORT usAVFS_meanNsigma_Platform_mean;
+  USHORT usAVFS_meanNsigma_Platform_sigma;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a0;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a1;
+  ULONG  ulGB_VDROOP_TABLE_CKSOFF_a2;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a0;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a1;
+  ULONG  ulGB_VDROOP_TABLE_CKSON_a2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+  USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSON_m1;
+  USHORT usAVFSGB_FUSE_TABLE_CKSON_m2;
+  ULONG  ulAVFSGB_FUSE_TABLE_CKSON_b;
+  USHORT usMaxVoltage_0_25mv;
+  UCHAR  ucEnableGB_VDROOP_TABLE_CKSOFF;
+  UCHAR  ucEnableGB_VDROOP_TABLE_CKSON;
+  UCHAR  ucEnableGB_FUSE_TABLE_CKSOFF;
+  UCHAR  ucEnableGB_FUSE_TABLE_CKSON;
+  USHORT usPSM_Age_ComFactor;
+  UCHAR  ucEnableApplyAVFS_CKS_OFF_Voltage;
+  UCHAR  ucReserved;
+}ATOM_ASIC_PROFILING_INFO_V3_6;
+
 
 typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{
   ULONG  ulMaxSclkFreq;
index 586f732..92912ab 100644 (file)
@@ -633,6 +633,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE;
 
+       data->force_pcie_gen = PP_PCIEGenInvalid;
+
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
                data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2;
index fa208ad..efb77ed 100644 (file)
@@ -306,10 +306,14 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 {
        PHM_FUNC_CHECK(hwmgr);
 
-       if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+       if (display_config == NULL)
                return -EINVAL;
 
        hwmgr->display_config = *display_config;
+
+       if (hwmgr->hwmgr_func->store_cc6_data == NULL)
+               return -EINVAL;
+
        /* to do pass other display configuration in furture */
 
        if (hwmgr->hwmgr_func->store_cc6_data)
index 347fef1..2930a33 100644 (file)
@@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record {
        uint8_t phases;
        uint8_t cks_enable;
        uint8_t cks_voffset;
+       uint32_t sclk_offset;
 };
 
 typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record;
index aa6be03..64ee78f 100644 (file)
@@ -999,7 +999,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr,
                                vddci = phm_find_closest_vddci(&(data->vddci_voltage_table),
                                                (dep_table->entries[i].vddc -
                                                                (uint16_t)data->vddc_vddci_delta));
-                               *voltage |= (vddci * VOLTAGE_SCALE) <<  VDDCI_SHIFT;
+                               *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT;
                        }
 
                        if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control)
@@ -1296,7 +1296,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
        }
 
        mem_level->MclkFrequency = clock;
-       mem_level->StutterEnable = 0;
        mem_level->EnabledForThrottle = 1;
        mem_level->EnabledForActivity = 0;
        mem_level->UpHyst = 0;
@@ -1304,7 +1303,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
        mem_level->VoltageDownHyst = 0;
        mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target;
        mem_level->StutterEnable = false;
-
        mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
 
        data->display_timing.num_existing_displays = info.display_count;
@@ -1363,7 +1361,7 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
         * a higher state by default such that we are not effected by
         * up threshold or and MCLK DPM latency.
         */
-       levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target;
+       levels[0].ActivityLevel = 0x1f;
        CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel);
 
        data->smc_state_table.MemoryDpmLevelCount =
@@ -1761,12 +1759,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr)
 
 static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 {
-       uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks,
-                       volt_with_cks, value;
-       uint16_t clock_freq_u16;
+       uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-       uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2,
-                       volt_offset = 0;
+       uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
        struct phm_ppt_v1_information *table_info =
                        (struct phm_ppt_v1_information *)(hwmgr->pptable);
        struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1778,50 +1773,38 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
         * if the part is SS or FF. if RO >= 1660MHz, part is FF.
         */
        efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (146 * 4));
-       efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixSMU_EFUSE_0 + (148 * 4));
+                       ixSMU_EFUSE_0 + (67 * 4));
        efuse &= 0xFF000000;
        efuse = efuse >> 24;
-       efuse2 &= 0xF;
-
-       if (efuse2 == 1)
-               ro = (2300 - 1350) * efuse / 255 + 1350;
-       else
-               ro = (2500 - 1000) * efuse / 255 + 1000;
 
-       if (ro >= 1660)
-               type = 0;
-       else
-               type = 1;
+       if (hwmgr->chip_id == CHIP_POLARIS10) {
+               min = 1000;
+               max = 2300;
+       } else {
+               min = 1100;
+               max = 2100;
+       }
 
-       /* Populate Stretch amount */
-       data->smc_state_table.ClockStretcherAmount = stretch_amount;
+       ro = efuse * (max - min) / 255 + min;
 
        /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */
        for (i = 0; i < sclk_table->count; i++) {
                data->smc_state_table.Sclk_CKS_masterEn0_7 |=
                                sclk_table->entries[i].cks_enable << i;
-               volt_without_cks = (uint32_t)((14041 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 /
-                       (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000)));
-               volt_with_cks = (uint32_t)((13946 *
-                       (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 /
-                       (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000)));
+
+               volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk / 100 * 136418 / 1000) /
+                                       (sclk_table->entries[i].clk / 100 * 1132925 / 10000 - 242418) / 100);
+
+               volt_with_cks = (uint32_t)((ro * 1000 - 2396351 - sclk_table->entries[i].clk / 100 * 329021 / 1000) /
+                               (sclk_table->entries[i].clk / 10000 * 649434 / 1000 - 18005) / 10);
+
                if (volt_without_cks >= volt_with_cks)
                        volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks +
                                        sclk_table->entries[i].cks_voffset) * 100 / 625) + 1);
+
                data->smc_state_table.Sclk_voltageOffset[i] = volt_offset;
        }
 
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       STRETCH_ENABLE, 0x0);
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x1);
-       /* PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */
-       PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE,
-                       masterReset, 0x0);
-
        /* Populate CKS Lookup Table */
        if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
                stretch_amount2 = 0;
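A worked example of the new efuse-to-RO interpolation in the hunk above (the constants come straight from the patch; the fuse reading is invented):

	/* sketch: linear map of the 8-bit fuse field onto the per-chip RO range */
	uint32_t efuse = 128;                            /* example reading, 0..255         */
	uint32_t min = 1000, max = 2300;                 /* CHIP_POLARIS10 range from above */
	uint32_t ro  = efuse * (max - min) / 255 + min;  /* integer math: 1652              */

The per-level value written into Sclk_voltageOffset then scales the gap (volt_without_cks - volt_with_cks + cks_voffset) by 100 / 625, which appears to express it in 6.25 mV steps.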
@@ -1835,69 +1818,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
                                return -EINVAL);
        }
 
-       value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL);
-       value &= 0xFFC2FF87;
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][0];
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][1];
-       clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table.
-                       GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100);
-       if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16
-       && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) {
-               /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */
-               value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16;
-               /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */
-               value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18;
-               /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */
-               value |= (polaris10_clock_stretch_amount_conversion
-                               [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]]
-                                [stretch_amount]) << 3;
-       }
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq);
-       CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq);
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting =
-                       polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F;
-       data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |=
-                       (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7;
-
-       cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
-                       ixPWR_CKS_CNTL, value);
-
-       /* Populate DDT Lookup Table */
-       for (i = 0; i < 4; i++) {
-               /* Assign the minimum and maximum VID stored
-                * in the last row of Clock Stretcher Voltage Table.
-                */
-               data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID =
-                               (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2];
-               data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID =
-                               (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3];
-               /* Loop through each SCLK and check the frequency
-                * to see if it lies within the frequency for clock stretcher.
-                */
-               for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) {
-                       cks_setting = 0;
-                       clock_freq = PP_SMC_TO_HOST_UL(
-                                       data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency);
-                       /* Check the allowed frequency against the sclk level[j].
-                        *  Sclk's endianness has already been converted,
-                        *  and it's in 10Khz unit,
-                        *  as opposed to Data table, which is in Mhz unit.
-                        */
-                       if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) {
-                               cks_setting |= 0x2;
-                               if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100)
-                                       cks_setting |= 0x1;
-                       }
-                       data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting
-                                                       |= cks_setting << (j * 2);
-               }
-               CONVERT_FROM_HOST_TO_SMC_US(
-                       data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting);
-       }
-
        value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL);
        value &= 0xFFFFFFFE;
        cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value);
@@ -1956,6 +1876,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
        return 0;
 }
 
+
+int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
+{
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       SMU74_Discrete_DpmTable  *table = &(data->smc_state_table);
+       int result = 0;
+       struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
+       AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
+       AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} };
+       uint32_t tmp, i;
+       struct pp_smumgr *smumgr = hwmgr->smumgr;
+       struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+
+       struct phm_ppt_v1_information *table_info =
+                       (struct phm_ppt_v1_information *)hwmgr->pptable;
+       struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
+                       table_info->vdd_dep_on_sclk;
+
+
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
+               return result;
+
+       result = atomctrl_get_avfs_information(hwmgr, &avfs_params);
+
+       if (0 == result) {
+               table->BTCGB_VDROOP_TABLE[0].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0);
+               table->BTCGB_VDROOP_TABLE[0].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1);
+               table->BTCGB_VDROOP_TABLE[0].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2);
+               table->BTCGB_VDROOP_TABLE[1].a0  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0);
+               table->BTCGB_VDROOP_TABLE[1].a1  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1);
+               table->BTCGB_VDROOP_TABLE[1].a2  = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2);
+               table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1);
+               table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2);
+               table->AVFSGB_VDROOP_TABLE[0].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b);
+               table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[0].m2_shift  = 12;
+               table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
+               table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2);
+               table->AVFSGB_VDROOP_TABLE[1].b  = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b);
+               table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24;
+               table->AVFSGB_VDROOP_TABLE[1].m2_shift  = 12;
+               table->MaxVoltage                = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv);
+               AVFS_meanNsigma.Aconstant[0]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0);
+               AVFS_meanNsigma.Aconstant[1]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1);
+               AVFS_meanNsigma.Aconstant[2]      = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2);
+               AVFS_meanNsigma.DC_tol_sigma      = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma);
+               AVFS_meanNsigma.Platform_mean     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean);
+               AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor);
+               AVFS_meanNsigma.Platform_sigma     = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma);
+
+               for (i = 0; i < NUM_VFT_COLUMNS; i++) {
+                       AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625);
+                       AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100);
+               }
+
+               result = polaris10_read_smc_sram_dword(smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
+                               &tmp, data->sram_end);
+
+               polaris10_copy_bytes_to_smc(smumgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_meanNsigma,
+                                       sizeof(AVFS_meanNsigma_t),
+                                       data->sram_end);
+
+               result = polaris10_read_smc_sram_dword(smumgr,
+                               SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable),
+                               &tmp, data->sram_end);
+               polaris10_copy_bytes_to_smc(smumgr,
+                                       tmp,
+                                       (uint8_t *)&AVFS_SclkOffset,
+                                       sizeof(AVFS_Sclk_Offset_t),
+                                       data->sram_end);
+
+               data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) |
+                                               (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT);
+               data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? true : false;
+       }
+       return result;
+}
+
+
 /**
 * Initializes the SMC table and uploads it
 *
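Two idioms in the new polaris10_populate_avfs_parameters() are worth spelling out: the per-feature enable flags are packed into one override word using the *_Vdroop_Enable_SHIFT positions defined further down in this patch, and each AVFS table is uploaded by first reading its SMC SRAM address out of the firmware header and then copying the host structure there. A hedged sketch; the flag variables are illustrative stand-ins for the ucEnable* fields read from the ATOM table:

	uint32_t cks_on_vdroop_en = 1, cks_off_vdroop_en = 1;   /* illustrative flags */
	uint32_t cks_on_fuse_en = 1, cks_off_fuse_en = 0;

	/* sketch: this is what ends up in data->avfs_vdroop_override_setting */
	uint32_t override =
		(cks_on_vdroop_en  << BTCGB0_Vdroop_Enable_SHIFT) |   /* bit 0 */
		(cks_off_vdroop_en << BTCGB1_Vdroop_Enable_SHIFT) |   /* bit 1 */
		(cks_on_fuse_en    << AVFSGB0_Vdroop_Enable_SHIFT) |  /* bit 2 */
		(cks_off_fuse_en   << AVFSGB1_Vdroop_Enable_SHIFT);   /* bit 3 */

	/* sketch: locate a table through the firmware header, then upload it */
	AVFS_meanNsigma_t AVFS_meanNsigma = { {0} };
	uint32_t addr;

	if (!polaris10_read_smc_sram_dword(smumgr,
			SMU7_FIRMWARE_HEADER_LOCATION +
			offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
			&addr, data->sram_end))                /* assuming 0 == success */
		polaris10_copy_bytes_to_smc(smumgr, addr,
				(uint8_t *)&AVFS_meanNsigma,
				sizeof(AVFS_meanNsigma_t), data->sram_end);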
@@ -2056,6 +2060,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr)
                                "Failed to populate Clock Stretcher Data Table!",
                                return result);
        }
+
+       result = polaris10_populate_avfs_parameters(hwmgr);
+       PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;);
+
        table->CurrSclkPllRange = 0xff;
        table->GraphicsVoltageChangeEnable  = 1;
        table->GraphicsThermThrottleEnable  = 1;
@@ -2252,6 +2260,9 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
 static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
+       uint32_t soft_register_value = 0;
+       uint32_t handshake_disables_offset = data->soft_regs_start
+                               + offsetof(SMU74_SoftRegisters, HandshakeDisables);
 
        /* enable SCLK dpm */
        if (!data->sclk_dpm_key_disabled)
@@ -2262,6 +2273,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 
        /* enable MCLK dpm */
        if (0 == data->mclk_dpm_key_disabled) {
+               /* Disable UVD - SMU handshake for MCLK. */
+               soft_register_value = cgs_read_ind_register(hwmgr->device,
+                                       CGS_IND_REG__SMC, handshake_disables_offset);
+               soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;
+               cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
+                               handshake_disables_offset, soft_register_value);
 
                PP_ASSERT_WITH_CODE(
                                (0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -2269,7 +2286,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
                                "Failed to enable MCLK DPM during DPM Start Function!",
                                return -1);
 
-
                PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1);
 
                cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5);
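The MCLK-DPM path above now sets the UVD handshake-disable flag with a read-modify-write of the SMU soft-register block; the pattern in isolation, reusing the names from the hunk (sketch only):

	/* sketch: flip one flag without disturbing the other handshake bits */
	uint32_t offs = data->soft_regs_start +
			offsetof(SMU74_SoftRegisters, HandshakeDisables);
	uint32_t val  = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, offs);

	val |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE;      /* what the patch does            */
	/* val &= ~SMU7_UVD_MCLK_HANDSHAKE_DISABLE;     the complementary re-enable    */
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, offs, val);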
@@ -2606,6 +2622,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
 
        phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                                PHM_PlatformCaps_FanSpeedInTableIsRPM);
+
        if (hwmgr->chip_id == CHIP_POLARIS11)
                phm_cap_set(hwmgr->platform_descriptor.platformCaps,
                                        PHM_PlatformCaps_SPLLShutdownSupport);
@@ -2938,6 +2955,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE;
 
+       data->enable_tdc_limit_feature = true;
+       data->enable_pkg_pwr_tracking_feature = true;
+       data->force_pcie_gen = PP_PCIEGenInvalid;
+       data->mclk_stutter_mode_threshold = 40000;
+
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                        VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
                data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
@@ -2962,6 +2984,10 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
                        data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2;
        }
 
+       if (table_info->cac_dtp_table->usClockStretchAmount != 0)
+               phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+                                       PHM_PlatformCaps_ClockStretcher);
+
        polaris10_set_features_platform_caps(hwmgr);
 
        polaris10_init_dpm_defaults(hwmgr);
@@ -3520,10 +3546,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
        ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state;
        ATOM_Tonga_POWERPLAYTABLE *powerplay_table =
                        (ATOM_Tonga_POWERPLAYTABLE *)pp_table;
-       ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-                       (ATOM_Tonga_SCLK_Dependency_Table *)
+       PPTable_Generic_SubTable_Header *sclk_dep_table =
+                       (PPTable_Generic_SubTable_Header *)
                        (((unsigned long)powerplay_table) +
                                le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
+
        ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
                        (ATOM_Tonga_MCLK_Dependency_Table *)
                        (((unsigned long)powerplay_table) +
@@ -3575,7 +3602,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
        /* Performance levels are arranged from low to high. */
        performance_level->memory_clock = mclk_dep_table->entries
                        [state_entry->ucMemoryClockIndexLow].ulMclk;
-       performance_level->engine_clock = sclk_dep_table->entries
+       if (sclk_dep_table->ucRevId == 0)
+               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexLow].ulSclk;
+       else if (sclk_dep_table->ucRevId == 1)
+               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
                        [state_entry->ucEngineClockIndexLow].ulSclk;
        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
                        state_entry->ucPCIEGenLow);
@@ -3586,8 +3617,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
                        [polaris10_power_state->performance_level_count++]);
        performance_level->memory_clock = mclk_dep_table->entries
                        [state_entry->ucMemoryClockIndexHigh].ulMclk;
-       performance_level->engine_clock = sclk_dep_table->entries
+
+       if (sclk_dep_table->ucRevId == 0)
+               performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries
                        [state_entry->ucEngineClockIndexHigh].ulSclk;
+       else if (sclk_dep_table->ucRevId == 1)
+               performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries
+                       [state_entry->ucEngineClockIndexHigh].ulSclk;
+
        performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap,
                        state_entry->ucPCIEGenHigh);
        performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap,
@@ -3645,7 +3682,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
                switch (state->classification.ui_label) {
                case PP_StateUILabel_Performance:
                        data->use_pcie_performance_levels = true;
-
                        for (i = 0; i < ps->performance_level_count; i++) {
                                if (data->pcie_gen_performance.max <
                                                ps->performance_levels[i].pcie_gen)
@@ -3661,7 +3697,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
                                                ps->performance_levels[i].pcie_lane)
                                        data->pcie_lane_performance.max =
                                                        ps->performance_levels[i].pcie_lane;
-
                                if (data->pcie_lane_performance.min >
                                                ps->performance_levels[i].pcie_lane)
                                        data->pcie_lane_performance.min =
@@ -4187,12 +4222,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate)
 {
        struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
        uint32_t mm_boot_level_offset, mm_boot_level_value;
-       struct phm_ppt_v1_information *table_info =
-                       (struct phm_ppt_v1_information *)(hwmgr->pptable);
 
        if (!bgate) {
-               data->smc_state_table.SamuBootLevel =
-                               (uint8_t) (table_info->mm_dep_table->count - 1);
+               data->smc_state_table.SamuBootLevel = 0;
                mm_boot_level_offset = data->dpm_table_start +
                                offsetof(SMU74_Discrete_DpmTable, SamuBootLevel);
                mm_boot_level_offset /= 4;
index beedf35..d717789 100644 (file)
@@ -312,6 +312,9 @@ struct polaris10_hwmgr {
 
        /* soft pptable for re-uploading into smu */
        void *soft_pp_table;
+
+       uint32_t                              avfs_vdroop_override_setting;
+       bool                                  apply_avfs_cks_off_voltage;
 };
 
 /* To convert to Q8.8 format for firmware */
index aba167f..b206632 100644 (file)
@@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr,
        int ret;
        struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr);
        struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend);
+       struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
 
-       if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS)
+       if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED)
                return 0;
 
+       ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+                       PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting);
+
        ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ?
                        0 : -1;
 
index 58742e0..a3c38bb 100644 (file)
@@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index)
        return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false;
 }
 
+bool acpi_atcs_notify_pcie_device_ready(void *device)
+{
+       int32_t temp_buffer = 1;
+
+       return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS,
+                               ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION,
+                                               &temp_buffer,
+                                               NULL,
+                                               0,
+                                               sizeof(temp_buffer),
+                                               0);
+}
+
+
 int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
 {
        struct atcs_pref_req_input atcs_input;
@@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
        int result;
        struct cgs_system_info info = {0};
 
-       if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST))
+       if (0 != acpi_atcs_notify_pcie_device_ready(device))
                return -EINVAL;
 
        info.size = sizeof(struct cgs_system_info);
@@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
                                                ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST,
                                                &atcs_input,
                                                &atcs_output,
-                                               0,
+                                               1,
                                                sizeof(atcs_input),
                                                sizeof(atcs_output));
                if (result != 0)
index da9f5f1..bf4e18f 100644 (file)
@@ -1302,3 +1302,46 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
 
        return 0;
 }
+
+int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param)
+{
+       ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
+
+       if (param == NULL)
+               return -EINVAL;
+
+       profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *)
+                       cgs_atom_get_data_table(hwmgr->device,
+                                       GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo),
+                                       NULL, NULL, NULL);
+       if (!profile)
+               return -1;
+
+       param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0;
+       param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1;
+       param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2;
+       param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma;
+       param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean;
+       param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma;
+       param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0;
+       param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1;
+       param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2;
+       param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0;
+       param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1;
+       param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2;
+       param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+       param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+       param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+       param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1;
+       param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2;
+       param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b;
+       param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv;
+       param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
+       param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
+       param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
+       param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
+       param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor;
+       param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
+
+       return 0;
+}
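A hedged sketch of how a caller is expected to use the new helper: a non-zero return simply means the VBIOS carries no ASIC_ProfilingInfo v3.6 table, so AVFS stays unconfigured rather than failing hard.

	struct pp_atom_ctrl__avfs_parameters avfs = {0};

	if (atomctrl_get_avfs_information(hwmgr, &avfs))
		return 0;   /* sketch: skip AVFS setup, nothing fatal */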
index d24ebb5..248c5db 100644 (file)
@@ -250,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment {
 };
 typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment;
 
+struct pp_atom_ctrl__avfs_parameters {
+       uint32_t  ulAVFS_meanNsigma_Acontant0;
+       uint32_t  ulAVFS_meanNsigma_Acontant1;
+       uint32_t  ulAVFS_meanNsigma_Acontant2;
+       uint16_t usAVFS_meanNsigma_DC_tol_sigma;
+       uint16_t usAVFS_meanNsigma_Platform_mean;
+       uint16_t usAVFS_meanNsigma_Platform_sigma;
+       uint32_t  ulGB_VDROOP_TABLE_CKSOFF_a0;
+       uint32_t  ulGB_VDROOP_TABLE_CKSOFF_a1;
+       uint32_t  ulGB_VDROOP_TABLE_CKSOFF_a2;
+       uint32_t  ulGB_VDROOP_TABLE_CKSON_a0;
+       uint32_t  ulGB_VDROOP_TABLE_CKSON_a1;
+       uint32_t  ulGB_VDROOP_TABLE_CKSON_a2;
+       uint32_t  ulAVFSGB_FUSE_TABLE_CKSOFF_m1;
+       uint16_t  usAVFSGB_FUSE_TABLE_CKSOFF_m2;
+       uint32_t  ulAVFSGB_FUSE_TABLE_CKSOFF_b;
+       uint32_t  ulAVFSGB_FUSE_TABLE_CKSON_m1;
+       uint16_t  usAVFSGB_FUSE_TABLE_CKSON_m2;
+       uint32_t  ulAVFSGB_FUSE_TABLE_CKSON_b;
+       uint16_t  usMaxVoltage_0_25mv;
+       uint8_t  ucEnableGB_VDROOP_TABLE_CKSOFF;
+       uint8_t  ucEnableGB_VDROOP_TABLE_CKSON;
+       uint8_t  ucEnableGB_FUSE_TABLE_CKSOFF;
+       uint8_t  ucEnableGB_FUSE_TABLE_CKSON;
+       uint16_t usPSM_Age_ComFactor;
+       uint8_t  ucEnableApplyAVFS_CKS_OFF_Voltage;
+       uint8_t  ucReserved;
+};
+
 extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
 extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
 extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
@@ -278,5 +307,8 @@ extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clo
 extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type,
                                uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
 extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table);
+
+extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param);
+
 #endif
 
index d27e8c4..233eb7f 100644 (file)
@@ -4489,6 +4489,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
        data->vdd_ci_control = TONGA_VOLTAGE_CONTROL_NONE;
        data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE;
        data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE;
+       data->force_pcie_gen = PP_PCIEGenInvalid;
 
        if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr,
                                VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
index 1b44f4e..f127198 100644 (file)
@@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table {
        ATOM_Tonga_SCLK_Dependency_Record entries[1];                            /* Dynamically allocate entries. */
 } ATOM_Tonga_SCLK_Dependency_Table;
 
+typedef struct _ATOM_Polaris_SCLK_Dependency_Record {
+       UCHAR  ucVddInd;                                                                                        /* Base voltage */
+       USHORT usVddcOffset;                                                                            /* Offset relative to base voltage */
+       ULONG ulSclk;
+       USHORT usEdcCurrent;
+       UCHAR  ucReliabilityTemperature;
+       UCHAR  ucCKSVOffsetandDisable;                  /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */
+       ULONG  ulSclkOffset;
+} ATOM_Polaris_SCLK_Dependency_Record;
+
+typedef struct _ATOM_Polaris_SCLK_Dependency_Table {
+       UCHAR ucRevId;
+       UCHAR ucNumEntries;                                                     /* Number of entries. */
+       ATOM_Polaris_SCLK_Dependency_Record entries[1];                          /* Dynamically allocate entries. */
+} ATOM_Polaris_SCLK_Dependency_Table;
+
 typedef struct _ATOM_Tonga_PCIE_Record {
        UCHAR ucPCIEGenSpeed;
        UCHAR usPCIELaneWidth;
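Both the Tonga and the new Polaris SCLK dependency records carry two fields in the single ucCKSVOffsetandDisable byte; decoding it looks like this (same masks as the table parser further down; 'rec' stands in for either record type):

	/* sketch: bit 7 disables CKS for the level, bits 0-6 are the offset */
	uint8_t  byte        = rec->ucCKSVOffsetandDisable;
	uint32_t cks_enable  = (((byte & 0x80) >> 7) == 0) ? 1 : 0;
	uint32_t cks_voffset = byte & 0x7F;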
index 296ec7e..671fdb4 100644 (file)
@@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table(
 static int get_sclk_voltage_dependency_table(
                struct pp_hwmgr *hwmgr,
                phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table,
-               const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table
+               const PPTable_Generic_SubTable_Header *sclk_dep_table
                )
 {
        uint32_t table_size, i;
        phm_ppt_v1_clock_voltage_dependency_table *sclk_table;
 
-       PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries),
-               "Invalid PowerPlay Table!", return -1);
+       if (sclk_dep_table->ucRevId < 1) {
+               const ATOM_Tonga_SCLK_Dependency_Table *tonga_table =
+                           (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table;
 
-       table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
-               * sclk_dep_table->ucNumEntries;
+               PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
 
-       sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
-               kzalloc(table_size, GFP_KERNEL);
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * tonga_table->ucNumEntries;
 
-       if (NULL == sclk_table)
-               return -ENOMEM;
+               sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+                       kzalloc(table_size, GFP_KERNEL);
 
-       memset(sclk_table, 0x00, table_size);
-
-       sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries;
-
-       for (i = 0; i < sclk_dep_table->ucNumEntries; i++) {
-               sclk_table->entries[i].vddInd =
-                       sclk_dep_table->entries[i].ucVddInd;
-               sclk_table->entries[i].vdd_offset =
-                       sclk_dep_table->entries[i].usVddcOffset;
-               sclk_table->entries[i].clk =
-                       sclk_dep_table->entries[i].ulSclk;
-               sclk_table->entries[i].cks_enable =
-                       (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
-               sclk_table->entries[i].cks_voffset =
-                       (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
-       }
+               if (NULL == sclk_table)
+                       return -ENOMEM;
+
+               memset(sclk_table, 0x00, table_size);
+
+               sclk_table->count = (uint32_t)tonga_table->ucNumEntries;
+
+               for (i = 0; i < tonga_table->ucNumEntries; i++) {
+                       sclk_table->entries[i].vddInd =
+                               tonga_table->entries[i].ucVddInd;
+                       sclk_table->entries[i].vdd_offset =
+                               tonga_table->entries[i].usVddcOffset;
+                       sclk_table->entries[i].clk =
+                               tonga_table->entries[i].ulSclk;
+                       sclk_table->entries[i].cks_enable =
+                               (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table->entries[i].cks_voffset =
+                               (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+               }
+       } else {
+               const ATOM_Polaris_SCLK_Dependency_Table *polaris_table =
+                           (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table;
 
+               PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries),
+                       "Invalid PowerPlay Table!", return -1);
+
+               table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
+                       * polaris_table->ucNumEntries;
+
+               sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *)
+                       kzalloc(table_size, GFP_KERNEL);
+
+               if (NULL == sclk_table)
+                       return -ENOMEM;
+
+               memset(sclk_table, 0x00, table_size);
+
+               sclk_table->count = (uint32_t)polaris_table->ucNumEntries;
+
+               for (i = 0; i < polaris_table->ucNumEntries; i++) {
+                       sclk_table->entries[i].vddInd =
+                               polaris_table->entries[i].ucVddInd;
+                       sclk_table->entries[i].vdd_offset =
+                               polaris_table->entries[i].usVddcOffset;
+                       sclk_table->entries[i].clk =
+                               polaris_table->entries[i].ulSclk;
+                       sclk_table->entries[i].cks_enable =
+                               (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0;
+                       sclk_table->entries[i].cks_voffset =
+                               (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F);
+                       sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset;
+               }
+       }
        *pp_tonga_sclk_dep_table = sclk_table;
 
        return 0;
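get_sclk_voltage_dependency_table() now keys off the generic sub-table header: ucRevId 0 keeps the Tonga record layout, while ucRevId 1 selects the longer Polaris record with its extra ulSclkOffset field. The allocation follows the usual phm layout of a count word followed by a flexible entry array; a condensed sketch using the names from the hunk:

	/* sketch: pick the entry count from whichever layout the revision
	 * id announces, then size the phm table accordingly */
	const PPTable_Generic_SubTable_Header *hdr = sclk_dep_table;
	uint32_t n = (hdr->ucRevId < 1)
		? ((const ATOM_Tonga_SCLK_Dependency_Table *)hdr)->ucNumEntries
		: ((const ATOM_Polaris_SCLK_Dependency_Table *)hdr)->ucNumEntries;
	uint32_t table_size = sizeof(uint32_t) +
		n * sizeof(phm_ppt_v1_clock_voltage_dependency_record);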
@@ -708,8 +745,8 @@ static int init_clock_voltage_dependency(
        const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table =
                (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
                le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
-       const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table =
-               (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) +
+       const PPTable_Generic_SubTable_Header *sclk_dep_table =
+               (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) +
                le16_to_cpu(powerplay_table->usSclkDependencyTableOffset));
        const ATOM_Tonga_Hard_Limit_Table *pHardLimits =
                (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) +
index 0c6a413..d41d37a 100644 (file)
@@ -27,6 +27,7 @@
 
 #pragma pack(push, 1)
 
+#define PPSMC_MSG_SetGBDroopSettings          ((uint16_t) 0x305)
 
 #define PPSMC_SWSTATE_FLAG_DC                           0x01
 #define PPSMC_SWSTATE_FLAG_UVD                          0x02
index 3bd5e69..3df5de2 100644 (file)
@@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device,
 extern int acpi_pcie_perf_request(void *device,
                                                uint8_t perf_req,
                                                bool advertise);
+extern bool acpi_atcs_notify_pcie_device_ready(void *device);
index 1a12d85..fd10a9f 100644 (file)
 #define SMU__NUM_LCLK_DPM_LEVELS 8
 #define SMU__NUM_PCIE_DPM_LEVELS 8
 
+#define EXP_M1  35
+#define EXP_M2  92821
+#define EXP_B   66629747
+
+#define EXP_M1_1  365
+#define EXP_M2_1  658700
+#define EXP_B_1   305506134
+
+#define EXP_M1_2  189
+#define EXP_M2_2  379692
+#define EXP_B_2   194609469
+
+#define EXP_M1_3  99
+#define EXP_M2_3  217915
+#define EXP_B_3   122255994
+
+#define EXP_M1_4  51
+#define EXP_M2_4  122643
+#define EXP_B_4   74893384
+
+#define EXP_M1_5  423
+#define EXP_M2_5  1103326
+#define EXP_B_5   728122621
+
 enum SID_OPTION {
        SID_OPTION_HI,
        SID_OPTION_LO,
@@ -548,20 +572,20 @@ struct SMU74_Firmware_Header {
        uint32_t CacConfigTable;
        uint32_t CacStatusTable;
 
-
        uint32_t mcRegisterTable;
 
-
        uint32_t mcArbDramTimingTable;
 
-
-
-
        uint32_t PmFuseTable;
        uint32_t Globals;
        uint32_t ClockStretcherTable;
        uint32_t VftTable;
-       uint32_t Reserved[21];
+       uint32_t Reserved1;
+       uint32_t AvfsTable;
+       uint32_t AvfsCksOffGbvTable;
+       uint32_t AvfsMeanNSigma;
+       uint32_t AvfsSclkOffsetTable;
+       uint32_t Reserved[16];
        uint32_t Signature;
 };
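The firmware-header change above carves five named members out of the old Reserved[21] block while keeping the header size, and therefore every following offset, unchanged; shown only as an illustration of that invariant:

	/* sketch: 1 reserved + 4 AVFS pointers + 16 reserved == the old 21 dwords */
	_Static_assert(1 + 4 + 16 == 21,
		       "AVFS members must not change SMU74_Firmware_Header size");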
 
@@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */
 struct SMU_ClockStretcherDataTableEntry {
        uint8_t minVID;
        uint8_t maxVID;
-
-
        uint16_t setting;
 };
 typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry;
@@ -769,6 +791,43 @@ struct VFT_TABLE_t {
 typedef struct VFT_TABLE_t VFT_TABLE_t;
 
 
+/* Total margin, root mean square of Fmax + DC + Platform */
+struct AVFS_Margin_t {
+       VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_Margin_t AVFS_Margin_t;
+
+#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2
+#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2
+
+struct GB_VDROOP_TABLE_t {
+       int32_t a0;
+       int32_t a1;
+       int32_t a2;
+       uint32_t spare;
+};
+typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t;
+
+struct AVFS_CksOff_Gbv_t {
+       VFT_CELL_t Cell[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t;
+
+struct AVFS_meanNsigma_t {
+       uint32_t Aconstant[3];
+       uint16_t DC_tol_sigma;
+       uint16_t Platform_mean;
+       uint16_t Platform_sigma;
+       uint16_t PSM_Age_CompFactor;
+       uint8_t  Static_Voltage_Offset[NUM_VFT_COLUMNS];
+};
+typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t;
+
+struct AVFS_Sclk_Offset_t {
+       uint16_t Sclk_Offset[8];
+};
+typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t;
+
 #endif
 
 
index 0dfe823..b85ff54 100644 (file)
@@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo {
 
 typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo;
 
+struct SMU_QuadraticCoeffs {
+       int32_t m1;
+       uint32_t b;
+
+       int16_t m2;
+       uint8_t m1_shift;
+       uint8_t m2_shift;
+};
+typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
+
 struct SMU74_Discrete_DpmTable {
 
        SMU74_PIDController                  GraphicsPIDController;
@@ -258,7 +268,14 @@ struct SMU74_Discrete_DpmTable {
        uint8_t                             ThermOutPolarity;
        uint8_t                             ThermOutMode;
        uint8_t                             BootPhases;
-       uint32_t                            Reserved[4];
+
+       uint8_t                             VRHotLevel;
+       uint8_t                             Reserved1[3];
+       uint16_t                            FanStartTemperature;
+       uint16_t                            FanStopTemperature;
+       uint16_t                            MaxVoltage;
+       uint16_t                            Reserved2;
+       uint32_t                            Reserved[1];
 
        SMU74_Discrete_GraphicsLevel        GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS];
        SMU74_Discrete_MemoryLevel          MemoryACPILevel;
@@ -347,6 +364,8 @@ struct SMU74_Discrete_DpmTable {
 
        uint32_t                            CurrSclkPllRange;
        sclkFcwRange_t                      SclkFcwRangeTable[NUM_SCLK_RANGE];
+       GB_VDROOP_TABLE_t                   BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES];
+       SMU_QuadraticCoeffs                 AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES];
 };
 
 typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable;
@@ -550,16 +569,6 @@ struct SMU7_AcpiScoreboard {
 
 typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard;
 
-struct SMU_QuadraticCoeffs {
-       int32_t m1;
-       uint32_t b;
-
-       int16_t m2;
-       uint8_t m1_shift;
-       uint8_t m2_shift;
-};
-typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs;
-
 struct SMU74_Discrete_PmFuses {
        uint8_t BapmVddCVidHiSidd[8];
        uint8_t BapmVddCVidLoSidd[8];
@@ -821,6 +830,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard;
 #define DB_PCC_SHIFT 26 
 #define DB_EDC_SHIFT 27
 
+#define BTCGB0_Vdroop_Enable_MASK  0x1
+#define BTCGB1_Vdroop_Enable_MASK  0x2
+#define AVFSGB0_Vdroop_Enable_MASK 0x4
+#define AVFSGB1_Vdroop_Enable_MASK 0x8
+
+#define BTCGB0_Vdroop_Enable_SHIFT  0
+#define BTCGB1_Vdroop_Enable_SHIFT  1
+#define AVFSGB0_Vdroop_Enable_SHIFT 2
+#define AVFSGB1_Vdroop_Enable_SHIFT 3
+
+
 #pragma pack(pop)
 
 
index 043b6ac..5dba7c5 100644 (file)
 static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = {
        /*  Min      pcie   DeepSleep Activity  CgSpll      CgSpll    CcPwr  CcPwr  Sclk         Enabled      Enabled                       Voltage    Power */
        /* Voltage, DpmLevel, DivId,  Level,  FuncCntl3,  FuncCntl4,  DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */
-       { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } },
-       { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } }
+       { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+       { 0x400ea446, 0x01, 0x04, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+       { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } },
+       { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } },
+       { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } },
+       { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } },
+       { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } },
+       { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }
 };
 
 static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 =
-       {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00,
-        0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00};
+       {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00};
 
 /**
 * Set the address for reading/writing the SMC SRAM space.
@@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
        && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C)));
 }
 
+static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr)
+{
+       uint32_t efuse;
+
+       efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4));
+       efuse &= 0x00000001;
+       if (efuse)
+               return true;
+
+       return false;
+}
+
 /**
 * Send a message to the SMC, and wait for its response.
 *
@@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr)
 */
 int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg)
 {
+       int ret;
+
        if (!polaris10_is_smc_ram_running(smumgr))
                return -1;
 
+
        SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
 
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
-               printk("Failed to send Previous Message.\n");
+       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
 
+       if (ret != 1)
+               printk("Failed to send previous message %x, SMC response was %d.\n", msg, ret);
 
        cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg);
 
        SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0);
 
-       if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP))
-               printk("Failed to send Message.\n");
+       ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP);
+
+       if (ret != 1)
+               printk("Failed to send message %x, SMC response was %d.\n", msg, ret);
 
        return 0;
 }
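polaris10_send_msg_to_smc() above first waits for any pending response (SMC_RESP must become non-zero), warns if the previous message failed, writes the new message id, and waits again. A typical caller only checks the return value; a hedged sketch reusing a message id that appears earlier in this patch:

	/* sketch: issue a parameterless SMC message, treat a dead SMC as -EINVAL */
	if (polaris10_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs))
		return -EINVAL;   /* SMC RAM not running */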
@@ -953,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr)
                (cgs_handle_t)smu_data->smu_buffer.handle);
                return -1;);
 
+       if (polaris10_is_hw_avfs_present(smumgr))
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT;
+       else
+               smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED;
+
        return 0;
 }
 
index 39802c0..3d34fc4 100644 (file)
@@ -266,9 +266,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
                if (!ret)
                        ret = atmel_hlcdc_check_endpoint(dev, &ep);
 
-               of_node_put(ep_np);
-               if (ret)
+               if (ret) {
+                       of_node_put(ep_np);
                        return ret;
+               }
        }
 
        for_each_endpoint_of_node(dev->dev->of_node, ep_np) {
@@ -276,9 +277,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev)
                if (!ret)
                        ret = atmel_hlcdc_attach_endpoint(dev, &ep);
 
-               of_node_put(ep_np);
-               if (ret)
+               if (ret) {
+                       of_node_put(ep_np);
                        return ret;
+               }
        }
 
        return 0;
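The two loops above rely on for_each_endpoint_of_node() dropping the reference on the previous endpoint each time it advances, so an explicit of_node_put() is only needed when bailing out of the walk early, which is exactly what this hunk changes. The pattern in isolation (sketch; 'parent' and handle_endpoint are illustrative):

	struct device_node *ep_np;
	int ret;

	for_each_endpoint_of_node(parent, ep_np) {
		ret = handle_endpoint(ep_np);        /* illustrative helper */
		if (ret) {
			of_node_put(ep_np);          /* the iterator still holds a ref */
			return ret;
		}
		/* no put on success: advancing the iterator (or reaching the
		 * end of the walk) releases ep_np */
	}
	return 0;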
index aef3ca8..016c191 100644 (file)
@@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane,
 
                atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff,
                                             factor_reg);
+       } else {
+               atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0);
        }
 }
 
index c204ef3..9bb99e2 100644 (file)
@@ -1296,14 +1296,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes);
  */
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
 {
+       struct drm_device *dev = state->dev;
+       unsigned crtc_mask = 0;
+       struct drm_crtc *crtc;
        int ret;
+       bool global = false;
+
+       drm_for_each_crtc(crtc, dev) {
+               if (crtc->acquire_ctx != state->acquire_ctx)
+                       continue;
+
+               crtc_mask |= drm_crtc_mask(crtc);
+               crtc->acquire_ctx = NULL;
+       }
+
+       if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) {
+               global = true;
+
+               dev->mode_config.acquire_ctx = NULL;
+       }
 
 retry:
        drm_modeset_backoff(state->acquire_ctx);
 
-       ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx);
+       ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
        if (ret)
                goto retry;
+
+       drm_for_each_crtc(crtc, dev)
+               if (drm_crtc_mask(crtc) & crtc_mask)
+                       crtc->acquire_ctx = state->acquire_ctx;
+
+       if (global)
+               dev->mode_config.acquire_ctx = state->acquire_ctx;
 }
 EXPORT_SYMBOL(drm_atomic_legacy_backoff);
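The backoff rework above remembers which CRTCs were bound to the backing acquire context before every lock is dropped, then rebinds exactly those CRTCs (and, if needed, the global mode_config context) after relocking. A condensed sketch of the round trip, with 'ctx' standing in for state->acquire_ctx:

	/* sketch: save which CRTCs point at ctx, drop the binding, restore it */
	unsigned int mask = 0;
	struct drm_crtc *crtc;

	drm_for_each_crtc(crtc, dev)
		if (crtc->acquire_ctx == ctx) {
			mask |= drm_crtc_mask(crtc);   /* one bit per CRTC index */
			crtc->acquire_ctx = NULL;
		}

	/* ... drm_modeset_backoff(ctx); drm_modeset_lock_all_ctx(dev, ctx); ... */

	drm_for_each_crtc(crtc, dev)
		if (mask & drm_crtc_mask(crtc))
			crtc->acquire_ctx = ctx;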
 
index a6e4243..26feb2f 100644 (file)
@@ -528,11 +528,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc)
 int drm_crtc_helper_set_config(struct drm_mode_set *set)
 {
        struct drm_device *dev;
-       struct drm_crtc *new_crtc;
-       struct drm_encoder *save_encoders, *new_encoder, *encoder;
+       struct drm_crtc **save_encoder_crtcs, *new_crtc;
+       struct drm_encoder **save_connector_encoders, *new_encoder, *encoder;
        bool mode_changed = false; /* if true do a full mode set */
        bool fb_changed = false; /* if true and !mode_changed just do a flip */
-       struct drm_connector *save_connectors, *connector;
+       struct drm_connector *connector;
        int count = 0, ro, fail = 0;
        const struct drm_crtc_helper_funcs *crtc_funcs;
        struct drm_mode_set save_set;
@@ -574,15 +574,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         * Allocate space for the backup of all (non-pointer) encoder and
         * connector data.
         */
-       save_encoders = kzalloc(dev->mode_config.num_encoder *
-                               sizeof(struct drm_encoder), GFP_KERNEL);
-       if (!save_encoders)
+       save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder *
+                               sizeof(struct drm_crtc *), GFP_KERNEL);
+       if (!save_encoder_crtcs)
                return -ENOMEM;
 
-       save_connectors = kzalloc(dev->mode_config.num_connector *
-                               sizeof(struct drm_connector), GFP_KERNEL);
-       if (!save_connectors) {
-               kfree(save_encoders);
+       save_connector_encoders = kzalloc(dev->mode_config.num_connector *
+                               sizeof(struct drm_encoder *), GFP_KERNEL);
+       if (!save_connector_encoders) {
+               kfree(save_encoder_crtcs);
                return -ENOMEM;
        }
 
@@ -593,12 +593,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         */
        count = 0;
        drm_for_each_encoder(encoder, dev) {
-               save_encoders[count++] = *encoder;
+               save_encoder_crtcs[count++] = encoder->crtc;
        }
 
        count = 0;
        drm_for_each_connector(connector, dev) {
-               save_connectors[count++] = *connector;
+               save_connector_encoders[count++] = connector->encoder;
        }
 
        save_set.crtc = set->crtc;
@@ -631,8 +631,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                mode_changed = true;
        }
 
-       /* take a reference on all connectors in set */
+       /* take a reference on all unbound connectors in set, reuse the
+        * already taken reference for bound connectors
+        */
        for (ro = 0; ro < set->num_connectors; ro++) {
+               if (set->connectors[ro]->encoder)
+                       continue;
                drm_connector_reference(set->connectors[ro]);
        }
 
@@ -754,30 +758,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                }
        }
 
-       /* after fail drop reference on all connectors in save set */
-       count = 0;
-       drm_for_each_connector(connector, dev) {
-               drm_connector_unreference(&save_connectors[count++]);
-       }
-
-       kfree(save_connectors);
-       kfree(save_encoders);
+       kfree(save_connector_encoders);
+       kfree(save_encoder_crtcs);
        return 0;
 
 fail:
        /* Restore all previous data. */
        count = 0;
        drm_for_each_encoder(encoder, dev) {
-               *encoder = save_encoders[count++];
+               encoder->crtc = save_encoder_crtcs[count++];
        }
 
        count = 0;
        drm_for_each_connector(connector, dev) {
-               *connector = save_connectors[count++];
+               connector->encoder = save_connector_encoders[count++];
        }
 
-       /* after fail drop reference on all connectors in set */
+       /* after fail drop reference on all unbound connectors in set, let
+        * bound connectors keep their reference
+        */
        for (ro = 0; ro < set->num_connectors; ro++) {
+               if (set->connectors[ro]->encoder)
+                       continue;
                drm_connector_unreference(set->connectors[ro]);
        }
 
@@ -787,8 +789,8 @@ fail:
                                      save_set.y, save_set.fb))
                DRM_ERROR("failed to restore config after modeset failure\n");
 
-       kfree(save_connectors);
-       kfree(save_encoders);
+       kfree(save_connector_encoders);
+       kfree(save_encoder_crtcs);
        return ret;
 }
 EXPORT_SYMBOL(drm_crtc_helper_set_config);
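The set_config fix above stops copying whole drm_encoder and drm_connector objects into the backup buffers (which also restored stale reference-counted state on failure) and instead snapshots only the link pointers. A hedged sketch of the equivalent allocations using kcalloc:

	/* sketch: back up only encoder->crtc and connector->encoder */
	struct drm_crtc **saved_crtcs;
	struct drm_encoder **saved_encoders;

	saved_crtcs = kcalloc(dev->mode_config.num_encoder,
			      sizeof(*saved_crtcs), GFP_KERNEL);
	saved_encoders = kcalloc(dev->mode_config.num_connector,
				 sizeof(*saved_encoders), GFP_KERNEL);
	if (!saved_crtcs || !saved_encoders) {
		kfree(saved_crtcs);
		kfree(saved_encoders);
		return -ENOMEM;
	}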
index a13edf5..6537908 100644 (file)
@@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                drm_dp_port_teardown_pdt(port, port->pdt);
 
                if (!port->input && port->vcpi.vcpi > 0) {
-                       if (mgr->mst_state) {
-                               drm_dp_mst_reset_vcpi_slots(mgr, port);
-                               drm_dp_update_payload_part1(mgr);
-                               drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
-                       }
+                       drm_dp_mst_reset_vcpi_slots(mgr, port);
+                       drm_dp_update_payload_part1(mgr);
+                       drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
                }
 
                kref_put(&port->kref, drm_dp_free_mst_port);
index 522cfd4..16353ee 100644 (file)
@@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu)
 
        etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
        etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
+       etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
        etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
        etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1;
 
index f6223f9..7f9901b 100644 (file)
@@ -31,7 +31,6 @@
 #include "exynos_drm_plane.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
 #include "exynos_drm_iommu.h"
 
 /*
index 468498e..4c1fb3f 100644 (file)
@@ -34,7 +34,7 @@
 
 struct exynos_dp_device {
        struct drm_encoder         encoder;
-       struct drm_connector       connector;
+       struct drm_connector       *connector;
        struct drm_bridge          *ptn_bridge;
        struct drm_device          *drm_dev;
        struct device              *dev;
@@ -70,7 +70,7 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
 static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data)
 {
        struct exynos_dp_device *dp = to_dp(plat_data);
-       struct drm_connector *connector = &dp->connector;
+       struct drm_connector *connector = dp->connector;
        struct drm_display_mode *mode;
        int num_modes = 0;
 
@@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
        int ret;
 
        drm_connector_register(connector);
+       dp->connector = connector;
 
        /* Pre-empt DP connector creation if there's a bridge */
        if (dp->ptn_bridge) {
index 011211e..edbd98f 100644 (file)
@@ -15,7 +15,6 @@
 #include <drm/drmP.h>
 #include "exynos_drm_drv.h"
 #include "exynos_drm_crtc.h"
-#include "exynos_drm_fbdev.h"
 
 static LIST_HEAD(exynos_drm_subdrv_list);
 
index 3efe1aa..d472164 100644 (file)
@@ -30,7 +30,6 @@
 
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
-#include "exynos_drm_fbdev.h"
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_iommu.h"
@@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = {
        .timing_base = 0x0,
        .has_clksel = 1,
        .has_limited_fmt = 1,
-       .has_hw_trigger = 1,
 };
 
 static struct fimd_driver_data exynos3_fimd_driver_data = {
@@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
        .lcdblk_vt_shift = 24,
        .lcdblk_bypass_shift = 15,
        .lcdblk_mic_bypass_shift = 11,
-       .trg_type = I80_HW_TRG,
        .has_shadowcon = 1,
        .has_vidoutcon = 1,
        .has_vtsel = 1,
        .has_mic_bypass = 1,
        .has_dp_clk = 1,
-       .has_hw_trigger = 1,
-       .has_trigger_per_te = 1,
 };
 
 struct fimd_context {
index 4935523..8564c3d 100644 (file)
 
 /* registers for base address */
 #define G2D_SRC_BASE_ADDR              0x0304
-#define G2D_SRC_STRIDE_REG             0x0308
+#define G2D_SRC_STRIDE                 0x0308
 #define G2D_SRC_COLOR_MODE             0x030C
 #define G2D_SRC_LEFT_TOP               0x0310
 #define G2D_SRC_RIGHT_BOTTOM           0x0314
 #define G2D_SRC_PLANE2_BASE_ADDR       0x0318
 #define G2D_DST_BASE_ADDR              0x0404
-#define G2D_DST_STRIDE_REG             0x0408
+#define G2D_DST_STRIDE                 0x0408
 #define G2D_DST_COLOR_MODE             0x040C
 #define G2D_DST_LEFT_TOP               0x0410
 #define G2D_DST_RIGHT_BOTTOM           0x0414
@@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
 
        switch (reg_offset) {
        case G2D_SRC_BASE_ADDR:
-       case G2D_SRC_STRIDE_REG:
+       case G2D_SRC_STRIDE:
        case G2D_SRC_COLOR_MODE:
        case G2D_SRC_LEFT_TOP:
        case G2D_SRC_RIGHT_BOTTOM:
@@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
                reg_type = REG_TYPE_SRC_PLANE2;
                break;
        case G2D_DST_BASE_ADDR:
-       case G2D_DST_STRIDE_REG:
+       case G2D_DST_STRIDE:
        case G2D_DST_COLOR_MODE:
        case G2D_DST_LEFT_TOP:
        case G2D_DST_RIGHT_BOTTOM:
@@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev,
                        } else
                                buf_info->types[reg_type] = BUF_TYPE_GEM;
                        break;
-               case G2D_SRC_STRIDE_REG:
-               case G2D_DST_STRIDE_REG:
+               case G2D_SRC_STRIDE:
+               case G2D_DST_STRIDE:
                        if (for_addr)
                                goto err;
 
index 55f1d37..77f12c0 100644 (file)
@@ -242,7 +242,7 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
            state->v_ratio == (1 << 15))
                height_ok = true;
 
-       if (width_ok & height_ok)
+       if (width_ok && height_ok)
                return 0;
 
        DRM_DEBUG_KMS("scaling mode is not supported");
index 5faacc6..7c334e9 100644 (file)
@@ -3481,6 +3481,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
index b235b6e..b9022fa 100644 (file)
@@ -139,6 +139,11 @@ fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
        else
                panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
 
+       panel_fixed_mode->width_mm = (dvo_timing->himage_hi << 8) |
+               dvo_timing->himage_lo;
+       panel_fixed_mode->height_mm = (dvo_timing->vimage_hi << 8) |
+               dvo_timing->vimage_lo;
+
        /* Some VBTs have bogus h/vtotal values */
        if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
                panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
@@ -1187,7 +1192,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
        }
        if (bdb->version < 106) {
                expected_size = 22;
-       } else if (bdb->version < 109) {
+       } else if (bdb->version < 111) {
                expected_size = 27;
        } else if (bdb->version < 195) {
                BUILD_BUG_ON(sizeof(struct old_child_dev_config) != 33);
@@ -1545,6 +1550,45 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin)
        return false;
 }
 
+/**
+ * intel_bios_is_port_present - is the specified digital port present
+ * @dev_priv:  i915 device instance
+ * @port:      port to check
+ *
+ * Return true if the device in %port is present.
+ */
+bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port)
+{
+       static const struct {
+               u16 dp, hdmi;
+       } port_mapping[] = {
+               [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, },
+               [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, },
+               [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, },
+               [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, },
+       };
+       int i;
+
+       /* FIXME maybe deal with port A as well? */
+       if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+               return false;
+
+       if (!dev_priv->vbt.child_dev_num)
+               return false;
+
+       for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
+               const union child_device_config *p_child =
+                       &dev_priv->vbt.child_dev[i];
+               if ((p_child->common.dvo_port == port_mapping[port].dp ||
+                    p_child->common.dvo_port == port_mapping[port].hdmi) &&
+                   (p_child->common.device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING |
+                                                   DEVICE_TYPE_DISPLAYPORT_OUTPUT)))
+                       return true;
+       }
+
+       return false;
+}
+
 /**
  * intel_bios_is_port_edp - is the device in given port eDP
  * @dev_priv:  i915 device instance
index 2113f40..56a1637 100644 (file)
@@ -8275,12 +8275,14 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_encoder *encoder;
+       int i;
        u32 val, final;
        bool has_lvds = false;
        bool has_cpu_edp = false;
        bool has_panel = false;
        bool has_ck505 = false;
        bool can_ssc = false;
+       bool using_ssc_source = false;
 
        /* We need to take the global config into account */
        for_each_intel_encoder(dev, encoder) {
@@ -8307,8 +8309,22 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                can_ssc = true;
        }
 
-       DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
-                     has_panel, has_lvds, has_ck505);
+       /* Check if any DPLLs are using the SSC source */
+       for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+               u32 temp = I915_READ(PCH_DPLL(i));
+
+               if (!(temp & DPLL_VCO_ENABLE))
+                       continue;
+
+               if ((temp & PLL_REF_INPUT_MASK) ==
+                   PLLB_REF_INPUT_SPREADSPECTRUMIN) {
+                       using_ssc_source = true;
+                       break;
+               }
+       }
+
+       DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+                     has_panel, has_lvds, has_ck505, using_ssc_source);
 
        /* Ironlake: try to setup display ref clock before DPLL
         * enabling. This is only under driver's control after
@@ -8345,9 +8361,9 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                                final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
                } else
                        final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
-       } else {
-               final |= DREF_SSC_SOURCE_DISABLE;
-               final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
+       } else if (using_ssc_source) {
+               final |= DREF_SSC_SOURCE_ENABLE;
+               final |= DREF_SSC1_ENABLE;
        }
 
        if (final == val)
@@ -8393,7 +8409,7 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
        } else {
-               DRM_DEBUG_KMS("Disabling SSC entirely\n");
+               DRM_DEBUG_KMS("Disabling CPU source output\n");
 
                val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
 
@@ -8404,16 +8420,20 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
                POSTING_READ(PCH_DREF_CONTROL);
                udelay(200);
 
-               /* Turn off the SSC source */
-               val &= ~DREF_SSC_SOURCE_MASK;
-               val |= DREF_SSC_SOURCE_DISABLE;
+               if (!using_ssc_source) {
+                       DRM_DEBUG_KMS("Disabling SSC source\n");
 
-               /* Turn off SSC1 */
-               val &= ~DREF_SSC1_ENABLE;
+                       /* Turn off the SSC source */
+                       val &= ~DREF_SSC_SOURCE_MASK;
+                       val |= DREF_SSC_SOURCE_DISABLE;
 
-               I915_WRITE(PCH_DREF_CONTROL, val);
-               POSTING_READ(PCH_DREF_CONTROL);
-               udelay(200);
+                       /* Turn off SSC1 */
+                       val &= ~DREF_SSC1_ENABLE;
+
+                       I915_WRITE(PCH_DREF_CONTROL, val);
+                       POSTING_READ(PCH_DREF_CONTROL);
+                       udelay(200);
+               }
        }
 
        BUG_ON(val != final);
@@ -14554,6 +14574,8 @@ static void intel_setup_outputs(struct drm_device *dev)
                if (I915_READ(PCH_DP_D) & DP_DETECTED)
                        intel_dp_init(dev, PCH_DP_D, PORT_D);
        } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
+               bool has_edp, has_port;
+
                /*
                 * The DP_DETECTED bit is the latched state of the DDC
                 * SDA pin at boot. However since eDP doesn't require DDC
@@ -14562,27 +14584,37 @@ static void intel_setup_outputs(struct drm_device *dev)
                 * Thus we can't rely on the DP_DETECTED bit alone to detect
                 * eDP ports. Consult the VBT as well as DP_DETECTED to
                 * detect eDP ports.
+                *
+                * Sadly the straps seem to be missing sometimes even for HDMI
+                * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
+                * and VBT for the presence of the port. Additionally we can't
+                * trust the port type the VBT declares as we've seen at least
+                * HDMI ports that the VBT claims are DP or eDP.
                 */
-               if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
-                   !intel_dp_is_edp(dev, PORT_B))
+               has_edp = intel_dp_is_edp(dev, PORT_B);
+               has_port = intel_bios_is_port_present(dev_priv, PORT_B);
+               if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+                       has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
+               if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
-               if (I915_READ(VLV_DP_B) & DP_DETECTED ||
-                   intel_dp_is_edp(dev, PORT_B))
-                       intel_dp_init(dev, VLV_DP_B, PORT_B);
 
-               if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
-                   !intel_dp_is_edp(dev, PORT_C))
+               has_edp = intel_dp_is_edp(dev, PORT_C);
+               has_port = intel_bios_is_port_present(dev_priv, PORT_C);
+               if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+                       has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
+               if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
                        intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
-               if (I915_READ(VLV_DP_C) & DP_DETECTED ||
-                   intel_dp_is_edp(dev, PORT_C))
-                       intel_dp_init(dev, VLV_DP_C, PORT_C);
 
                if (IS_CHERRYVIEW(dev)) {
-                       /* eDP not supported on port D, so don't check VBT */
-                       if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
-                               intel_hdmi_init(dev, CHV_HDMID, PORT_D);
-                       if (I915_READ(CHV_DP_D) & DP_DETECTED)
+                       /*
+                        * eDP not supported on port D,
+                        * so no need to worry about it
+                        */
+                       has_port = intel_bios_is_port_present(dev_priv, PORT_D);
+                       if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
                                intel_dp_init(dev, CHV_DP_D, PORT_D);
+                       if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+                               intel_hdmi_init(dev, CHV_HDMID, PORT_D);
                }
 
                intel_dsi_init(dev);
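
The intel_setup_outputs() rework above stops trusting the DP_DETECTED/SDVO_DETECTED straps alone: a port is probed when either the strap or the VBT reports it, and HDMI is registered only if the port did not turn out to be eDP. A reduced, standalone sketch of that decision logic (struct port_info and dp_init() are invented stand-ins, not the i915 API):

#include <stdbool.h>
#include <stdio.h>

struct port_info { bool strap_dp, strap_hdmi, vbt_present, vbt_edp; };

/* stand-in for intel_dp_init(): pretend DP/eDP encoder creation succeeds */
static bool dp_init(const struct port_info *p)
{
	(void)p;
	return true;
}

static void probe_port(const char *name, const struct port_info *p)
{
	bool has_edp = p->vbt_edp;	/* VBT says this port is eDP */

	/* try DP/eDP when either the strap or the VBT reports the port */
	if (p->strap_dp || p->vbt_present)
		has_edp &= dp_init(p);

	/* add HDMI only if the port did not end up as eDP */
	if ((p->strap_hdmi || p->vbt_present) && !has_edp)
		printf("%s: adding HDMI encoder\n", name);
}

int main(void)
{
	struct port_info edp_panel = { .vbt_present = true, .vbt_edp = true };
	struct port_info hdmi_only = { .strap_hdmi = true };

	probe_port("port B", &edp_panel);	/* eDP claims the port, no HDMI */
	probe_port("port C", &hdmi_only);	/* prints: adding HDMI encoder */
	return 0;
}
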
index f192f58..79cf2d5 100644 (file)
@@ -4977,9 +4977,6 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
        intel_display_power_get(dev_priv, power_domain);
 
        if (long_hpd) {
-               /* indicate that we need to restart link training */
-               intel_dp->train_set_valid = false;
-
                intel_dp_long_pulse(intel_dp->attached_connector);
                if (intel_dp->is_mst)
                        ret = IRQ_HANDLED;
@@ -5725,8 +5722,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
                fixed_mode = drm_mode_duplicate(dev,
                                        dev_priv->vbt.lfp_lvds_vbt_mode);
-               if (fixed_mode)
+               if (fixed_mode) {
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+                       connector->display_info.width_mm = fixed_mode->width_mm;
+                       connector->display_info.height_mm = fixed_mode->height_mm;
+               }
        }
        mutex_unlock(&dev->mode_config.mutex);
 
@@ -5923,9 +5923,9 @@ fail:
        return false;
 }
 
-void
-intel_dp_init(struct drm_device *dev,
-             i915_reg_t output_reg, enum port port)
+bool intel_dp_init(struct drm_device *dev,
+                  i915_reg_t output_reg,
+                  enum port port)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_digital_port *intel_dig_port;
@@ -5935,7 +5935,7 @@ intel_dp_init(struct drm_device *dev,
 
        intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
        if (!intel_dig_port)
-               return;
+               return false;
 
        intel_connector = intel_connector_alloc();
        if (!intel_connector)
@@ -5992,7 +5992,7 @@ intel_dp_init(struct drm_device *dev,
        if (!intel_dp_init_connector(intel_dig_port, intel_connector))
                goto err_init_connector;
 
-       return;
+       return true;
 
 err_init_connector:
        drm_encoder_cleanup(encoder);
@@ -6000,8 +6000,7 @@ err_encoder_init:
        kfree(intel_connector);
 err_connector_alloc:
        kfree(intel_dig_port);
-
-       return;
+       return false;
 }
 
 void intel_dp_mst_suspend(struct drm_device *dev)
index 0b8eefc..60fb39c 100644 (file)
@@ -85,8 +85,7 @@ static bool
 intel_dp_reset_link_train(struct intel_dp *intel_dp,
                        uint8_t dp_train_pat)
 {
-       if (!intel_dp->train_set_valid)
-               memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
+       memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
        intel_dp_set_signal_levels(intel_dp);
        return intel_dp_set_link_train(intel_dp, dp_train_pat);
 }
@@ -161,23 +160,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
                        break;
                }
 
-               /*
-                * if we used previously trained voltage and pre-emphasis values
-                * and we don't get clock recovery, reset link training values
-                */
-               if (intel_dp->train_set_valid) {
-                       DRM_DEBUG_KMS("clock recovery not ok, reset");
-                       /* clear the flag as we are not reusing train set */
-                       intel_dp->train_set_valid = false;
-                       if (!intel_dp_reset_link_train(intel_dp,
-                                                      DP_TRAINING_PATTERN_1 |
-                                                      DP_LINK_SCRAMBLING_DISABLE)) {
-                               DRM_ERROR("failed to enable link training\n");
-                               return;
-                       }
-                       continue;
-               }
-
                /* Check to see if we've tried the max voltage */
                for (i = 0; i < intel_dp->lane_count; i++)
                        if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
@@ -284,7 +266,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
                /* Make sure clock is still ok */
                if (!drm_dp_clock_recovery_ok(link_status,
                                              intel_dp->lane_count)) {
-                       intel_dp->train_set_valid = false;
                        intel_dp_link_training_clock_recovery(intel_dp);
                        intel_dp_set_link_train(intel_dp,
                                                training_pattern |
@@ -301,7 +282,6 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 
                /* Try 5 times, then try clock recovery if that fails */
                if (tries > 5) {
-                       intel_dp->train_set_valid = false;
                        intel_dp_link_training_clock_recovery(intel_dp);
                        intel_dp_set_link_train(intel_dp,
                                                training_pattern |
@@ -322,10 +302,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
 
        intel_dp_set_idle_link_train(intel_dp);
 
-       if (channel_eq) {
-               intel_dp->train_set_valid = true;
+       if (channel_eq)
                DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
-       }
 }
 
 void intel_dp_stop_link_train(struct intel_dp *intel_dp)
index 3ac7059..baf6f55 100644 (file)
@@ -366,6 +366,9 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
                                             DPLL_ID_PCH_PLL_B);
        }
 
+       if (!pll)
+               return NULL;
+
        /* reference the pll */
        intel_reference_shared_dpll(pll, crtc_state);
 
index a28b4aa..f7f0f01 100644 (file)
@@ -863,8 +863,6 @@ struct intel_dp {
        /* This is called before a link training is starterd */
        void (*prepare_link_retrain)(struct intel_dp *intel_dp);
 
-       bool train_set_valid;
-
        /* Displayport compliance testing */
        unsigned long compliance_test_type;
        unsigned long compliance_test_data;
@@ -1284,7 +1282,7 @@ void intel_csr_ucode_suspend(struct drm_i915_private *);
 void intel_csr_ucode_resume(struct drm_i915_private *);
 
 /* intel_dp.c */
-void intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
+bool intel_dp_init(struct drm_device *dev, i915_reg_t output_reg, enum port port);
 bool intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
                             struct intel_connector *intel_connector);
 void intel_dp_set_link_params(struct intel_dp *intel_dp,
index 366ad6c..4756ef6 100644 (file)
@@ -1545,6 +1545,9 @@ void intel_dsi_init(struct drm_device *dev)
                goto err;
        }
 
+       connector->display_info.width_mm = fixed_mode->width_mm;
+       connector->display_info.height_mm = fixed_mode->height_mm;
+
        intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
 
        intel_dsi_add_properties(intel_connector);
index d5a7cfe..647127f 100644 (file)
@@ -824,8 +824,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
 {
        struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
        struct intel_fbc *fbc = &dev_priv->fbc;
-       bool enable_by_default = IS_HASWELL(dev_priv) ||
-                                IS_BROADWELL(dev_priv);
+       bool enable_by_default = IS_BROADWELL(dev_priv);
 
        if (intel_vgpu_active(dev_priv->dev)) {
                fbc->no_fbc_reason = "VGPU is active";
index 2c3bd9c..a884470 100644 (file)
@@ -2142,6 +2142,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
        enum port port = intel_dig_port->port;
        uint8_t alternate_ddc_pin;
 
+       DRM_DEBUG_KMS("Adding HDMI connector on port %c\n",
+                     port_name(port));
+
        if (WARN(intel_dig_port->max_lanes < 4,
                 "Not enough lanes (%d) for HDMI on port %c\n",
                 intel_dig_port->max_lanes, port_name(port)))
index bc53c0d..96281e6 100644 (file)
@@ -1082,6 +1082,8 @@ void intel_lvds_init(struct drm_device *dev)
                fixed_mode = drm_mode_duplicate(dev, dev_priv->vbt.lfp_lvds_vbt_mode);
                if (fixed_mode) {
                        fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
+                       connector->display_info.width_mm = fixed_mode->width_mm;
+                       connector->display_info.height_mm = fixed_mode->height_mm;
                        goto out;
                }
        }
index c15051d..44fb0b3 100644 (file)
@@ -403,9 +403,10 @@ struct lvds_dvo_timing {
        u8 vsync_off:4;
        u8 rsvd0:6;
        u8 hsync_off_hi:2;
-       u8 h_image;
-       u8 v_image;
-       u8 max_hv;
+       u8 himage_lo;
+       u8 vimage_lo;
+       u8 vimage_hi:4;
+       u8 himage_hi:4;
        u8 h_border;
        u8 v_border;
        u8 rsvd1:3;
index 300ea03..d1f248f 100644 (file)
@@ -552,7 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev)
        if (ret)
                goto fini;
 
-       fbcon->helper.fbdev->pixmap.buf_align = 4;
+       if (fbcon->helper.fbdev)
+               fbcon->helper.fbdev->pixmap.buf_align = 4;
        return 0;
 
 fini:
index 18fab39..62ad030 100644 (file)
@@ -1614,7 +1614,7 @@ nvkm_device_pci_func = {
        .fini = nvkm_device_pci_fini,
        .resource_addr = nvkm_device_pci_resource_addr,
        .resource_size = nvkm_device_pci_resource_size,
-       .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64),
+       .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
 };
 
 int
index 323c79a..41bd5d0 100644 (file)
@@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
                struct pwr_rail_t *r = &stbl.rail[i];
                struct nvkm_iccsense_rail *rail;
                struct nvkm_iccsense_sensor *sensor;
+               int (*read)(struct nvkm_iccsense *,
+                           struct nvkm_iccsense_rail *);
 
                if (!r->mode || r->resistor_mohm == 0)
                        continue;
@@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev)
                if (!sensor)
                        continue;
 
-               rail = kmalloc(sizeof(*rail), GFP_KERNEL);
-               if (!rail)
-                       return -ENOMEM;
-
                switch (sensor->type) {
                case NVBIOS_EXTDEV_INA209:
                        if (r->rail != 0)
                                continue;
-                       rail->read = nvkm_iccsense_ina209_read;
+                       read = nvkm_iccsense_ina209_read;
                        break;
                case NVBIOS_EXTDEV_INA219:
                        if (r->rail != 0)
                                continue;
-                       rail->read = nvkm_iccsense_ina219_read;
+                       read = nvkm_iccsense_ina219_read;
                        break;
                case NVBIOS_EXTDEV_INA3221:
                        if (r->rail >= 3)
                                continue;
-                       rail->read = nvkm_iccsense_ina3221_read;
+                       read = nvkm_iccsense_ina3221_read;
                        break;
                default:
                        continue;
                }
 
+               rail = kmalloc(sizeof(*rail), GFP_KERNEL);
+               if (!rail)
+                       return -ENOMEM;
                sensor->rail_mask |= 1 << r->rail;
+               rail->read = read;
                rail->sensor = sensor;
                rail->idx = r->rail;
                rail->mohm = r->resistor_mohm;
index 2e216e2..259cd6e 100644 (file)
@@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev))
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                /* use frac fb div on RS780/RS880 */
-               if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+               if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880))
+                   && !radeon_crtc->ss_enabled)
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                if (ASIC_IS_DCE32(rdev) && mode->clock > 165000)
                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
@@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        if (radeon_crtc->ss.refdiv) {
                                radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
                                radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
-                               if (ASIC_IS_AVIVO(rdev))
+                               if (rdev->family >= CHIP_RV770)
                                        radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
                        }
                }
index e721e6b..21c44b2 100644 (file)
@@ -630,6 +630,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 /*
  * GPU helpers function.
  */
+
+/**
+ * radeon_device_is_virtual - check if we are running in a virtual environment
+ *
+ * Check if the asic has been passed through to a VM (all asics).
+ * Used at driver startup.
+ * Returns true if virtual or false if not.
+ */
+static bool radeon_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+       return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+       return false;
+#endif
+}
+
 /**
  * radeon_card_posted - check if the hw has already been initialized
  *
@@ -643,6 +660,10 @@ bool radeon_card_posted(struct radeon_device *rdev)
 {
        uint32_t reg;
 
+       /* for pass through, always force asic_init */
+       if (radeon_device_is_virtual())
+               return false;
+
        /* required for EFI mode on macbook2,1 which uses an r5xx asic */
        if (efi_enabled(EFI_BOOT) &&
            (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
@@ -1631,7 +1652,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
        radeon_agp_suspend(rdev);
 
        pci_save_state(dev->pdev);
-       if (freeze && rdev->family >= CHIP_R600) {
+       if (freeze && rdev->family >= CHIP_CEDAR) {
                rdev->asic->asic_reset(rdev, true);
                pci_restore_state(dev->pdev);
        } else if (suspend) {
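
radeon_card_posted() now forces asic_init when the GPU has been passed through to a guest, which radeon_device_is_virtual() detects via the X86_FEATURE_HYPERVISOR flag. The same information is visible from userspace as CPUID leaf 1, ECX bit 31; a small sketch using GCC/Clang's <cpuid.h> helper (x86 only, and only an analogue of the kernel check, not the kernel code):

#include <stdio.h>
#include <stdbool.h>
#include <cpuid.h>	/* GCC/Clang CPUID helper, x86 only */

/* CPUID leaf 1, ECX bit 31 is set when running under a hypervisor */
static bool running_virtual(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;

	return !!(ecx & (1u << 31));
}

int main(void)
{
	printf("virtual: %s\n", running_virtual() ? "yes" : "no");
	return 0;
}
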
index 99510e6..a4b357d 100644 (file)
@@ -1,6 +1,6 @@
 config DRM_SUN4I
        tristate "DRM Support for Allwinner A10 Display Engine"
-       depends on DRM && ARM
+       depends on DRM && ARM && COMMON_CLK
        depends on ARCH_SUNXI || COMPILE_TEST
        select DRM_GEM_CMA_HELPER
        select DRM_KMS_HELPER
index f7a15c1..3ab5604 100644 (file)
@@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
        /* Get the physical address of the buffer in memory */
        gem = drm_fb_cma_get_gem_obj(fb, 0);
 
-       DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr);
+       DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
 
        /* Compute the start of the displayed memory */
        bpp = drm_format_plane_cpp(fb->pixel_format, 0);
@@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
        paddr += (state->src_x >> 16) * bpp;
        paddr += (state->src_y >> 16) * fb->pitches[0];
 
-       DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr);
+       DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
 
        /* Write the 32 lower bits of the address (in bits) */
        lo_paddr = paddr << 3;
index 3ff668c..5b34631 100644 (file)
@@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw,
 static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate,
                                  unsigned long *parent_rate)
 {
-       return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate);
+       unsigned long best_parent = 0;
+       u8 best_div = 1;
+       int i;
+
+       for (i = 6; i < 127; i++) {
+               unsigned long ideal = rate * i;
+               unsigned long rounded;
+
+               rounded = clk_hw_round_rate(clk_hw_get_parent(hw),
+                                           ideal);
+
+               if (rounded == ideal) {
+                       best_parent = rounded;
+                       best_div = i;
+                       goto out;
+               }
+
+               if ((rounded < ideal) && (rounded > best_parent)) {
+                       best_parent = rounded;
+                       best_div = i;
+               }
+       }
+
+out:
+       *parent_rate = best_parent;
+
+       return best_parent / best_div;
 }
 
 static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate,
                               unsigned long parent_rate)
 {
        struct sun4i_dclk *dclk = hw_to_dclk(hw);
-       int div = DIV_ROUND_CLOSEST(parent_rate, rate);
+       u8 div = parent_rate / rate;
 
        return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG,
                                  GENMASK(6, 0), div);
@@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
        const char *clk_name, *parent_name;
        struct clk_init_data init;
        struct sun4i_dclk *dclk;
+       int ret;
 
        parent_name = __clk_get_name(tcon->sclk0);
-       of_property_read_string_index(dev->of_node, "clock-output-names", 0,
-                                     &clk_name);
+       ret = of_property_read_string_index(dev->of_node,
+                                           "clock-output-names", 0,
+                                           &clk_name);
+       if (ret)
+               return ret;
 
        dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL);
        if (!dclk)
@@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon)
        init.ops = &sun4i_dclk_ops;
        init.parent_names = &parent_name;
        init.num_parents = 1;
+       init.flags = CLK_SET_RATE_PARENT;
 
        dclk->regmap = tcon->regs;
        dclk->hw.init = &init;
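
The new sun4i_dclk_round_rate() walks the divider range 6..126, asks the parent clock what it can actually deliver for rate*div, and prefers an exact hit. A standalone sketch of that search, with parent_round() as an invented stand-in for clk_hw_round_rate() (here it simply rounds down to an arbitrary 3 MHz step, purely an assumption for the sketch):

#include <stdio.h>

/* stand-in for clk_hw_round_rate() on the parent clock */
static unsigned long parent_round(unsigned long rate)
{
	return rate - (rate % 3000000UL);
}

/* pick the divider whose achievable parent rate divides down closest to
 * 'rate', preferring an exact match, as in sun4i_dclk_round_rate() */
static unsigned long dclk_round(unsigned long rate, unsigned long *parent_rate)
{
	unsigned long best_parent = 0;
	unsigned int best_div = 1, i;

	for (i = 6; i < 127; i++) {
		unsigned long ideal = rate * i;
		unsigned long rounded = parent_round(ideal);

		if (rounded == ideal) {
			best_parent = rounded;
			best_div = i;
			break;
		}
		if (rounded < ideal && rounded > best_parent) {
			best_parent = rounded;
			best_div = i;
		}
	}

	*parent_rate = best_parent;
	return best_parent / best_div;
}

int main(void)
{
	unsigned long parent, pixel = dclk_round(33000000UL, &parent);

	printf("parent=%lu pixel=%lu\n", parent, pixel);
	return 0;
}
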
index 76e922b..257d2b4 100644 (file)
 #include "sun4i_layer.h"
 #include "sun4i_tcon.h"
 
-static int sun4i_drv_connector_plug_all(struct drm_device *drm)
-{
-       struct drm_connector *connector, *failed;
-       int ret;
-
-       mutex_lock(&drm->mode_config.mutex);
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               ret = drm_connector_register(connector);
-               if (ret) {
-                       failed = connector;
-                       goto err;
-               }
-       }
-       mutex_unlock(&drm->mode_config.mutex);
-       return 0;
-
-err:
-       list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
-               if (failed == connector)
-                       break;
-
-               drm_connector_unregister(connector);
-       }
-       mutex_unlock(&drm->mode_config.mutex);
-
-       return ret;
-}
-
 static int sun4i_drv_enable_vblank(struct drm_device *drm, unsigned int pipe)
 {
        struct sun4i_drv *drv = drm->dev_private;
@@ -125,6 +97,22 @@ static struct drm_driver sun4i_drv_driver = {
        .disable_vblank         = sun4i_drv_disable_vblank,
 };
 
+static void sun4i_remove_framebuffers(void)
+{
+       struct apertures_struct *ap;
+
+       ap = alloc_apertures(1);
+       if (!ap)
+               return;
+
+       /* The framebuffer can be located anywhere in RAM */
+       ap->ranges[0].base = 0;
+       ap->ranges[0].size = ~0;
+
+       remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
+       kfree(ap);
+}
+
 static int sun4i_drv_bind(struct device *dev)
 {
        struct drm_device *drm;
@@ -172,6 +160,9 @@ static int sun4i_drv_bind(struct device *dev)
        }
        drm->irq_enabled = true;
 
+       /* Remove early framebuffers (ie. simplefb) */
+       sun4i_remove_framebuffers();
+
        /* Create our framebuffer */
        drv->fbdev = sun4i_framebuffer_init(drm);
        if (IS_ERR(drv->fbdev)) {
@@ -187,7 +178,7 @@ static int sun4i_drv_bind(struct device *dev)
        if (ret)
                goto free_drm;
 
-       ret = sun4i_drv_connector_plug_all(drm);
+       ret = drm_connector_register_all(drm);
        if (ret)
                goto unregister_drm;
 
@@ -204,6 +195,7 @@ static void sun4i_drv_unbind(struct device *dev)
 {
        struct drm_device *drm = dev_get_drvdata(dev);
 
+       drm_connector_unregister_all(drm);
        drm_dev_unregister(drm);
        drm_kms_helper_poll_fini(drm);
        sun4i_framebuffer_free(drm);
index ab64948..aaffe9e 100644 (file)
@@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector)
 static int sun4i_rgb_mode_valid(struct drm_connector *connector,
                                struct drm_display_mode *mode)
 {
+       struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector);
+       struct sun4i_drv *drv = rgb->drv;
+       struct sun4i_tcon *tcon = drv->tcon;
        u32 hsync = mode->hsync_end - mode->hsync_start;
        u32 vsync = mode->vsync_end - mode->vsync_start;
+       unsigned long rate = mode->clock * 1000;
+       long rounded_rate;
 
        DRM_DEBUG_DRIVER("Validating modes...\n");
 
@@ -87,6 +92,15 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
 
        DRM_DEBUG_DRIVER("Vertical parameters OK\n");
 
+       rounded_rate = clk_round_rate(tcon->dclk, rate);
+       if (rounded_rate < rate)
+               return MODE_CLOCK_LOW;
+
+       if (rounded_rate > rate)
+               return MODE_CLOCK_HIGH;
+
+       DRM_DEBUG_DRIVER("Clock rate OK\n");
+
        return MODE_OK;
 }
 
@@ -203,7 +217,7 @@ int sun4i_rgb_init(struct drm_device *drm)
        int ret;
 
        /* If we don't have a panel, there's no point in going on */
-       if (!tcon->panel)
+       if (IS_ERR(tcon->panel))
                return -ENODEV;
 
        rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL);
index 9f19b0e..652385f 100644 (file)
@@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node)
 
        remote = of_graph_get_remote_port_parent(end_node);
        if (!remote) {
-               DRM_DEBUG_DRIVER("Enable to parse remote node\n");
+               DRM_DEBUG_DRIVER("Unable to parse remote node\n");
                return ERR_PTR(-EINVAL);
        }
 
-       return of_drm_find_panel(remote);
+       return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER);
 }
 
 static int sun4i_tcon_bind(struct device *dev, struct device *master,
@@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master,
                return 0;
        }
 
-       return sun4i_rgb_init(drm);
+       ret = sun4i_rgb_init(drm);
+       if (ret < 0)
+               goto err_free_clocks;
+
+       return 0;
 
 err_free_clocks:
        sun4i_tcon_free_clocks(tcon);
@@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev)
         * Defer the probe.
         */
        panel = sun4i_tcon_find_panel(node);
-       if (IS_ERR(panel)) {
-               /*
-                * If we don't have a panel endpoint, just go on
-                */
-               if (PTR_ERR(panel) != -ENODEV)
-                       return -EPROBE_DEFER;
+
+       /*
+        * If we don't have a panel endpoint, just go on
+        */
+       if (PTR_ERR(panel) == -EPROBE_DEFER) {
+               DRM_DEBUG_DRIVER("Still waiting for our panel. Deferring...\n");
+               return -EPROBE_DEFER;
        }
 
        return component_add(&pdev->dev, &sun4i_tcon_ops);
index aad8c16..0cd4f72 100644 (file)
@@ -261,7 +261,7 @@ static void elo_remove(struct hid_device *hdev)
        struct elo_priv *priv = hid_get_drvdata(hdev);
 
        hid_hw_stop(hdev);
-       flush_workqueue(wq);
+       cancel_delayed_work_sync(&priv->work);
        kfree(priv);
 }
 
index c741f5e..95b7d61 100644 (file)
@@ -1401,6 +1401,11 @@ static const struct hid_device_id mt_devices[] = {
                MT_USB_DEVICE(USB_VENDOR_ID_NOVATEK,
                        USB_DEVICE_ID_NOVATEK_PCT) },
 
+       /* Ntrig Panel */
+       { .driver_data = MT_CLS_NSMU,
+               HID_DEVICE(BUS_I2C, HID_GROUP_MULTITOUCH_WIN_8,
+                       USB_VENDOR_ID_NTRIG, 0x1b05) },
+
        /* PixArt optical touch screen */
        { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
                MT_USB_DEVICE(USB_VENDOR_ID_PIXART,
index 2f1ddca..700145b 100644 (file)
@@ -516,13 +516,13 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
                                        goto inval;
                        } else if (uref->usage_index >= field->report_count)
                                goto inval;
-
-                       else if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
-                                (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
-                                 uref->usage_index + uref_multi->num_values > field->report_count))
-                               goto inval;
                }
 
+               if ((cmd == HIDIOCGUSAGES || cmd == HIDIOCSUSAGES) &&
+                   (uref_multi->num_values > HID_MAX_MULTI_USAGES ||
+                    uref->usage_index + uref_multi->num_values > field->report_count))
+                       goto inval;
+
                switch (cmd) {
                case HIDIOCGUSAGE:
                        uref->value = field->value[uref->usage_index];
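
The hiddev change above moves the multi-usage bounds check out of the report-type branch so it is enforced for every HIDIOCGUSAGES/HIDIOCSUSAGES request. A standalone sketch of the same validation, rejecting any window [index, index+num) that does not fit in the field (struct field, read_usages() and MAX_MULTI are invented for the sketch; the real limit is HID_MAX_MULTI_USAGES):

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define MAX_MULTI 32	/* analogue of HID_MAX_MULTI_USAGES, value assumed */

struct field { unsigned int count; int values[64]; };

/* reject multi-usage requests whose window falls outside the field */
static int read_usages(const struct field *f, unsigned int index,
		       unsigned int num, int *out)
{
	if (num > MAX_MULTI || index >= f->count || index + num > f->count)
		return -EINVAL;

	memcpy(out, &f->values[index], num * sizeof(*out));
	return 0;
}

int main(void)
{
	struct field f = { .count = 8 };
	int out[MAX_MULTI];

	printf("%d\n", read_usages(&f, 6, 4, out));	/* -EINVAL: 6+4 > 8 */
	printf("%d\n", read_usages(&f, 4, 4, out));	/* 0: window fits */
	return 0;
}
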
index c43318d..2ac87d5 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/sched.h>
+#include <linux/ctype.h>
 
 #include <linux/i8k.h>
 
 
 static DEFINE_MUTEX(i8k_mutex);
 static char bios_version[4];
+static char bios_machineid[16];
 static struct device *i8k_hwmon_dev;
 static u32 i8k_hwmon_flags;
 static uint i8k_fan_mult = I8K_FAN_MULT;
 static uint i8k_pwm_mult;
 static uint i8k_fan_max = I8K_FAN_HIGH;
+static bool disallow_fan_type_call;
 
 #define I8K_HWMON_HAVE_TEMP1   (1 << 0)
 #define I8K_HWMON_HAVE_TEMP2   (1 << 1)
@@ -94,13 +97,13 @@ module_param(ignore_dmi, bool, 0);
 MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
 
 #if IS_ENABLED(CONFIG_I8K)
-static bool restricted;
+static bool restricted = true;
 module_param(restricted, bool, 0);
-MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
+MODULE_PARM_DESC(restricted, "Restrict fan control and serial number to CAP_SYS_ADMIN (default: 1)");
 
 static bool power_status;
 module_param(power_status, bool, 0600);
-MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
+MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k (default: 0)");
 #endif
 
 static uint fan_mult;
@@ -235,14 +238,28 @@ static int i8k_get_fan_speed(int fan)
 /*
  * Read the fan type.
  */
-static int i8k_get_fan_type(int fan)
+static int _i8k_get_fan_type(int fan)
 {
        struct smm_regs regs = { .eax = I8K_SMM_GET_FAN_TYPE, };
 
+       if (disallow_fan_type_call)
+               return -EINVAL;
+
        regs.ebx = fan & 0xff;
        return i8k_smm(&regs) ? : regs.eax & 0xff;
 }
 
+static int i8k_get_fan_type(int fan)
+{
+       /* I8K_SMM_GET_FAN_TYPE SMM call is expensive, so cache values */
+       static int types[2] = { INT_MIN, INT_MIN };
+
+       if (types[fan] == INT_MIN)
+               types[fan] = _i8k_get_fan_type(fan);
+
+       return types[fan];
+}
+
 /*
  * Read the fan nominal rpm for specific fan speed.
  */
@@ -387,14 +404,20 @@ i8k_ioctl_unlocked(struct file *fp, unsigned int cmd, unsigned long arg)
 
        switch (cmd) {
        case I8K_BIOS_VERSION:
+               if (!isdigit(bios_version[0]) || !isdigit(bios_version[1]) ||
+                   !isdigit(bios_version[2]))
+                       return -EINVAL;
+
                val = (bios_version[0] << 16) |
                                (bios_version[1] << 8) | bios_version[2];
                break;
 
        case I8K_MACHINE_ID:
-               memset(buff, 0, 16);
-               strlcpy(buff, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
-                       sizeof(buff));
+               if (restricted && !capable(CAP_SYS_ADMIN))
+                       return -EPERM;
+
+               memset(buff, 0, sizeof(buff));
+               strlcpy(buff, bios_machineid, sizeof(buff));
                break;
 
        case I8K_FN_STATUS:
@@ -511,7 +534,7 @@ static int i8k_proc_show(struct seq_file *seq, void *offset)
        seq_printf(seq, "%s %s %s %d %d %d %d %d %d %d\n",
                   I8K_PROC_FMT,
                   bios_version,
-                  i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+                  (restricted && !capable(CAP_SYS_ADMIN)) ? "-1" : bios_machineid,
                   cpu_temp,
                   left_fan, right_fan, left_speed, right_speed,
                   ac_power, fn_key);
@@ -718,6 +741,9 @@ static struct attribute *i8k_attrs[] = {
 static umode_t i8k_is_visible(struct kobject *kobj, struct attribute *attr,
                              int index)
 {
+       if (disallow_fan_type_call &&
+           (index == 9 || index == 12))
+               return 0;
        if (index >= 0 && index <= 1 &&
            !(i8k_hwmon_flags & I8K_HWMON_HAVE_TEMP1))
                return 0;
@@ -767,13 +793,17 @@ static int __init i8k_init_hwmon(void)
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
 
-       /* First fan attributes, if fan type is OK */
-       err = i8k_get_fan_type(0);
+       /* First fan attributes, if fan status or type is OK */
+       err = i8k_get_fan_status(0);
+       if (err < 0)
+               err = i8k_get_fan_type(0);
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN1;
 
-       /* Second fan attributes, if fan type is OK */
-       err = i8k_get_fan_type(1);
+       /* Second fan attributes, if fan status or type is OK */
+       err = i8k_get_fan_status(1);
+       if (err < 0)
+               err = i8k_get_fan_type(1);
        if (err >= 0)
                i8k_hwmon_flags |= I8K_HWMON_HAVE_FAN2;
 
@@ -929,12 +959,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 
 MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
 
-static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
+/*
+ * On some machines, once I8K_SMM_GET_FAN_TYPE is issued the CPU fan speed
+ * starts going up and down randomly due to a bug in the Dell SMM or BIOS.
+ * Here is a blacklist of affected Dell machines for which we disallow the
+ * I8K_SMM_GET_FAN_TYPE call.
+ * See bug: https://bugzilla.kernel.org/show_bug.cgi?id=100121
+ */
+static struct dmi_system_id i8k_blacklist_fan_type_dmi_table[] __initdata = {
        {
-               /*
-                * CPU fan speed going up and down on Dell Studio XPS 8000
-                * for unknown reasons.
-                */
                .ident = "Dell Studio XPS 8000",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -942,16 +974,19 @@ static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
                },
        },
        {
-               /*
-                * CPU fan speed going up and down on Dell Studio XPS 8100
-                * for unknown reasons.
-                */
                .ident = "Dell Studio XPS 8100",
                .matches = {
                        DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
                        DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
                },
        },
+       {
+               .ident = "Dell Inspiron 580",
+               .matches = {
+                       DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+                       DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Inspiron 580 "),
+               },
+       },
        { }
 };
 
@@ -966,8 +1001,7 @@ static int __init i8k_probe(void)
        /*
         * Get DMI information
         */
-       if (!dmi_check_system(i8k_dmi_table) ||
-           dmi_check_system(i8k_blacklist_dmi_table)) {
+       if (!dmi_check_system(i8k_dmi_table)) {
                if (!ignore_dmi && !force)
                        return -ENODEV;
 
@@ -978,8 +1012,13 @@ static int __init i8k_probe(void)
                        i8k_get_dmi_data(DMI_BIOS_VERSION));
        }
 
+       if (dmi_check_system(i8k_blacklist_fan_type_dmi_table))
+               disallow_fan_type_call = true;
+
        strlcpy(bios_version, i8k_get_dmi_data(DMI_BIOS_VERSION),
                sizeof(bios_version));
+       strlcpy(bios_machineid, i8k_get_dmi_data(DMI_PRODUCT_SERIAL),
+               sizeof(bios_machineid));
 
        /*
         * Get SMM Dell signature
index 847d1b5..688be9e 100644 (file)
@@ -300,13 +300,10 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
        if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
-                * buffer. As such zero-out the buffer so that we don't end
-                * up with stale data.
-                *
-                * Since the tracer is still enabled drvdata::buf
-                * can't be NULL.
+                * buffer. The trace buffer is cleared in tmc_etr_enable_hw(),
+                * so we don't have to explicitly clear it. Also, since the
+                * tracer is still enabled drvdata::buf can't be NULL.
                 */
-               memset(drvdata->buf, 0, drvdata->size);
                tmc_etr_enable_hw(drvdata);
        } else {
                /*
@@ -315,7 +312,7 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
                 */
                vaddr = drvdata->vaddr;
                paddr = drvdata->paddr;
-               drvdata->buf = NULL;
+               drvdata->buf = drvdata->vaddr = NULL;
        }
 
        drvdata->reading = false;
index 5443d03..d08d1ab 100644 (file)
@@ -385,7 +385,6 @@ static int _coresight_build_path(struct coresight_device *csdev,
        int i;
        bool found = false;
        struct coresight_node *node;
-       struct coresight_connection *conn;
 
        /* An activated sink has been found.  Enqueue the element */
        if ((csdev->type == CORESIGHT_DEV_TYPE_SINK ||
@@ -394,8 +393,9 @@ static int _coresight_build_path(struct coresight_device *csdev,
 
        /* Not a sink - recursively explore each port found on this element */
        for (i = 0; i < csdev->nr_outport; i++) {
-               conn = &csdev->conns[i];
-               if (_coresight_build_path(conn->child_dev, path) == 0) {
+               struct coresight_device *child_dev = csdev->conns[i].child_dev;
+
+               if (child_dev && _coresight_build_path(child_dev, path) == 0) {
                        found = true;
                        break;
                }
@@ -425,6 +425,7 @@ out:
 struct list_head *coresight_build_path(struct coresight_device *csdev)
 {
        struct list_head *path;
+       int rc;
 
        path = kzalloc(sizeof(struct list_head), GFP_KERNEL);
        if (!path)
@@ -432,9 +433,10 @@ struct list_head *coresight_build_path(struct coresight_device *csdev)
 
        INIT_LIST_HEAD(path);
 
-       if (_coresight_build_path(csdev, path)) {
+       rc = _coresight_build_path(csdev, path);
+       if (rc) {
                kfree(path);
-               path = NULL;
+               return ERR_PTR(rc);
        }
 
        return path;
@@ -507,8 +509,9 @@ int coresight_enable(struct coresight_device *csdev)
                goto out;
 
        path = coresight_build_path(csdev);
-       if (!path) {
+       if (IS_ERR(path)) {
                pr_err("building path(s) failed\n");
+               ret = PTR_ERR(path);
                goto out;
        }
 
index a1e642e..7fddc13 100644 (file)
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_accel_buffer_setup_ops = {
 
 int st_accel_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_accel_buffer_setup_ops);
 }
 
index dc73f2d..4d95bfc 100644 (file)
@@ -741,6 +741,7 @@ static const struct iio_info accel_info = {
 static const struct iio_trigger_ops st_accel_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_ACCEL_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_ACCEL_TRIGGER_OPS (&st_accel_trigger_ops)
 #else
index c558985..f1693db 100644 (file)
@@ -57,31 +57,20 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
        struct iio_poll_func *pf = p;
        struct iio_dev *indio_dev = pf->indio_dev;
        struct st_sensor_data *sdata = iio_priv(indio_dev);
+       s64 timestamp;
 
-       /* If we have a status register, check if this IRQ came from us */
-       if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
-               u8 status;
-
-               len = sdata->tf->read_byte(&sdata->tb, sdata->dev,
-                          sdata->sensor_settings->drdy_irq.addr_stat_drdy,
-                          &status);
-               if (len < 0)
-                       dev_err(sdata->dev, "could not read channel status\n");
-
-               /*
-                * If this was not caused by any channels on this sensor,
-                * return IRQ_NONE
-                */
-               if (!(status & (u8)indio_dev->active_scan_mask[0]))
-                       return IRQ_NONE;
-       }
+       /* If we do timestamping here, do it before reading the values */
+       if (sdata->hw_irq_trigger)
+               timestamp = sdata->hw_timestamp;
+       else
+               timestamp = iio_get_time_ns();
 
        len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
        if (len < 0)
                goto st_sensors_get_buffer_element_error;
 
        iio_push_to_buffers_with_timestamp(indio_dev, sdata->buffer_data,
-               pf->timestamp);
+                                          timestamp);
 
 st_sensors_get_buffer_element_error:
        iio_trigger_notify_done(indio_dev->trig);
index dffe006..9e59c90 100644 (file)
@@ -363,6 +363,11 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
        if (err < 0)
                return err;
 
+       /* Disable DRDY, this might still be enabled after reboot. */
+       err = st_sensors_set_dataready_irq(indio_dev, false);
+       if (err < 0)
+               return err;
+
        if (sdata->current_fullscale) {
                err = st_sensors_set_fullscale(indio_dev,
                                                sdata->current_fullscale->num);
@@ -424,6 +429,9 @@ int st_sensors_set_dataready_irq(struct iio_dev *indio_dev, bool enable)
        else
                drdy_mask = sdata->sensor_settings->drdy_irq.mask_int2;
 
+       /* Flag to the poll function that the hardware trigger is in use */
+       sdata->hw_irq_trigger = enable;
+
        /* Enable/Disable the interrupt generator for data ready. */
        err = st_sensors_write_data_with_mask(indio_dev,
                                        sdata->sensor_settings->drdy_irq.addr,
index da72279..296e4ff 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 #include "st_sensors_core.h"
 
+/**
+ * st_sensors_irq_handler() - top half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_handler(int irq, void *p)
+{
+       struct iio_trigger *trig = p;
+       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+       struct st_sensor_data *sdata = iio_priv(indio_dev);
+
+       /* Get the time stamp as close in time as possible */
+       sdata->hw_timestamp = iio_get_time_ns();
+       return IRQ_WAKE_THREAD;
+}
+
+/**
+ * st_sensors_irq_thread() - bottom half of the IRQ-based triggers
+ * @irq: irq number
+ * @p: private handler data
+ */
+irqreturn_t st_sensors_irq_thread(int irq, void *p)
+{
+       struct iio_trigger *trig = p;
+       struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
+       struct st_sensor_data *sdata = iio_priv(indio_dev);
+       int ret;
+
+       /*
+        * If this trigger is backed by a hardware interrupt and we have a
+        * status register, check if this IRQ came from us
+        */
+       if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
+               u8 status;
+
+               ret = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+                          sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+                          &status);
+               if (ret < 0) {
+                       dev_err(sdata->dev, "could not read channel status\n");
+                       goto out_poll;
+               }
+               /*
+                * the lower bits of .active_scan_mask[0] are directly mapped
+                * to the channels on the sensor: either bit 0 for
+                * one-dimensional sensors, or e.g. x,y,z for accelerometers,
+                * gyroscopes or magnetometers. No sensor uses more than 3
+                * channels, so cut the other status bits here.
+                */
+               status &= 0x07;
+
+               /*
+                * If this was not caused by any channels on this sensor,
+                * return IRQ_NONE
+                */
+               if (!indio_dev->active_scan_mask)
+                       return IRQ_NONE;
+               if (!(status & (u8)indio_dev->active_scan_mask[0]))
+                       return IRQ_NONE;
+       }
+
+out_poll:
+       /* It's our IRQ: proceed to handle the register polling */
+       iio_trigger_poll_chained(p);
+       return IRQ_HANDLED;
+}
+
 int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops)
 {
@@ -30,6 +97,10 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                return -ENOMEM;
        }
 
+       iio_trigger_set_drvdata(sdata->trig, indio_dev);
+       sdata->trig->ops = trigger_ops;
+       sdata->trig->dev.parent = sdata->dev;
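+       /*
+        * The above must be set before the IRQ is requested: the IRQ
+        * handlers fetch the iio_dev from the trigger's drvdata.
+        */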
+
        irq = sdata->get_irq_data_ready(indio_dev);
        irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
        /*
@@ -77,9 +148,12 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
            sdata->sensor_settings->drdy_irq.addr_stat_drdy)
                irq_trig |= IRQF_SHARED;
 
-       err = request_threaded_irq(irq,
-                       iio_trigger_generic_data_rdy_poll,
-                       NULL,
+       /* Let's create an interrupt thread masking the hard IRQ here */
+       irq_trig |= IRQF_ONESHOT;
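+       /* IRQF_ONESHOT keeps the line masked until the thread has run */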
+
+       err = request_threaded_irq(sdata->get_irq_data_ready(indio_dev),
+                       st_sensors_irq_handler,
+                       st_sensors_irq_thread,
                        irq_trig,
                        sdata->trig->name,
                        sdata->trig);
@@ -88,10 +162,6 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                goto iio_trigger_free;
        }
 
-       iio_trigger_set_drvdata(sdata->trig, indio_dev);
-       sdata->trig->ops = trigger_ops;
-       sdata->trig->dev.parent = sdata->dev;
-
        err = iio_trigger_register(sdata->trig);
        if (err < 0) {
                dev_err(&indio_dev->dev, "failed to register iio trigger.\n");
@@ -119,6 +189,18 @@ void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
 }
 EXPORT_SYMBOL(st_sensors_deallocate_trigger);
 
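+/**
+ * st_sensors_validate_device() - check the trigger matches the iio device
+ * @trig: the iio trigger
+ * @indio_dev: the iio device to validate against the trigger's drvdata
+ */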
+int st_sensors_validate_device(struct iio_trigger *trig,
+                              struct iio_dev *indio_dev)
+{
+       struct iio_dev *indio = iio_trigger_get_drvdata(trig);
+
+       if (indio != indio_dev)
+               return -EINVAL;
+
+       return 0;
+}
+EXPORT_SYMBOL(st_sensors_validate_device);
+
 MODULE_AUTHOR("Denis Ciocca <denis.ciocca@st.com>");
 MODULE_DESCRIPTION("STMicroelectronics ST-sensors trigger");
 MODULE_LICENSE("GPL v2");
index e63b957..f7c71da 100644 (file)
@@ -247,7 +247,7 @@ config MCP4922
 
 config STX104
        tristate "Apex Embedded Systems STX104 DAC driver"
-       depends on X86 && ISA
+       depends on X86 && ISA_BUS_API
        help
          Say yes here to build support for the 2-channel DAC on the Apex
          Embedded Systems STX104 integrated analog PC/104 card. The base port
index 948f600..69bde59 100644 (file)
@@ -525,7 +525,7 @@ static int ad5592r_alloc_channels(struct ad5592r_state *st)
 
        device_for_each_child_node(st->dev, child) {
                ret = fwnode_property_read_u32(child, "reg", &reg);
-               if (ret || reg > ARRAY_SIZE(st->channel_modes))
+               if (ret || reg >= ARRAY_SIZE(st->channel_modes))
                        continue;
 
                ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
index d67b17b..a537704 100644 (file)
@@ -91,7 +91,7 @@ static const struct iio_buffer_setup_ops st_gyro_buffer_setup_ops = {
 
 int st_gyro_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_gyro_buffer_setup_ops);
 }
 
index 52a3c87..a801295 100644 (file)
@@ -409,6 +409,7 @@ static const struct iio_info gyro_info = {
 static const struct iio_trigger_ops st_gyro_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_GYRO_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_GYRO_TRIGGER_OPS (&st_gyro_trigger_ops)
 #else
index 3be6d20..1153591 100644 (file)
@@ -165,10 +165,8 @@ static irqreturn_t am2315_trigger_handler(int irq, void *p)
        struct am2315_sensor_data sensor_data;
 
        ret = am2315_read_data(data, &sensor_data);
-       if (ret < 0) {
-               mutex_unlock(&data->lock);
+       if (ret < 0)
                goto err;
-       }
 
        mutex_lock(&data->lock);
        if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
index fa47676..a03832a 100644 (file)
@@ -55,7 +55,7 @@ static const struct {
        },
        { /* IIO_HUMIDITYRELATIVE channel */
                .shift = 8,
-               .mask = 2,
+               .mask = 3,
        },
 };
 
@@ -164,14 +164,14 @@ static int hdc100x_get_measurement(struct hdc100x_data *data,
                dev_err(&client->dev, "cannot read high byte measurement");
                return ret;
        }
-       val = ret << 6;
+       val = ret << 8;
 
        ret = i2c_smbus_read_byte(client);
        if (ret < 0) {
                dev_err(&client->dev, "cannot read low byte measurement");
                return ret;
        }
-       val |= ret >> 2;
+       val |= ret;
 
        return val;
 }
@@ -211,18 +211,18 @@ static int hdc100x_read_raw(struct iio_dev *indio_dev,
                return IIO_VAL_INT_PLUS_MICRO;
        case IIO_CHAN_INFO_SCALE:
                if (chan->type == IIO_TEMP) {
-                       *val = 165;
-                       *val2 = 65536 >> 2;
+                       *val = 165000;
+                       *val2 = 65536;
                        return IIO_VAL_FRACTIONAL;
                } else {
-                       *val = 0;
-                       *val2 = 10000;
-                       return IIO_VAL_INT_PLUS_MICRO;
+                       *val = 100;
+                       *val2 = 65536;
+                       return IIO_VAL_FRACTIONAL;
                }
                break;
        case IIO_CHAN_INFO_OFFSET:
-               *val = -3971;
-               *val2 = 879096;
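+               /* -40 degC expressed in raw steps: -40000 * 65536 / 165000 */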
+               *val = -15887;
+               *val2 = 515151;
                return IIO_VAL_INT_PLUS_MICRO;
        default:
                return -EINVAL;
index 0bf92b0..b8a290e 100644 (file)
@@ -209,11 +209,11 @@ static const struct  bmi160_scale_item bmi160_scale_table[] = {
 };
 
 static const struct bmi160_odr bmi160_accel_odr[] = {
-       {0x01, 0, 78125},
-       {0x02, 1, 5625},
-       {0x03, 3, 125},
-       {0x04, 6, 25},
-       {0x05, 12, 5},
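+       /* { register value, Hz, micro-Hz }: 0.78125 Hz is 25/32 Hz */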
+       {0x01, 0, 781250},
+       {0x02, 1, 562500},
+       {0x03, 3, 125000},
+       {0x04, 6, 250000},
+       {0x05, 12, 500000},
        {0x06, 25, 0},
        {0x07, 50, 0},
        {0x08, 100, 0},
@@ -229,7 +229,7 @@ static const struct bmi160_odr bmi160_gyro_odr[] = {
        {0x08, 100, 0},
        {0x09, 200, 0},
        {0x0A, 400, 0},
-       {0x0B, 8000, 0},
+       {0x0B, 800, 0},
        {0x0C, 1600, 0},
        {0x0D, 3200, 0},
 };
@@ -364,8 +364,8 @@ int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
 
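+       /* regmap_update_bits() expects the mask before the value */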
        return regmap_update_bits(data->regmap,
                                  bmi160_regs[t].config,
-                                 bmi160_odr_table[t].tbl[i].bits,
-                                 bmi160_regs[t].config_odr_mask);
+                                 bmi160_regs[t].config_odr_mask,
+                                 bmi160_odr_table[t].tbl[i].bits);
 }
 
 static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
index ae2806a..0c52dfe 100644 (file)
@@ -210,22 +210,35 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
 
        /* Prevent the module from being removed whilst attached to a trigger */
        __module_get(pf->indio_dev->info->driver_module);
+
+       /* Get irq number */
        pf->irq = iio_trigger_get_irq(trig);
+       if (pf->irq < 0)
+               goto out_put_module;
+
+       /* Request irq */
        ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
                                   pf->type, pf->name,
                                   pf);
-       if (ret < 0) {
-               module_put(pf->indio_dev->info->driver_module);
-               return ret;
-       }
+       if (ret < 0)
+               goto out_put_irq;
 
+       /* Enable trigger in driver */
        if (trig->ops && trig->ops->set_trigger_state && notinuse) {
                ret = trig->ops->set_trigger_state(trig, true);
                if (ret < 0)
-                       module_put(pf->indio_dev->info->driver_module);
+                       goto out_free_irq;
        }
 
        return ret;
+
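+       /* Error unwind in the reverse order the resources were taken */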
+out_free_irq:
+       free_irq(pf->irq, pf);
+out_put_irq:
+       iio_trigger_put_irq(trig, pf->irq);
+out_put_module:
+       module_put(pf->indio_dev->info->driver_module);
+       return ret;
 }
 
 static int iio_trigger_detach_poll_func(struct iio_trigger *trig,
index b4dbb39..651d57b 100644 (file)
@@ -1011,6 +1011,7 @@ static int apds9960_probe(struct i2c_client *client,
 
        iio_device_attach_buffer(indio_dev, buffer);
 
+       indio_dev->dev.parent = &client->dev;
        indio_dev->info = &apds9960_info;
        indio_dev->name = APDS9960_DRV_NAME;
        indio_dev->channels = apds9960_channels;
index 72b364e..b54dcba 100644 (file)
@@ -84,7 +84,7 @@ static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev,
        int ret;
 
        if (!readval)
-               bh1780_write(bh1780, (u8)reg, (u8)writeval);
+               return bh1780_write(bh1780, (u8)reg, (u8)writeval);
 
        ret = bh1780_read(bh1780, (u8)reg);
        if (ret < 0)
@@ -187,7 +187,7 @@ static int bh1780_probe(struct i2c_client *client,
 
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &bh1780_info;
-       indio_dev->name = id->name;
+       indio_dev->name = "bh1780";
        indio_dev->channels = bh1780_channels;
        indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
        indio_dev->modes = INDIO_DIRECT_MODE;
@@ -226,7 +226,8 @@ static int bh1780_remove(struct i2c_client *client)
 static int bh1780_runtime_suspend(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct bh1780_data *bh1780 = iio_priv(indio_dev);
        int ret;
 
        ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
@@ -241,7 +242,8 @@ static int bh1780_runtime_suspend(struct device *dev)
 static int bh1780_runtime_resume(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
-       struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+       struct iio_dev *indio_dev = i2c_get_clientdata(client);
+       struct bh1780_data *bh1780 = iio_priv(indio_dev);
        int ret;
 
        ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
index e01e58a..f17cb2e 100644 (file)
@@ -147,7 +147,6 @@ static const struct iio_chan_spec max44000_channels[] = {
        {
                .type = IIO_PROXIMITY,
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
-               .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
                .scan_index = MAX44000_SCAN_INDEX_PRX,
                .scan_type = {
                        .sign           = 'u',
index ecd3bd0..0a9e8fa 100644 (file)
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_magn_buffer_setup_ops = {
 
 int st_magn_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_magn_buffer_setup_ops);
 }
 
index 62036d2..8250fc3 100644 (file)
@@ -572,6 +572,7 @@ static const struct iio_info magn_info = {
 static const struct iio_trigger_ops st_magn_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_MAGN_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_MAGN_TRIGGER_OPS (&st_magn_trigger_ops)
 #else
index 2f1498e..724452d 100644 (file)
@@ -879,8 +879,8 @@ static int bmp280_probe(struct i2c_client *client,
        if (ret < 0)
                return ret;
        if (chip_id != id->driver_data) {
-               dev_err(&client->dev, "bad chip id.  expected %x got %x\n",
-                       BMP280_CHIP_ID, chip_id);
+               dev_err(&client->dev, "bad chip id.  expected %lx got %x\n",
+                       id->driver_data, chip_id);
                return -EINVAL;
        }
 
index 2ff53f2..99468d0 100644 (file)
@@ -82,7 +82,7 @@ static const struct iio_buffer_setup_ops st_press_buffer_setup_ops = {
 
 int st_press_allocate_ring(struct iio_dev *indio_dev)
 {
-       return iio_triggered_buffer_setup(indio_dev, &iio_pollfunc_store_time,
+       return iio_triggered_buffer_setup(indio_dev, NULL,
                &st_sensors_trigger_handler, &st_press_buffer_setup_ops);
 }
 
index 9e9b72a..92a118c 100644 (file)
 #include <linux/iio/common/st_sensors.h>
 #include "st_pressure.h"
 
+#define MCELSIUS_PER_CELSIUS                   1000
+
+/* Default pressure sensitivity */
 #define ST_PRESS_LSB_PER_MBAR                  4096UL
 #define ST_PRESS_KPASCAL_NANO_SCALE            (100000000UL / \
                                                 ST_PRESS_LSB_PER_MBAR)
+
+/* Default temperature sensitivity */
 #define ST_PRESS_LSB_PER_CELSIUS               480UL
-#define ST_PRESS_CELSIUS_NANO_SCALE            (1000000000UL / \
-                                                ST_PRESS_LSB_PER_CELSIUS)
+#define ST_PRESS_MILLI_CELSIUS_OFFSET          42500UL
+
 #define ST_PRESS_NUMBER_DATA_CHANNELS          1
 
 /* FULLSCALE */
+#define ST_PRESS_FS_AVL_1100MB                 1100
 #define ST_PRESS_FS_AVL_1260MB                 1260
 
 #define ST_PRESS_1_OUT_XL_ADDR                 0x28
@@ -54,9 +60,6 @@
 #define ST_PRESS_LPS331AP_PW_MASK              0x80
 #define ST_PRESS_LPS331AP_FS_ADDR              0x23
 #define ST_PRESS_LPS331AP_FS_MASK              0x30
-#define ST_PRESS_LPS331AP_FS_AVL_1260_VAL      0x00
-#define ST_PRESS_LPS331AP_FS_AVL_1260_GAIN     ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN     ST_PRESS_CELSIUS_NANO_SCALE
 #define ST_PRESS_LPS331AP_BDU_ADDR             0x20
 #define ST_PRESS_LPS331AP_BDU_MASK             0x04
 #define ST_PRESS_LPS331AP_DRDY_IRQ_ADDR                0x22
 #define ST_PRESS_LPS331AP_OD_IRQ_ADDR          0x22
 #define ST_PRESS_LPS331AP_OD_IRQ_MASK          0x40
 #define ST_PRESS_LPS331AP_MULTIREAD_BIT                true
-#define ST_PRESS_LPS331AP_TEMP_OFFSET          42500
 
 /* CUSTOM VALUES FOR LPS001WP SENSOR */
+
+/* LPS001WP pressure resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_MBAR         16UL
+/* LPS001WP temperature resolution */
+#define ST_PRESS_LPS001WP_LSB_PER_CELSIUS      64UL
+
 #define ST_PRESS_LPS001WP_WAI_EXP              0xba
 #define ST_PRESS_LPS001WP_ODR_ADDR             0x20
 #define ST_PRESS_LPS001WP_ODR_MASK             0x30
@@ -78,6 +86,8 @@
 #define ST_PRESS_LPS001WP_ODR_AVL_13HZ_VAL     0x03
 #define ST_PRESS_LPS001WP_PW_ADDR              0x20
 #define ST_PRESS_LPS001WP_PW_MASK              0x40
+#define ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN \
+       (100000000UL / ST_PRESS_LPS001WP_LSB_PER_MBAR)
 #define ST_PRESS_LPS001WP_BDU_ADDR             0x20
 #define ST_PRESS_LPS001WP_BDU_MASK             0x04
 #define ST_PRESS_LPS001WP_MULTIREAD_BIT                true
 #define ST_PRESS_LPS25H_ODR_AVL_25HZ_VAL       0x04
 #define ST_PRESS_LPS25H_PW_ADDR                        0x20
 #define ST_PRESS_LPS25H_PW_MASK                        0x80
-#define ST_PRESS_LPS25H_FS_ADDR                        0x00
-#define ST_PRESS_LPS25H_FS_MASK                        0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_VAL                0x00
-#define ST_PRESS_LPS25H_FS_AVL_1260_GAIN       ST_PRESS_KPASCAL_NANO_SCALE
-#define ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN       ST_PRESS_CELSIUS_NANO_SCALE
 #define ST_PRESS_LPS25H_BDU_ADDR               0x20
 #define ST_PRESS_LPS25H_BDU_MASK               0x04
 #define ST_PRESS_LPS25H_DRDY_IRQ_ADDR          0x23
 #define ST_PRESS_LPS25H_OD_IRQ_ADDR            0x22
 #define ST_PRESS_LPS25H_OD_IRQ_MASK            0x40
 #define ST_PRESS_LPS25H_MULTIREAD_BIT          true
-#define ST_PRESS_LPS25H_TEMP_OFFSET            42500
 #define ST_PRESS_LPS25H_OUT_XL_ADDR            0x28
 #define ST_TEMP_LPS25H_OUT_L_ADDR              0x2b
 
@@ -161,7 +165,9 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
                        .storagebits = 16,
                        .endianness = IIO_LE,
                },
-               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+               .info_mask_separate =
+                       BIT(IIO_CHAN_INFO_RAW) |
+                       BIT(IIO_CHAN_INFO_SCALE),
                .modified = 0,
        },
        {
@@ -177,7 +183,7 @@ static const struct iio_chan_spec st_press_lps001wp_channels[] = {
                },
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) |
-                       BIT(IIO_CHAN_INFO_OFFSET),
+                       BIT(IIO_CHAN_INFO_SCALE),
                .modified = 0,
        },
        IIO_CHAN_SOFT_TIMESTAMP(1)
@@ -212,11 +218,14 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .addr = ST_PRESS_LPS331AP_FS_ADDR,
                        .mask = ST_PRESS_LPS331AP_FS_MASK,
                        .fs_avl = {
+                               /*
+                                * Pressure and temperature sensitivity values
+                                * as defined in table 3 of the LPS331AP datasheet.
+                                */
                                [0] = {
                                        .num = ST_PRESS_FS_AVL_1260MB,
-                                       .value = ST_PRESS_LPS331AP_FS_AVL_1260_VAL,
-                                       .gain = ST_PRESS_LPS331AP_FS_AVL_1260_GAIN,
-                                       .gain2 = ST_PRESS_LPS331AP_FS_AVL_TEMP_GAIN,
+                                       .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+                                       .gain2 = ST_PRESS_LSB_PER_CELSIUS,
                                },
                        },
                },
@@ -261,7 +270,17 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
                },
                .fs = {
-                       .addr = 0,
+                       .fs_avl = {
+                               /*
+                                * Pressure and temperature resolution values
+                                * as defined in table 3 of the LPS001WP datasheet.
+                                */
+                               [0] = {
+                                       .num = ST_PRESS_FS_AVL_1100MB,
+                                       .gain = ST_PRESS_LPS001WP_FS_AVL_PRESS_GAIN,
+                                       .gain2 = ST_PRESS_LPS001WP_LSB_PER_CELSIUS,
+                               },
+                       },
                },
                .bdu = {
                        .addr = ST_PRESS_LPS001WP_BDU_ADDR,
@@ -298,14 +317,15 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
                        .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
                },
                .fs = {
-                       .addr = ST_PRESS_LPS25H_FS_ADDR,
-                       .mask = ST_PRESS_LPS25H_FS_MASK,
                        .fs_avl = {
+                               /*
+                                * Pressure and temperature sensitivity values
+                                * as defined in table 3 of the LPS25H datasheet.
+                                */
                                [0] = {
                                        .num = ST_PRESS_FS_AVL_1260MB,
-                                       .value = ST_PRESS_LPS25H_FS_AVL_1260_VAL,
-                                       .gain = ST_PRESS_LPS25H_FS_AVL_1260_GAIN,
-                                       .gain2 = ST_PRESS_LPS25H_FS_AVL_TEMP_GAIN,
+                                       .gain = ST_PRESS_KPASCAL_NANO_SCALE,
+                                       .gain2 = ST_PRESS_LSB_PER_CELSIUS,
                                },
                        },
                },
@@ -364,26 +384,26 @@ static int st_press_read_raw(struct iio_dev *indio_dev,
 
                return IIO_VAL_INT;
        case IIO_CHAN_INFO_SCALE:
-               *val = 0;
-
                switch (ch->type) {
                case IIO_PRESSURE:
+                       *val = 0;
                        *val2 = press_data->current_fullscale->gain;
-                       break;
+                       return IIO_VAL_INT_PLUS_NANO;
                case IIO_TEMP:
+                       *val = MCELSIUS_PER_CELSIUS;
                        *val2 = press_data->current_fullscale->gain2;
-                       break;
+                       return IIO_VAL_FRACTIONAL;
                default:
                        err = -EINVAL;
                        goto read_error;
                }
 
-               return IIO_VAL_INT_PLUS_NANO;
        case IIO_CHAN_INFO_OFFSET:
                switch (ch->type) {
                case IIO_TEMP:
-                       *val = 425;
-                       *val2 = 10;
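+                       /* offset in raw LSB: 42500 mdegC * (LSB/degC) / 1000 */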
+                       *val = ST_PRESS_MILLI_CELSIUS_OFFSET *
+                              press_data->current_fullscale->gain2;
+                       *val2 = MCELSIUS_PER_CELSIUS;
                        break;
                default:
                        err = -EINVAL;
@@ -425,6 +445,7 @@ static const struct iio_info press_info = {
 static const struct iio_trigger_ops st_press_trigger_ops = {
        .owner = THIS_MODULE,
        .set_trigger_state = ST_PRESS_TRIGGER_SET_STATE,
+       .validate_device = st_sensors_validate_device,
 };
 #define ST_PRESS_TRIGGER_OPS (&st_press_trigger_ops)
 #else
index f4d29d5..e2f926c 100644 (file)
@@ -64,6 +64,7 @@ struct as3935_state {
        struct delayed_work work;
 
        u32 tune_cap;
+       u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */
        u8 buf[2] ____cacheline_aligned;
 };
 
@@ -72,7 +73,8 @@ static const struct iio_chan_spec as3935_channels[] = {
                .type           = IIO_PROXIMITY,
                .info_mask_separate =
                        BIT(IIO_CHAN_INFO_RAW) |
-                       BIT(IIO_CHAN_INFO_PROCESSED),
+                       BIT(IIO_CHAN_INFO_PROCESSED) |
+                       BIT(IIO_CHAN_INFO_SCALE),
                .scan_index     = 0,
                .scan_type = {
                        .sign           = 'u',
@@ -181,7 +183,12 @@ static int as3935_read_raw(struct iio_dev *indio_dev,
                /* storm out of range */
                if (*val == AS3935_DATA_MASK)
                        return -EINVAL;
-               *val *= 1000;
+
+               if (m == IIO_CHAN_INFO_PROCESSED)
+                       *val *= 1000;
+               break;
+       case IIO_CHAN_INFO_SCALE:
+               *val = 1000;
                break;
        default:
                return -EINVAL;
@@ -206,10 +213,10 @@ static irqreturn_t as3935_trigger_handler(int irq, void *private)
        ret = as3935_read(st, AS3935_DATA, &val);
        if (ret)
                goto err_read;
-       val &= AS3935_DATA_MASK;
-       val *= 1000;
 
-       iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp);
+       st->buffer[0] = val & AS3935_DATA_MASK;
+       iio_push_to_buffers_with_timestamp(indio_dev, &st->buffer,
+                                          pf->timestamp);
 err_read:
        iio_trigger_notify_done(indio_dev->trig);
 
index 0409667..1a2984c 100644 (file)
@@ -411,7 +411,9 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 
        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
-                       if (!del_gid(ib_dev, port, table, ix, false))
+                       if (!del_gid(ib_dev, port, table, ix,
+                                    !!(table->data_vec[ix].props &
+                                       GID_TABLE_ENTRY_DEFAULT)))
                                deleted = true;
 
        write_unlock_irq(&table->rwlock);
index f0c91ba..ad1b1ad 100644 (file)
@@ -708,17 +708,6 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
                complete(&id_priv->comp);
 }
 
-static int cma_disable_callback(struct rdma_id_private *id_priv,
-                               enum rdma_cm_state state)
-{
-       mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state != state) {
-               mutex_unlock(&id_priv->handler_mutex);
-               return -EINVAL;
-       }
-       return 0;
-}
-
 struct rdma_cm_id *rdma_create_id(struct net *net,
                                  rdma_cm_event_handler event_handler,
                                  void *context, enum rdma_port_space ps,
@@ -1671,11 +1660,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
        struct rdma_cm_event event;
        int ret = 0;
 
+       mutex_lock(&id_priv->handler_mutex);
        if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-               cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
+            id_priv->state != RDMA_CM_CONNECT) ||
            (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-               cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
-               return 0;
+            id_priv->state != RDMA_CM_DISCONNECT))
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
@@ -1870,7 +1860,7 @@ static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_e
 
 static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 {
-       struct rdma_id_private *listen_id, *conn_id;
+       struct rdma_id_private *listen_id, *conn_id = NULL;
        struct rdma_cm_event event;
        struct net_device *net_dev;
        int offset, ret;
@@ -1884,9 +1874,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
                goto net_dev_put;
        }
 
-       if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) {
+       mutex_lock(&listen_id->handler_mutex);
+       if (listen_id->state != RDMA_CM_LISTEN) {
                ret = -ECONNABORTED;
-               goto net_dev_put;
+               goto err1;
        }
 
        memset(&event, 0, sizeof event);
@@ -1976,8 +1967,9 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
-       if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_CONNECT)
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (iw_event->event) {
@@ -2029,6 +2021,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
                return ret;
        }
 
+out:
        mutex_unlock(&id_priv->handler_mutex);
        return ret;
 }
@@ -2039,13 +2032,15 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
        struct rdma_cm_id *new_cm_id;
        struct rdma_id_private *listen_id, *conn_id;
        struct rdma_cm_event event;
-       int ret;
+       int ret = -ECONNABORTED;
        struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
        struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
 
        listen_id = cm_id->context;
-       if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
-               return -ECONNABORTED;
+
+       mutex_lock(&listen_id->handler_mutex);
+       if (listen_id->state != RDMA_CM_LISTEN)
+               goto out;
 
        /* Create a new RDMA id for the new IW CM ID */
        new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
@@ -3216,8 +3211,9 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
        struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
        int ret = 0;
 
-       if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_CONNECT)
+               goto out;
 
        memset(&event, 0, sizeof event);
        switch (ib_event->event) {
@@ -3673,12 +3669,13 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
        struct rdma_id_private *id_priv;
        struct cma_multicast *mc = multicast->context;
        struct rdma_cm_event event;
-       int ret;
+       int ret = 0;
 
        id_priv = mc->id_priv;
-       if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
-           cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
-               return 0;
+       mutex_lock(&id_priv->handler_mutex);
+       if (id_priv->state != RDMA_CM_ADDR_BOUND &&
+           id_priv->state != RDMA_CM_ADDR_RESOLVED)
+               goto out;
 
        if (!status)
                status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
@@ -3720,6 +3717,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
                return 0;
        }
 
+out:
        mutex_unlock(&id_priv->handler_mutex);
        return 0;
 }
@@ -3878,12 +3876,12 @@ static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
        gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
                   rdma_start_port(id_priv->cma_dev->device)];
        if (addr->sa_family == AF_INET) {
-               if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
+               if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
+                       mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
                        err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
                                            true);
-               if (!err) {
-                       mc->igmp_joined = true;
-                       mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
+                       if (!err)
+                               mc->igmp_joined = true;
                }
        } else {
                if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
index 1a8babb..825021d 100644 (file)
@@ -1747,7 +1747,7 @@ static int create_qp(struct ib_uverbs_file *file,
        struct ib_srq                   *srq = NULL;
        struct ib_qp                    *qp;
        char                            *buf;
-       struct ib_qp_init_attr          attr;
+       struct ib_qp_init_attr          attr = {};
        struct ib_uverbs_ex_create_qp_resp resp;
        int                             ret;
 
index 1d7d4cf..6298f54 100644 (file)
@@ -511,12 +511,16 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
                ah_attr->grh.dgid = sgid;
 
                if (!rdma_cap_eth_ah(device, port_num)) {
-                       ret = ib_find_cached_gid_by_port(device, &dgid,
-                                                        IB_GID_TYPE_IB,
-                                                        port_num, NULL,
-                                                        &gid_index);
-                       if (ret)
-                               return ret;
+                       if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
+                               ret = ib_find_cached_gid_by_port(device, &dgid,
+                                                                IB_GID_TYPE_IB,
+                                                                port_num, NULL,
+                                                                &gid_index);
+                               if (ret)
+                                       return ret;
+                       } else {
+                               gid_index = 0;
+                       }
                }
 
                ah_attr->grh.sgid_index = (u8) gid_index;
index 81619fb..f5de851 100644 (file)
@@ -1037,7 +1037,7 @@ static void dc_shutdown(struct hfi1_devdata *);
 static void dc_start(struct hfi1_devdata *);
 static int qos_rmt_entries(struct hfi1_devdata *dd, unsigned int *mp,
                           unsigned int *np);
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd);
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd);
 
 /*
  * Error interrupt table entry.  This is used as input to the interrupt
@@ -6962,8 +6962,6 @@ void handle_link_down(struct work_struct *work)
        }
 
        reset_neighbor_info(ppd);
-       if (ppd->mgmt_allowed)
-               remove_full_mgmt_pkey(ppd);
 
        /* disable the port */
        clear_rcvctrl(ppd->dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
@@ -7070,12 +7068,16 @@ static void add_full_mgmt_pkey(struct hfi1_pportdata *ppd)
                            __func__, ppd->pkeys[2], FULL_MGMT_P_KEY);
        ppd->pkeys[2] = FULL_MGMT_P_KEY;
        (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+       hfi1_event_pkey_change(ppd->dd, ppd->port);
 }
 
-static void remove_full_mgmt_pkey(struct hfi1_pportdata *ppd)
+static void clear_full_mgmt_pkey(struct hfi1_pportdata *ppd)
 {
-       ppd->pkeys[2] = 0;
-       (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+       if (ppd->pkeys[2] != 0) {
+               ppd->pkeys[2] = 0;
+               (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
+               hfi1_event_pkey_change(ppd->dd, ppd->port);
+       }
 }
 
 /*
@@ -9168,6 +9170,13 @@ int start_link(struct hfi1_pportdata *ppd)
                return 0;
        }
 
+       /*
+        * FULL_MGMT_P_KEY is cleared from the pkey table, so that the
+        * pkey table can be configured properly if the HFI unit is connected
+        * to a switch port with MgmtAllowed=NO.
+        */
+       clear_full_mgmt_pkey(ppd);
+
        return set_link_state(ppd, HLS_DN_POLL);
 }
 
@@ -9777,7 +9786,7 @@ static void set_send_length(struct hfi1_pportdata *ppd)
        u64 len1 = 0, len2 = (((dd->vld[15].mtu + max_hb) >> 2)
                              & SEND_LEN_CHECK1_LEN_VL15_MASK) <<
                SEND_LEN_CHECK1_LEN_VL15_SHIFT;
-       int i;
+       int i, j;
        u32 thres;
 
        for (i = 0; i < ppd->vls_supported; i++) {
@@ -9801,7 +9810,10 @@ static void set_send_length(struct hfi1_pportdata *ppd)
                            sc_mtu_to_threshold(dd->vld[i].sc,
                                                dd->vld[i].mtu,
                                                dd->rcd[0]->rcvhdrqentsize));
-               sc_set_cr_threshold(dd->vld[i].sc, thres);
+               for (j = 0; j < INIT_SC_PER_VL; j++)
+                       sc_set_cr_threshold(
+                                       pio_select_send_context_vl(dd, j, i),
+                                           thres);
        }
        thres = min(sc_percent_to_threshold(dd->vld[15].sc, 50),
                    sc_mtu_to_threshold(dd->vld[15].sc,
index 7a5b0e6..c702a00 100644 (file)
@@ -203,6 +203,9 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
 
        switch (cmd) {
        case HFI1_IOCTL_ASSIGN_CTXT:
+               if (uctxt)
+                       return -EINVAL;
+
                if (copy_from_user(&uinfo,
                                   (struct hfi1_user_info __user *)arg,
                                   sizeof(uinfo)))
index 0d28a5a..eed971c 100644 (file)
@@ -1383,7 +1383,7 @@ static void postinit_cleanup(struct hfi1_devdata *dd)
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        int ret = 0, j, pidx, initfail;
-       struct hfi1_devdata *dd = NULL;
+       struct hfi1_devdata *dd = ERR_PTR(-EINVAL);
        struct hfi1_pportdata *ppd;
 
        /* First, lock the non-writable module parameters */
index 2190295..fca07a1 100644 (file)
@@ -78,6 +78,16 @@ static inline void clear_opa_smp_data(struct opa_smp *smp)
        memset(data, 0, size);
 }
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port)
+{
+       struct ib_event event;
+
+       event.event = IB_EVENT_PKEY_CHANGE;
+       event.device = &dd->verbs_dev.rdi.ibdev;
+       event.element.port_num = port;
+       ib_dispatch_event(&event);
+}
+
 static void send_trap(struct hfi1_ibport *ibp, void *data, unsigned len)
 {
        struct ib_mad_send_buf *send_buf;
@@ -1418,15 +1428,10 @@ static int set_pkeys(struct hfi1_devdata *dd, u8 port, u16 *pkeys)
        }
 
        if (changed) {
-               struct ib_event event;
-
                (void)hfi1_set_ib_cfg(ppd, HFI1_IB_CFG_PKEYS, 0);
-
-               event.event = IB_EVENT_PKEY_CHANGE;
-               event.device = &dd->verbs_dev.rdi.ibdev;
-               event.element.port_num = port;
-               ib_dispatch_event(&event);
+               hfi1_event_pkey_change(dd, port);
        }
+
        return 0;
 }
 
index 55ee086..8b734aa 100644 (file)
@@ -434,4 +434,6 @@ struct sc2vlnt {
                    COUNTER_MASK(1, 3) | \
                    COUNTER_MASK(1, 4))
 
+void hfi1_event_pkey_change(struct hfi1_devdata *dd, u8 port);
+
 #endif                         /* _HFI1_MAD_H */
index d5edb1a..d402245 100644 (file)
@@ -995,7 +995,7 @@ static void sc_wait_for_packet_egress(struct send_context *sc, int pause)
                /* counter is reset if occupancy count changes */
                if (reg != reg_prev)
                        loop = 0;
-               if (loop > 500) {
+               if (loop > 50000) {
                        /* timed out - bounce the link */
                        dd_dev_err(dd,
                                   "%s: context %u(%u) timeout waiting for packets to egress, remaining count %u, bouncing link\n",
@@ -1797,6 +1797,21 @@ static void pio_map_rcu_callback(struct rcu_head *list)
        pio_map_free(m);
 }
 
+/*
+ * Set credit return threshold for the kernel send context
+ */
+static void set_threshold(struct hfi1_devdata *dd, int scontext, int i)
+{
+       u32 thres;
+
+       thres = min(sc_percent_to_threshold(dd->kernel_send_context[scontext],
+                                           50),
+                   sc_mtu_to_threshold(dd->kernel_send_context[scontext],
+                                       dd->vld[i].mtu,
+                                       dd->rcd[0]->rcvhdrqentsize));
+       sc_set_cr_threshold(dd->kernel_send_context[scontext], thres);
+}
+
 /*
  * pio_map_init - called when #vls change
  * @dd: hfi1_devdata
@@ -1872,11 +1887,16 @@ int pio_map_init(struct hfi1_devdata *dd, u8 port, u8 num_vls, u8 *vl_scontexts)
                        if (!newmap->map[i])
                                goto bail;
                        newmap->map[i]->mask = (1 << ilog2(sz)) - 1;
-                       /* assign send contexts */
+                       /*
+                        * assign send contexts and
+                        * adjust credit return threshold
+                        */
                        for (j = 0; j < sz; j++) {
-                               if (dd->kernel_send_context[scontext])
+                               if (dd->kernel_send_context[scontext]) {
                                        newmap->map[i]->ksc[j] =
                                        dd->kernel_send_context[scontext];
+                                       set_threshold(dd, scontext, i);
+                               }
                                if (++scontext >= first_scontext +
                                                  vl_scontexts[i])
                                        /* wrap back to first send context */
index 2441669..9fb5616 100644 (file)
@@ -579,7 +579,8 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len)
 
        if (ppd->qsfp_info.cache_valid) {
                if (QSFP_IS_CU(cache[QSFP_MOD_TECH_OFFS]))
-                       sprintf(lenstr, "%dM ", cache[QSFP_MOD_LEN_OFFS]);
+                       snprintf(lenstr, sizeof(lenstr), "%dM ",
+                                cache[QSFP_MOD_LEN_OFFS]);
 
                power_byte = cache[QSFP_MOD_PWR_OFFS];
                sofar += scnprintf(buf + sofar, len - sofar, "PWR:%.3sW\n",
index bc95c41..d8fb056 100644 (file)
@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
 
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                struct rvt_qp *qp)
+       __must_hold(&qp->s_lock)
 {
        struct verbs_txreq *tx = ERR_PTR(-EBUSY);
-       unsigned long flags;
 
-       spin_lock_irqsave(&qp->s_lock, flags);
        write_seqlock(&dev->iowait_lock);
        if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
        }
 out:
        write_sequnlock(&dev->iowait_lock);
-       spin_unlock_irqrestore(&qp->s_lock, flags);
        return tx;
 }
 
index 1cf69b2..a1d6e08 100644 (file)
@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
                                            struct rvt_qp *qp)
+       __must_hold(&qp->s_lock)
 {
        struct verbs_txreq *tx;
        struct hfi1_qp_priv *priv = qp->priv;
index 8b95320..b738acd 100644 (file)
 
 #define IW_HMC_OBJ_TYPE_NUM ARRAY_SIZE(iw_hmc_obj_types)
 #define IW_CFG_FPM_QP_COUNT            32768
+#define I40IW_MAX_PAGES_PER_FMR                512
+#define I40IW_MIN_PAGES_PER_FMR                1
 
 #define I40IW_MTU_TO_MSS               40
 #define I40IW_DEFAULT_MSS              1460
index 02a735b..33959ed 100644 (file)
@@ -79,6 +79,7 @@ static int i40iw_query_device(struct ib_device *ibdev,
        props->max_qp_init_rd_atom = props->max_qp_rd_atom;
        props->atomic_cap = IB_ATOMIC_NONE;
        props->max_map_per_fmr = 1;
+       props->max_fast_reg_page_list_len = I40IW_MAX_PAGES_PER_FMR;
        return 0;
 }
 
@@ -1527,7 +1528,7 @@ static struct ib_mr *i40iw_alloc_mr(struct ib_pd *pd,
        mutex_lock(&iwdev->pbl_mutex);
        status = i40iw_get_pble(&iwdev->sc_dev, iwdev->pble_rsrc, palloc, iwmr->page_cnt);
        mutex_unlock(&iwdev->pbl_mutex);
-       if (!status)
+       if (status)
                goto err1;
 
        if (palloc->level != I40IW_LEVEL_1)
@@ -2149,6 +2150,7 @@ static int i40iw_post_send(struct ib_qp *ibqp,
                        struct i40iw_sc_dev *dev = &iwqp->iwdev->sc_dev;
                        struct i40iw_fast_reg_stag_info info;
 
+                       memset(&info, 0, sizeof(info));
                        info.access_rights = I40IW_ACCESS_FLAGS_LOCALREAD;
                        info.access_rights |= i40iw_get_user_access(flags);
                        info.stag_key = reg_wr(ib_wr)->key & 0xff;
@@ -2158,10 +2160,14 @@ static int i40iw_post_send(struct ib_qp *ibqp,
                        info.addr_type = I40IW_ADDR_TYPE_VA_BASED;
                        info.va = (void *)(uintptr_t)iwmr->ibmr.iova;
                        info.total_len = iwmr->ibmr.length;
+                       info.reg_addr_pa = *(u64 *)palloc->level1.addr;
                        info.first_pm_pbl_index = palloc->level1.idx;
                        info.local_fence = ib_wr->send_flags & IB_SEND_FENCE;
                        info.signaled = ib_wr->send_flags & IB_SEND_SIGNALED;
 
+                       if (iwmr->npages > I40IW_MIN_PAGES_PER_FMR)
+                               info.chunk_size = 1;
+
                        if (page_shift == 21)
                                info.page_size = 1; /* 2M page */
 
@@ -2327,13 +2333,16 @@ static int i40iw_req_notify_cq(struct ib_cq *ibcq,
 {
        struct i40iw_cq *iwcq;
        struct i40iw_cq_uk *ukcq;
-       enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_SOLICITED;
+       unsigned long flags;
+       enum i40iw_completion_notify cq_notify = IW_CQ_COMPL_EVENT;
 
        iwcq = (struct i40iw_cq *)ibcq;
        ukcq = &iwcq->sc_cq.cq_uk;
-       if (notify_flags == IB_CQ_NEXT_COMP)
-               cq_notify = IW_CQ_COMPL_EVENT;
+       if (notify_flags == IB_CQ_SOLICITED)
+               cq_notify = IW_CQ_COMPL_SOLICITED;
+       spin_lock_irqsave(&iwcq->lock, flags);
        ukcq->ops.iw_cq_request_notification(ukcq, cq_notify);
+       spin_unlock_irqrestore(&iwcq->lock, flags);
        return 0;
 }
 
index 105246f..5fc6233 100644 (file)
@@ -47,6 +47,7 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 
        ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
        ah->av.ib.g_slid  = ah_attr->src_path_bits;
+       ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
        if (ah_attr->ah_flags & IB_AH_GRH) {
                ah->av.ib.g_slid   |= 0x80;
                ah->av.ib.gid_index = ah_attr->grh.sgid_index;
@@ -64,7 +65,6 @@ static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
                       !(1 << ah->av.ib.stat_rate & dev->caps.stat_rate_support))
                        --ah->av.ib.stat_rate;
        }
-       ah->av.ib.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
 
        return &ah->ibah;
 }
index d68f506..9c2e53d 100644 (file)
@@ -527,7 +527,7 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
                tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
        spin_unlock(&tun_qp->tx_lock);
        if (ret)
-               goto out;
+               goto end;
 
        tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
        if (tun_qp->tx_ring[tun_tx_ix].ah)
@@ -596,9 +596,15 @@ int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
        wr.wr.send_flags = IB_SEND_SIGNALED;
 
        ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
-out:
-       if (ret)
-               ib_destroy_ah(ah);
+       if (!ret)
+               return 0;
+ out:
+       spin_lock(&tun_qp->tx_lock);
+       tun_qp->tx_ix_tail++;
+       spin_unlock(&tun_qp->tx_lock);
+       tun_qp->tx_ring[tun_tx_ix].ah = NULL;
+end:
+       ib_destroy_ah(ah);
        return ret;
 }
 
@@ -1326,9 +1332,15 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
 
 
        ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
+       if (!ret)
+               return 0;
+
+       spin_lock(&sqp->tx_lock);
+       sqp->tx_ix_tail++;
+       spin_unlock(&sqp->tx_lock);
+       sqp->tx_ring[wire_tx_ix].ah = NULL;
 out:
-       if (ret)
-               ib_destroy_ah(ah);
+       ib_destroy_ah(ah);
        return ret;
 }
 
index 0eb09e1..42a4607 100644 (file)
@@ -1704,6 +1704,9 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
        struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
        int is_bonded = mlx4_is_bonded(dev);
 
+       if (flow_attr->port < 1 || flow_attr->port > qp->device->phys_port_cnt)
+               return ERR_PTR(-EINVAL);
+
        if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
            (flow_attr->type != IB_FLOW_ATTR_NORMAL))
                return ERR_PTR(-EOPNOTSUPP);
index 6c5ac5d..29acda2 100644 (file)
@@ -139,7 +139,7 @@ struct mlx4_ib_mr {
        u32                     max_pages;
        struct mlx4_mr          mmr;
        struct ib_umem         *umem;
-       void                    *pages_alloc;
+       size_t                  page_map_size;
 };
 
 struct mlx4_ib_mw {
index 6312721..5d73989 100644 (file)
@@ -277,20 +277,23 @@ mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
 {
-       int size = max_pages * sizeof(u64);
-       int add_size;
        int ret;
 
-       add_size = max_t(int, MLX4_MR_PAGES_ALIGN - ARCH_KMALLOC_MINALIGN, 0);
+       /* Ensure that size is aligned to DMA cacheline
+        * requirements.
+        * max_pages is limited to MLX4_MAX_FAST_REG_PAGES
+        * so page_map_size will never cross PAGE_SIZE.
+        */
+       mr->page_map_size = roundup(max_pages * sizeof(u64),
+                                   MLX4_MR_PAGES_ALIGN);
 
-       mr->pages_alloc = kzalloc(size + add_size, GFP_KERNEL);
-       if (!mr->pages_alloc)
+       /* Prevent cross page boundary allocation. */
+       mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
+       if (!mr->pages)
                return -ENOMEM;
 
-       mr->pages = PTR_ALIGN(mr->pages_alloc, MLX4_MR_PAGES_ALIGN);
-
        mr->page_map = dma_map_single(device->dma_device, mr->pages,
-                                     size, DMA_TO_DEVICE);
+                                     mr->page_map_size, DMA_TO_DEVICE);
 
        if (dma_mapping_error(device->dma_device, mr->page_map)) {
                ret = -ENOMEM;
@@ -298,9 +301,9 @@ mlx4_alloc_priv_pages(struct ib_device *device,
        }
 
        return 0;
-err:
-       kfree(mr->pages_alloc);
 
+err:
+       free_page((unsigned long)mr->pages);
        return ret;
 }
 
@@ -309,11 +312,10 @@ mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
 {
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;
-               int size = mr->max_pages * sizeof(u64);
 
                dma_unmap_single(device->dma_device, mr->page_map,
-                                size, DMA_TO_DEVICE);
-               kfree(mr->pages_alloc);
+                                mr->page_map_size, DMA_TO_DEVICE);
+               free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
 }
@@ -537,14 +539,12 @@ int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
        mr->npages = 0;
 
        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
-                                  sizeof(u64) * mr->max_pages,
-                                  DMA_TO_DEVICE);
+                                  mr->page_map_size, DMA_TO_DEVICE);
 
        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);
 
        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
-                                     sizeof(u64) * mr->max_pages,
-                                     DMA_TO_DEVICE);
+                                     mr->page_map_size, DMA_TO_DEVICE);
 
        return rc;
 }
index 81b0e1f..8db8405 100644 (file)
@@ -362,7 +362,7 @@ static int send_wqe_overhead(enum mlx4_ib_qp_type type, u32 flags)
                        sizeof (struct mlx4_wqe_raddr_seg);
        case MLX4_IB_QPT_RC:
                return sizeof (struct mlx4_wqe_ctrl_seg) +
-                       sizeof (struct mlx4_wqe_atomic_seg) +
+                       sizeof (struct mlx4_wqe_masked_atomic_seg) +
                        sizeof (struct mlx4_wqe_raddr_seg);
        case MLX4_IB_QPT_SMI:
        case MLX4_IB_QPT_GSI:
@@ -1191,8 +1191,10 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
        {
                err = create_qp_common(to_mdev(pd->device), pd, init_attr,
                                       udata, 0, &qp, gfp);
-               if (err)
+               if (err) {
+                       kfree(qp);
                        return ERR_PTR(err);
+               }
 
                qp->ibqp.qp_num = qp->mqp.qpn;
                qp->xrcdn = xrcdn;
index 1534af1..364aab9 100644 (file)
@@ -121,7 +121,7 @@ static void pma_cnt_ext_assign(struct ib_pma_portcounters_ext *pma_cnt_ext,
        pma_cnt_ext->port_xmit_data =
                cpu_to_be64(MLX5_SUM_CNT(out, transmitted_ib_unicast.octets,
                                         transmitted_ib_multicast.octets) >> 2);
-       pma_cnt_ext->port_xmit_data =
+       pma_cnt_ext->port_rcv_data =
                cpu_to_be64(MLX5_SUM_CNT(out, received_ib_unicast.octets,
                                         received_ib_multicast.octets) >> 2);
        pma_cnt_ext->port_xmit_packets =
index ce43422..ce0a7ab 100644 (file)
@@ -3332,10 +3332,11 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
                        return MLX5_FENCE_MODE_SMALL_AND_FENCE;
                else
                        return fence;
-
-       } else {
-               return 0;
+       } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
+               return MLX5_FENCE_MODE_FENCE;
        }
+
+       return 0;
 }
 
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
index ff946d5..382466a 100644 (file)
@@ -2178,6 +2178,11 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
 
        switch (cmd.type) {
        case QIB_CMD_ASSIGN_CTXT:
+               if (rcd) {
+                       ret = -EINVAL;
+                       goto bail;
+               }
+
                ret = qib_assign_ctxt(fp, &cmd.cmd.user_info);
                if (ret)
                        goto bail;
index 7de5134..41ba7e9 100644 (file)
@@ -369,8 +369,8 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
                        /* wrap to first map page, invert bit 0 */
                        offset = qpt->incr | ((offset & 1) ^ 1);
                }
-               /* there can be no bits at shift and below */
-               WARN_ON(offset & (rdi->dparms.qos_shift - 1));
+               /* there can be no set bits in low-order QoS bits */
+               WARN_ON(offset & (BIT(rdi->dparms.qos_shift) - 1));
                qpn = mk_qpn(qpt, map, offset);
        }
 
@@ -576,12 +576,6 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
        qp->s_ssn = 1;
        qp->s_lsn = 0;
        qp->s_mig_state = IB_MIG_MIGRATED;
-       if (qp->s_ack_queue)
-               memset(
-                       qp->s_ack_queue,
-                       0,
-                       rvt_max_atomic(rdi) *
-                               sizeof(*qp->s_ack_queue));
        qp->r_head_ack_queue = 0;
        qp->s_tail_ack_queue = 0;
        qp->s_num_rd_atomic = 0;
@@ -705,8 +699,10 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd,
                 * initialization that is needed.
                 */
                priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp);
-               if (!priv)
+               if (IS_ERR(priv)) {
+                       ret = priv;
                        goto bail_qp;
+               }
                qp->priv = priv;
                qp->timeout_jiffies =
                        usecs_to_jiffies((4096UL * (1UL << qp->timeout)) /
index e1cc2cc..30c4fda 100644 (file)
@@ -501,9 +501,7 @@ static noinline int check_support(struct rvt_dev_info *rdi, int verb)
                            !rdi->driver_f.quiesce_qp ||
                            !rdi->driver_f.notify_error_qp ||
                            !rdi->driver_f.mtu_from_qp ||
-                           !rdi->driver_f.mtu_to_path_mtu ||
-                           !rdi->driver_f.shut_down_port ||
-                           !rdi->driver_f.cap_mask_chg)
+                           !rdi->driver_f.mtu_to_path_mtu)
                                return -EINVAL;
                break;
 
index e68b20c..4a41556 100644 (file)
@@ -1638,8 +1638,7 @@ retry:
         */
        qp_init->cap.max_send_wr = srp_sq_size / 2;
        qp_init->cap.max_rdma_ctxs = srp_sq_size / 2;
-       qp_init->cap.max_send_sge = max(sdev->device->attrs.max_sge_rd,
-                                       sdev->device->attrs.max_sge);
+       qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
        qp_init->port_num = ch->sport->port;
 
        ch->qp = ib_create_qp(sdev->pd, qp_init);
index fee6bfd..3890304 100644 (file)
@@ -106,6 +106,7 @@ enum {
        SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
 
        SRPT_DEF_SG_TABLESIZE = 128,
+       SRPT_DEF_SG_PER_WQE = 16,
 
        MIN_SRPT_SQ_SIZE = 16,
        DEF_SRPT_SQ_SIZE = 4096,
index 94b6821..5f6b3bc 100644 (file)
@@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = {
        .attach_dev             = arm_smmu_attach_dev,
        .map                    = arm_smmu_map,
        .unmap                  = arm_smmu_unmap,
+       .map_sg                 = default_iommu_map_sg,
        .iova_to_phys           = arm_smmu_iova_to_phys,
        .add_device             = arm_smmu_add_device,
        .remove_device          = arm_smmu_remove_device,
index a644d0c..1070094 100644 (file)
@@ -3222,11 +3222,6 @@ static int __init init_dmars(void)
                        }
                }
 
-               iommu_flush_write_buffer(iommu);
-               iommu_set_root_entry(iommu);
-               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
                if (!ecap_pass_through(iommu->ecap))
                        hw_pass_through = 0;
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -3235,6 +3230,18 @@ static int __init init_dmars(void)
 #endif
        }
 
+       /*
+        * Now that qi is enabled on all iommus, set the root entry and flush
+        * caches. This is required on some Intel X58 chipsets, otherwise the
+        * flush_context function will loop forever and the boot hangs.
+        */
+       for_each_active_iommu(iommu, drhd) {
+               iommu_flush_write_buffer(iommu);
+               iommu_set_root_entry(iommu);
+               iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+               iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+       }
+
        if (iommu_pass_through)
                iommu_identity_mapping |= IDENTMAP_ALL;
 
index c7d6156..25b4627 100644 (file)
@@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
        dte_addr = virt_to_phys(rk_domain->dt);
        for (i = 0; i < iommu->num_mmu; i++) {
                rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
-               rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+               rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
                rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
        }
 
index 3b5e10a..8a4adbe 100644 (file)
@@ -746,6 +746,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
                /* verify that it doesn't conflict with an IPI irq */
                if (test_bit(spec->hwirq, ipi_resrv))
                        return -EBUSY;
+
+               hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
+
+               return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
+                                                    &gic_level_irq_controller,
+                                                    NULL);
        } else {
                base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
                if (base_hwirq == gic_shared_intrs) {
@@ -867,10 +873,14 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
                                                    &gic_level_irq_controller,
                                                    NULL);
                if (ret)
-                       return ret;
+                       goto error;
        }
 
        return 0;
+
+error:
+       irq_domain_free_irqs_parent(d, virq, nr_irqs);
+       return ret;
 }
 
 void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
index 3495d5d..3bce448 100644 (file)
@@ -53,11 +53,12 @@ static void led_timer_function(unsigned long data)
 
        if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
                led_set_brightness_nosleep(led_cdev, LED_OFF);
+               led_cdev->flags &= ~LED_BLINK_SW;
                return;
        }
 
        if (led_cdev->flags & LED_BLINK_ONESHOT_STOP) {
-               led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP;
+               led_cdev->flags &=  ~(LED_BLINK_ONESHOT_STOP | LED_BLINK_SW);
                return;
        }
 
@@ -151,6 +152,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
                return;
        }
 
+       led_cdev->flags |= LED_BLINK_SW;
        mod_timer(&led_cdev->blink_timer, jiffies + 1);
 }
 
@@ -219,6 +221,7 @@ void led_stop_software_blink(struct led_classdev *led_cdev)
        del_timer_sync(&led_cdev->blink_timer);
        led_cdev->blink_delay_on = 0;
        led_cdev->blink_delay_off = 0;
+       led_cdev->flags &= ~LED_BLINK_SW;
 }
 EXPORT_SYMBOL_GPL(led_stop_software_blink);
 
@@ -226,10 +229,10 @@ void led_set_brightness(struct led_classdev *led_cdev,
                        enum led_brightness brightness)
 {
        /*
-        * In case blinking is on delay brightness setting
+        * If software blink is active, delay brightness setting
         * until the next timer tick.
         */
-       if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) {
+       if (led_cdev->flags & LED_BLINK_SW) {
                /*
                 * If we need to disable soft blinking delegate this to the
                 * work queue task to avoid problems in case we are called
index 410c39c..c9f3862 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/leds.h>
 #include <linux/reboot.h>
+#include <linux/suspend.h>
 #include "../leds.h"
 
 static int panic_heartbeats;
@@ -154,6 +155,30 @@ static struct led_trigger heartbeat_led_trigger = {
        .deactivate = heartbeat_trig_deactivate,
 };
 
+static int heartbeat_pm_notifier(struct notifier_block *nb,
+                                unsigned long pm_event, void *unused)
+{
+       int rc;
+
+       switch (pm_event) {
+       case PM_SUSPEND_PREPARE:
+       case PM_HIBERNATION_PREPARE:
+       case PM_RESTORE_PREPARE:
+               led_trigger_unregister(&heartbeat_led_trigger);
+               break;
+       case PM_POST_SUSPEND:
+       case PM_POST_HIBERNATION:
+       case PM_POST_RESTORE:
+               rc = led_trigger_register(&heartbeat_led_trigger);
+               if (rc)
+                       pr_err("could not re-register heartbeat trigger\n");
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_DONE;
+}
+
 static int heartbeat_reboot_notifier(struct notifier_block *nb,
                                     unsigned long code, void *unused)
 {
@@ -168,6 +193,10 @@ static int heartbeat_panic_notifier(struct notifier_block *nb,
        return NOTIFY_DONE;
 }
 
+static struct notifier_block heartbeat_pm_nb = {
+       .notifier_call = heartbeat_pm_notifier,
+};
+
 static struct notifier_block heartbeat_reboot_nb = {
        .notifier_call = heartbeat_reboot_notifier,
 };
@@ -184,12 +213,14 @@ static int __init heartbeat_trig_init(void)
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &heartbeat_panic_nb);
                register_reboot_notifier(&heartbeat_reboot_nb);
+               register_pm_notifier(&heartbeat_pm_nb);
        }
        return rc;
 }
 
 static void __exit heartbeat_trig_exit(void)
 {
+       unregister_pm_notifier(&heartbeat_pm_nb);
        unregister_reboot_notifier(&heartbeat_reboot_nb);
        atomic_notifier_chain_unregister(&panic_notifier_list,
                                         &heartbeat_panic_nb);
index b73c6e7..6f2c852 100644 (file)
@@ -61,21 +61,36 @@ static int mcb_probe(struct device *dev)
        struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
        struct mcb_device *mdev = to_mcb_device(dev);
        const struct mcb_device_id *found_id;
+       struct module *carrier_mod;
+       int ret;
 
        found_id = mcb_match_id(mdrv->id_table, mdev);
        if (!found_id)
                return -ENODEV;
 
-       return mdrv->probe(mdev, found_id);
+       carrier_mod = mdev->dev.parent->driver->owner;
+       if (!try_module_get(carrier_mod))
+               return -EINVAL;
+
+       get_device(dev);
+       ret = mdrv->probe(mdev, found_id);
+       if (ret)
+               module_put(carrier_mod);
+
+       return ret;
 }
 
 static int mcb_remove(struct device *dev)
 {
        struct mcb_driver *mdrv = to_mcb_driver(dev->driver);
        struct mcb_device *mdev = to_mcb_device(dev);
+       struct module *carrier_mod;
 
        mdrv->remove(mdev);
 
+       carrier_mod = mdev->dev.parent->driver->owner;
+       module_put(carrier_mod);
+
        put_device(&mdev->dev);
 
        return 0;
index ca94bde..8bef433 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Media Controller ancillary functions
  *
- * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (c) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
  * Copyright (C) 2016 Shuah Khan <shuahkh@osg.samsung.com>
  * Copyright (C) 2006-2010 Nokia Corporation
  * Copyright (c) 2016 Intel Corporation.
index af4884b..15508df 100644 (file)
@@ -398,7 +398,7 @@ static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
                           GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
-                          GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
+                          GPMC_CONFIG4_WEEXTRADELAY, p->we_extra_delay);
        gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
                           GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
                           p->cycle2cyclesamecsen);
index eed254d..641c1a5 100644 (file)
@@ -730,7 +730,7 @@ static void mei_cl_wake_all(struct mei_cl *cl)
        /* synchronized under device mutex */
        if (waitqueue_active(&cl->wait)) {
                cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
-               wake_up_interruptible(&cl->wait);
+               wake_up(&cl->wait);
        }
 }
 
index 16baeb5..ef36182 100644 (file)
@@ -1147,11 +1147,17 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
  */
 static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
 {
-       struct kstat stat;
        int err, minor;
+       struct path path;
+       struct kstat stat;
 
        /* Probably this is an MTD character device node path */
-       err = vfs_stat(mtd_dev, &stat);
+       err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+       if (err)
+               return ERR_PTR(err);
+
+       err = vfs_getattr(&path, &stat);
+       path_put(&path);
        if (err)
                return ERR_PTR(err);
 
@@ -1160,6 +1166,7 @@ static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
                return ERR_PTR(-EINVAL);
 
        minor = MINOR(stat.rdev);
+
        if (minor & 1)
                /*
                 * Just do not think the "/dev/mtdrX" devices support is need,
index 5780dd1..ebf5172 100644 (file)
@@ -575,6 +575,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
        int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
        struct ubi_volume *vol = ubi->volumes[idx];
        struct ubi_vid_hdr *vid_hdr;
+       uint32_t crc;
 
        vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
        if (!vid_hdr)
@@ -599,14 +600,8 @@ retry:
                goto out_put;
        }
 
-       vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
-       err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
-       if (err) {
-               up_read(&ubi->fm_eba_sem);
-               goto write_error;
-       }
+       ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
 
-       data_size = offset + len;
        mutex_lock(&ubi->buf_mutex);
        memset(ubi->peb_buf + offset, 0xFF, len);
 
@@ -621,6 +616,19 @@ retry:
 
        memcpy(ubi->peb_buf + offset, buf, len);
 
+       data_size = offset + len;
+       crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
+       vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
+       vid_hdr->copy_flag = 1;
+       vid_hdr->data_size = cpu_to_be32(data_size);
+       vid_hdr->data_crc = cpu_to_be32(crc);
+       err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
+       if (err) {
+               mutex_unlock(&ubi->buf_mutex);
+               up_read(&ubi->fm_eba_sem);
+               goto write_error;
+       }
+
        err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
        if (err) {
                mutex_unlock(&ubi->buf_mutex);
index 348dbbc..a9e2cef 100644 (file)
@@ -302,6 +302,7 @@ EXPORT_SYMBOL_GPL(ubi_open_volume_nm);
 struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
 {
        int error, ubi_num, vol_id;
+       struct path path;
        struct kstat stat;
 
        dbg_gen("open volume %s, mode %d", pathname, mode);
@@ -309,7 +310,12 @@ struct ubi_volume_desc *ubi_open_volume_path(const char *pathname, int mode)
        if (!pathname || !*pathname)
                return ERR_PTR(-EINVAL);
 
-       error = vfs_stat(pathname, &stat);
+       error = kern_path(pathname, LOOKUP_FOLLOW, &path);
+       if (error)
+               return ERR_PTR(error);
+
+       error = vfs_getattr(&path, &stat);
+       path_put(&path);
        if (error)
                return ERR_PTR(error);
 
index dfbab61..1fa3a32 100644 (file)
@@ -221,9 +221,9 @@ static int pci_vc_do_save_buffer(struct pci_dev *dev, int pos,
                else
                        pci_write_config_word(dev, pos + PCI_VC_PORT_CTRL,
                                              *(u16 *)buf);
-               buf += 2;
+               buf += 4;
        }
-       len += 2;
+       len += 4;
 
        /*
         * If we have any Low Priority VCs and a VC Arbitration Table Offset
index 1b8304e..140436a 100644 (file)
@@ -1010,8 +1010,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
                if (!ret)
                        ret = init_fn(pmu);
        } else {
-               ret = probe_current_pmu(pmu, probe_table);
                cpumask_setall(&pmu->supported_cpus);
+               ret = probe_current_pmu(pmu, probe_table);
        }
 
        if (ret) {
index cc093eb..8b851f7 100644 (file)
@@ -233,8 +233,12 @@ static inline int __is_running(const struct exynos_mipi_phy_desc *data,
                        struct exynos_mipi_video_phy *state)
 {
        u32 val;
+       int ret;
+
+       ret = regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
+       if (ret)
+               return 0;
 
-       regmap_read(state->regmaps[data->resetn_map], data->resetn_reg, &val);
        return val & data->resetn_val;
 }
 
index 0a477d2..bf46844 100644 (file)
@@ -293,11 +293,18 @@ static int ti_pipe3_init(struct phy *x)
                ret = ti_pipe3_dpll_wait_lock(phy);
        }
 
-       /* Program the DPLL only if not locked */
+       /* SATA has issues if re-programmed when locked */
        val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
-       if (!(val & PLL_LOCK))
-               if (ti_pipe3_dpll_program(phy))
-                       return -EINVAL;
+       if ((val & PLL_LOCK) && of_device_is_compatible(phy->dev->of_node,
+                                                       "ti,phy-pipe3-sata"))
+               return ret;
+
+       /* Program the DPLL */
+       ret = ti_pipe3_dpll_program(phy);
+       if (ret) {
+               ti_pipe3_disable_clocks(phy);
+               return -EINVAL;
+       }
 
        return ret;
 }
index 6b6af6c..d9b10a3 100644 (file)
@@ -463,7 +463,8 @@ static int twl4030_phy_power_on(struct phy *phy)
        twl4030_usb_set_mode(twl, twl->usb_mode);
        if (twl->usb_mode == T2_USB_MODE_ULPI)
                twl4030_i2c_access(twl, 0);
-       schedule_delayed_work(&twl->id_workaround_work, 0);
+       twl->linkstat = MUSB_UNKNOWN;
+       schedule_delayed_work(&twl->id_workaround_work, HZ);
 
        return 0;
 }
@@ -537,6 +538,7 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
        struct twl4030_usb *twl = _twl;
        enum musb_vbus_id_status status;
        bool status_changed = false;
+       int err;
 
        status = twl4030_usb_linkstat(twl);
 
@@ -567,7 +569,9 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
                        pm_runtime_mark_last_busy(twl->dev);
                        pm_runtime_put_autosuspend(twl->dev);
                }
-               musb_mailbox(status);
+               err = musb_mailbox(status);
+               if (err)
+                       twl->linkstat = MUSB_UNKNOWN;
        }
 
        /* don't schedule during sleep - irq works right then */
@@ -595,7 +599,8 @@ static int twl4030_phy_init(struct phy *phy)
        struct twl4030_usb *twl = phy_get_drvdata(phy);
 
        pm_runtime_get_sync(twl->dev);
-       schedule_delayed_work(&twl->id_workaround_work, 0);
+       twl->linkstat = MUSB_UNKNOWN;
+       schedule_delayed_work(&twl->id_workaround_work, HZ);
        pm_runtime_mark_last_busy(twl->dev);
        pm_runtime_put_autosuspend(twl->dev);
 
@@ -763,7 +768,8 @@ static int twl4030_usb_remove(struct platform_device *pdev)
        if (cable_present(twl->linkstat))
                pm_runtime_put_noidle(twl->dev);
        pm_runtime_mark_last_busy(twl->dev);
-       pm_runtime_put_sync_suspend(twl->dev);
+       pm_runtime_dont_use_autosuspend(&pdev->dev);
+       pm_runtime_put_sync(twl->dev);
        pm_runtime_disable(twl->dev);
 
        /* autogate 60MHz ULPI clock,
index c06bb85..3ec0025 100644 (file)
@@ -103,7 +103,6 @@ config DELL_SMBIOS
 
 config DELL_LAPTOP
        tristate "Dell Laptop Extras"
-       depends on X86
        depends on DELL_SMBIOS
        depends on DMI
        depends on BACKLIGHT_CLASS_DEVICE
@@ -505,7 +504,7 @@ config THINKPAD_ACPI_HOTKEY_POLL
 
 config SENSORS_HDAPS
        tristate "Thinkpad Hard Drive Active Protection System (hdaps)"
-       depends on INPUT && X86
+       depends on INPUT
        select INPUT_POLLDEV
        default n
        help
@@ -749,7 +748,7 @@ config TOSHIBA_WMI
 
 config ACPI_CMPC
        tristate "CMPC Laptop Extras"
-       depends on X86 && ACPI
+       depends on ACPI
        depends on RFKILL || RFKILL=n
        select INPUT
        select BACKLIGHT_CLASS_DEVICE
@@ -848,7 +847,7 @@ config INTEL_IMR
 
 config INTEL_PMC_CORE
        bool "Intel PMC Core driver"
-       depends on X86 && PCI
+       depends on PCI
        ---help---
          The Intel Platform Controller Hub for Intel Core SoCs provides access
          to Power Management Controller registers via a PCI interface. This
@@ -860,7 +859,7 @@ config INTEL_PMC_CORE
 
 config IBM_RTL
        tristate "Device driver to enable PRTL support"
-       depends on X86 && PCI
+       depends on PCI
        ---help---
         Enable support for IBM Premium Real Time Mode (PRTM).
         This module will allow you the enter and exit PRTM in the BIOS via
@@ -894,7 +893,6 @@ config XO15_EBOOK
 
 config SAMSUNG_LAPTOP
        tristate "Samsung Laptop driver"
-       depends on X86
        depends on RFKILL || RFKILL = n
        depends on ACPI_VIDEO || ACPI_VIDEO = n
        depends on BACKLIGHT_CLASS_DEVICE
index 4a23fbc..d1a091b 100644 (file)
@@ -567,6 +567,7 @@ static void ideapad_sysfs_exit(struct ideapad_private *priv)
 static const struct key_entry ideapad_keymap[] = {
        { KE_KEY, 6,  { KEY_SWITCHVIDEOMODE } },
        { KE_KEY, 7,  { KEY_CAMERA } },
+       { KE_KEY, 8,  { KEY_MICMUTE } },
        { KE_KEY, 11, { KEY_F16 } },
        { KE_KEY, 13, { KEY_WLAN } },
        { KE_KEY, 16, { KEY_PROG1 } },
@@ -809,6 +810,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
                                break;
                        case 13:
                        case 11:
+                       case 8:
                        case 7:
                        case 6:
                                ideapad_input_report(priv, vpc_bit);
index c3bfa1f..b65ce75 100644 (file)
@@ -2043,6 +2043,7 @@ static int hotkey_autosleep_ack;
 
 static u32 hotkey_orig_mask;           /* events the BIOS had enabled */
 static u32 hotkey_all_mask;            /* all events supported in fw */
+static u32 hotkey_adaptive_all_mask;   /* all adaptive events supported in fw */
 static u32 hotkey_reserved_mask;       /* events better left disabled */
 static u32 hotkey_driver_mask;         /* events needed by the driver */
 static u32 hotkey_user_mask;           /* events visible to userspace */
@@ -2742,6 +2743,17 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
 
 static DEVICE_ATTR_RO(hotkey_all_mask);
 
+/* sysfs hotkey all_mask ----------------------------------------------- */
+static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
+                          struct device_attribute *attr,
+                          char *buf)
+{
+       return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+                       hotkey_adaptive_all_mask | hotkey_source_mask);
+}
+
+static DEVICE_ATTR_RO(hotkey_adaptive_all_mask);
+
 /* sysfs hotkey recommended_mask --------------------------------------- */
 static ssize_t hotkey_recommended_mask_show(struct device *dev,
                                            struct device_attribute *attr,
@@ -2985,6 +2997,7 @@ static struct attribute *hotkey_attributes[] __initdata = {
        &dev_attr_wakeup_hotunplug_complete.attr,
        &dev_attr_hotkey_mask.attr,
        &dev_attr_hotkey_all_mask.attr,
+       &dev_attr_hotkey_adaptive_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
 #ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
        &dev_attr_hotkey_source_mask.attr,
@@ -3321,20 +3334,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
        if (!tp_features.hotkey)
                return 1;
 
-       /*
-        * Check if we have an adaptive keyboard, like on the
-        * Lenovo Carbon X1 2014 (2nd Gen).
-        */
-       if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
-               if ((hkeyv >> 8) == 2) {
-                       tp_features.has_adaptive_kbd = true;
-                       res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
-                                       &adaptive_kbd_attr_group);
-                       if (res)
-                               goto err_exit;
-               }
-       }
-
        quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
                                     ARRAY_SIZE(tpacpi_hotkey_qtable));
 
@@ -3357,30 +3356,70 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
           A30, R30, R31, T20-22, X20-21, X22-24.  Detected by checking
           for HKEY interface version 0x100 */
        if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
-               if ((hkeyv >> 8) != 1) {
-                       pr_err("unknown version of the HKEY interface: 0x%x\n",
-                              hkeyv);
-                       pr_err("please report this to %s\n", TPACPI_MAIL);
-               } else {
+               vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
+                           "firmware HKEY interface version: 0x%x\n",
+                           hkeyv);
+
+               switch (hkeyv >> 8) {
+               case 1:
                        /*
                         * MHKV 0x100 in A31, R40, R40e,
                         * T4x, X31, and later
                         */
-                       vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
-                               "firmware HKEY interface version: 0x%x\n",
-                               hkeyv);
 
                        /* Paranoia check AND init hotkey_all_mask */
                        if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
                                        "MHKA", "qd")) {
-                               pr_err("missing MHKA handler, "
-                                      "please report this to %s\n",
+                               pr_err("missing MHKA handler, please report this to %s\n",
                                       TPACPI_MAIL);
                                /* Fallback: pre-init for FN+F3,F4,F12 */
                                hotkey_all_mask = 0x080cU;
                        } else {
                                tp_features.hotkey_mask = 1;
                        }
+                       break;
+
+               case 2:
+                       /*
+                        * MHKV 0x200 in X1, T460s, X260, T560, X1 Tablet (2016)
+                        */
+
+                       /* Paranoia check AND init hotkey_all_mask */
+                       if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
+                                       "MHKA", "dd", 1)) {
+                               pr_err("missing MHKA handler, please report this to %s\n",
+                                      TPACPI_MAIL);
+                               /* Fallback: pre-init for FN+F3,F4,F12 */
+                               hotkey_all_mask = 0x080cU;
+                       } else {
+                               tp_features.hotkey_mask = 1;
+                       }
+
+                       /*
+                        * Check if we have an adaptive keyboard, like on the
+                        * Lenovo Carbon X1 2014 (2nd Gen).
+                        */
+                       if (acpi_evalf(hkey_handle, &hotkey_adaptive_all_mask,
+                                      "MHKA", "dd", 2)) {
+                               if (hotkey_adaptive_all_mask != 0) {
+                                       tp_features.has_adaptive_kbd = true;
+                                       res = sysfs_create_group(
+                                               &tpacpi_pdev->dev.kobj,
+                                               &adaptive_kbd_attr_group);
+                                       if (res)
+                                               goto err_exit;
+                               }
+                       } else {
+                               tp_features.has_adaptive_kbd = false;
+                               hotkey_adaptive_all_mask = 0x0U;
+                       }
+                       break;
+
+               default:
+                       pr_err("unknown version of the HKEY interface: 0x%x\n",
+                              hkeyv);
+                       pr_err("please report this to %s\n", TPACPI_MAIL);
+                       break;
                }
        }
 
index dba3843..ed337a8 100644 (file)
@@ -457,7 +457,8 @@ int pwm_apply_state(struct pwm_device *pwm, struct pwm_state *state)
 {
        int err;
 
-       if (!pwm)
+       if (!pwm || !state || !state->period ||
+           state->duty_cycle > state->period)
                return -EINVAL;
 
        if (!memcmp(state, &pwm->state, sizeof(*state)))
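
With the stricter validation above, pwm_apply_state() rejects a NULL state, a zero period, or a duty cycle larger than the period, so callers should build a complete state and check the return value (as the sysfs fix below now does). A minimal sketch, assuming the atomic PWM API and a device obtained elsewhere via pwm_get() or devm_pwm_get():

#include <linux/pwm.h>

static int apply_25_percent(struct pwm_device *pwm)
{
	struct pwm_state state;

	pwm_get_state(pwm, &state);	/* start from the current state */
	state.period = 1000000;		/* 1 ms period, in nanoseconds */
	state.duty_cycle = 250000;	/* 25% duty; must not exceed period */
	state.enabled = true;

	return pwm_apply_state(pwm, &state);
}
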
index f994c7e..14fc011 100644 (file)
@@ -272,7 +272,7 @@ static int atmel_hlcdc_pwm_probe(struct platform_device *pdev)
        chip->chip.of_pwm_n_cells = 3;
        chip->chip.can_sleep = 1;
 
-       ret = pwmchip_add(&chip->chip);
+       ret = pwmchip_add_with_polarity(&chip->chip, PWM_POLARITY_INVERSED);
        if (ret) {
                clk_disable_unprepare(hlcdc->periph_clk);
                return ret;
index d985992..01695d4 100644 (file)
@@ -152,7 +152,7 @@ static ssize_t enable_store(struct device *child,
                goto unlock;
        }
 
-       pwm_apply_state(pwm, &state);
+       ret = pwm_apply_state(pwm, &state);
 
 unlock:
        mutex_unlock(&export->lock);
index d4c2856..3ddc85e 100644 (file)
@@ -1122,7 +1122,7 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
                } else {
                        struct scsi_cmnd *SCp;
 
-                       SCp = scsi_host_find_tag(SDp->host, SCSI_NO_TAG);
+                       SCp = SDp->current_cmnd;
                        if(unlikely(SCp == NULL)) {
                                sdev_printk(KERN_ERR, SDp,
                                        "no saved request for untagged cmd\n");
@@ -1826,7 +1826,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
                       slot->tag, slot);
        } else {
                slot->tag = SCSI_NO_TAG;
-               /* must populate current_cmnd for scsi_host_find_tag to work */
+               /* save current command for reselection */
                SCp->device->current_cmnd = SCp;
        }
        /* sanity check: some of the commands generated by the mid-layer
index a8b610e..106a6ad 100644 (file)
@@ -1128,7 +1128,6 @@ static int scsi_eh_action(struct scsi_cmnd *scmd, int rtn)
  */
 void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, struct list_head *done_q)
 {
-       scmd->device->host->host_failed--;
        scmd->eh_eflags = 0;
        list_move_tail(&scmd->eh_entry, done_q);
 }
@@ -2227,6 +2226,9 @@ int scsi_error_handler(void *data)
                else
                        scsi_unjam_host(shost);
 
+               /* All scmds have been handled */
+               shost->host_failed = 0;
+
                /*
                 * Note - if the above fails completely, the action is to take
                 * individual devices offline and flush the queue of any
index bbfee53..845e49a 100644 (file)
@@ -2521,12 +2521,13 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
        return 0;
 
  failed:
-       if (ni)
+       if (ni) {
                lnet_ni_decref(ni);
+               rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+               rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
+       }
 
        rej.ibr_version             = version;
-       rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
-       rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
        kiblnd_reject(cmid, &rej);
 
        return -ECONNREFUSED;
index c17870c..fbce1f7 100644 (file)
@@ -102,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8  *pbuf)
        if (!efuseTbl)
                return;
 
-       eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord));
+       eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
        if (!eFuseWord) {
                DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
                goto eFuseWord_failed;
index 87ea3b8..363f3a3 100644 (file)
@@ -2072,7 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
 {
        struct hal_ops  *halfunc = &adapt->HalFunc;
 
-       adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL);
+
+       adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
        if (!adapt->HalData)
                DBG_88E("cant not alloc memory for HAL DATA\n");
 
index 6dc810b..944a6dc 100644 (file)
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* Creative SB Audigy 2 NX */
        { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* USB3503 */
+       { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
+
        /* Microsoft Wireless Laser Mouse 6000 Receiver */
        { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
 
@@ -173,6 +176,10 @@ static const struct usb_device_id usb_quirk_list[] = {
        /* MAYA44USB sound device */
        { USB_DEVICE(0x0a92, 0x0091), .driver_info = USB_QUIRK_RESET_RESUME },
 
+       /* ASUS Base Station(T100) */
+       { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
+                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+
        /* Action Semiconductor flash disk */
        { USB_DEVICE(0x10d6, 0x2200), .driver_info =
                        USB_QUIRK_STRING_FETCH_255 },
@@ -188,26 +195,22 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x1908, 0x1315), .driver_info =
                        USB_QUIRK_HONOR_BNUMINTERFACES },
 
-       /* INTEL VALUE SSD */
-       { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
-
-       /* USB3503 */
-       { USB_DEVICE(0x0424, 0x3503), .driver_info = USB_QUIRK_RESET_RESUME },
-
-       /* ASUS Base Station(T100) */
-       { USB_DEVICE(0x0b05, 0x17e0), .driver_info =
-                       USB_QUIRK_IGNORE_REMOTE_WAKEUP },
-
        /* Protocol and OTG Electrical Test Device */
        { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
                        USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
 
+       /* Acer C120 LED Projector */
+       { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
+
        /* Blackmagic Design Intensity Shuttle */
        { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM },
 
        /* Blackmagic Design UltraStudio SDI */
        { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
 
+       /* INTEL VALUE SSD */
+       { USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+
        { }  /* terminating entry must be last */
 };
 
index 3c58d63..dec0b21 100644 (file)
        DWC2_TRACE_SCHEDULER_VB(pr_fmt("%s: SCH: " fmt),                \
                                dev_name(hsotg->dev), ##__VA_ARGS__)
 
+#ifdef CONFIG_MIPS
+/*
+ * There are some MIPS machines that can run in either big-endian
+ * or little-endian mode and that use the dwc2 register without
+ * a byteswap in both ways.
+ * Unlike other architectures, MIPS apparently does not require a
+ * barrier before the __raw_writel() to synchronize with DMA but does
+ * require the barrier after the __raw_writel() to serialize a set of
+ * writes. This set of operations was added specifically for MIPS and
+ * should only be used there.
+ */
 static inline u32 dwc2_readl(const void __iomem *addr)
 {
        u32 value = __raw_readl(addr);
@@ -90,6 +101,22 @@ static inline void dwc2_writel(u32 value, void __iomem *addr)
        pr_info("INFO:: wrote %08x to %p\n", value, addr);
 #endif
 }
+#else
+/* Normal architectures just use readl/write */
+static inline u32 dwc2_readl(const void __iomem *addr)
+{
+       return readl(addr);
+}
+
+static inline void dwc2_writel(u32 value, void __iomem *addr)
+{
+       writel(value, addr);
+
+#ifdef DWC2_LOG_WRITES
+       pr_info("info:: wrote %08x to %p\n", value, addr);
+#endif
+}
+#endif
 
 /* Maximum number of Endpoints/HostChannels */
 #define MAX_EPS_CHANNELS       16
index 4c5e300..26cf09d 100644 (file)
@@ -1018,7 +1018,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
        return 1;
 }
 
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value);
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now);
 
 /**
  * get_ep_head - return the first request on the endpoint
@@ -1094,7 +1094,7 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
                case USB_ENDPOINT_HALT:
                        halted = ep->halted;
 
-                       dwc2_hsotg_ep_sethalt(&ep->ep, set);
+                       dwc2_hsotg_ep_sethalt(&ep->ep, set, true);
 
                        ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
                        if (ret) {
@@ -2948,8 +2948,13 @@ static int dwc2_hsotg_ep_dequeue(struct usb_ep *ep, struct usb_request *req)
  * dwc2_hsotg_ep_sethalt - set halt on a given endpoint
  * @ep: The endpoint to set halt.
  * @value: Set or unset the halt.
+ * @now: If true, stall the endpoint now. Otherwise return -EAGAIN if
+ *       the endpoint is busy processing requests.
+ *
+ * We need to stall the endpoint immediately if request comes from set_feature
+ * protocol command handler.
  */
-static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
+static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value, bool now)
 {
        struct dwc2_hsotg_ep *hs_ep = our_ep(ep);
        struct dwc2_hsotg *hs = hs_ep->parent;
@@ -2969,6 +2974,17 @@ static int dwc2_hsotg_ep_sethalt(struct usb_ep *ep, int value)
                return 0;
        }
 
+       if (hs_ep->isochronous) {
+               dev_err(hs->dev, "%s is Isochronous Endpoint\n", ep->name);
+               return -EINVAL;
+       }
+
+       if (!now && value && !list_empty(&hs_ep->queue)) {
+               dev_dbg(hs->dev, "%s request is pending, cannot halt\n",
+                       ep->name);
+               return -EAGAIN;
+       }
+
        if (hs_ep->dir_in) {
                epreg = DIEPCTL(index);
                epctl = dwc2_readl(hs->regs + epreg);
@@ -3020,7 +3036,7 @@ static int dwc2_hsotg_ep_sethalt_lock(struct usb_ep *ep, int value)
        int ret = 0;
 
        spin_lock_irqsave(&hs->lock, flags);
-       ret = dwc2_hsotg_ep_sethalt(ep, value);
+       ret = dwc2_hsotg_ep_sethalt(ep, value, false);
        spin_unlock_irqrestore(&hs->lock, flags);
 
        return ret;
index 7ddf944..6540506 100644 (file)
 #define DWC3_DEPCMD_GET_RSC_IDX(x)     (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
 #define DWC3_DEPCMD_STATUS(x)          (((x) >> 12) & 0x0F)
 #define DWC3_DEPCMD_HIPRI_FORCERM      (1 << 11)
+#define DWC3_DEPCMD_CLEARPENDIN                (1 << 11)
 #define DWC3_DEPCMD_CMDACT             (1 << 10)
 #define DWC3_DEPCMD_CMDIOC             (1 << 8)
 
index dd5cb55..2f1fb7e 100644 (file)
@@ -128,12 +128,6 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, exynos);
 
-       ret = dwc3_exynos_register_phys(exynos);
-       if (ret) {
-               dev_err(dev, "couldn't register PHYs\n");
-               return ret;
-       }
-
        exynos->dev     = dev;
 
        exynos->clk = devm_clk_get(dev, "usbdrd30");
@@ -183,20 +177,29 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
                goto err3;
        }
 
+       ret = dwc3_exynos_register_phys(exynos);
+       if (ret) {
+               dev_err(dev, "couldn't register PHYs\n");
+               goto err4;
+       }
+
        if (node) {
                ret = of_platform_populate(node, NULL, NULL, dev);
                if (ret) {
                        dev_err(dev, "failed to add dwc3 core\n");
-                       goto err4;
+                       goto err5;
                }
        } else {
                dev_err(dev, "no device node, failed to add dwc3 core\n");
                ret = -ENODEV;
-               goto err4;
+               goto err5;
        }
 
        return 0;
 
+err5:
+       platform_device_unregister(exynos->usb2_phy);
+       platform_device_unregister(exynos->usb3_phy);
 err4:
        regulator_disable(exynos->vdd10);
 err3:
index 5c0adb9..50d6ae6 100644 (file)
@@ -129,12 +129,18 @@ static int st_dwc3_drd_init(struct st_dwc3 *dwc3_data)
        switch (dwc3_data->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
 
-               val &= ~(USB3_FORCE_VBUSVALID | USB3_DELAY_VBUSVALID
+               val &= ~(USB3_DELAY_VBUSVALID
                        | USB3_SEL_FORCE_OPMODE | USB3_FORCE_OPMODE(0x3)
                        | USB3_SEL_FORCE_DPPULLDOWN2 | USB3_FORCE_DPPULLDOWN2
                        | USB3_SEL_FORCE_DMPULLDOWN2 | USB3_FORCE_DMPULLDOWN2);
 
-               val |= USB3_DEVICE_NOT_HOST;
+               /*
+                * USB3_PORT2_FORCE_VBUSVALID When '1' and when
+                * USB3_PORT2_DEVICE_NOT_HOST = 1, forces VBUSVLDEXT2 input
+                * of the pico PHY to 1.
+                */
+
+               val |= USB3_DEVICE_NOT_HOST | USB3_FORCE_VBUSVALID;
                break;
 
        case USB_DR_MODE_HOST:
index 9a7d0bd..07248ff 100644 (file)
@@ -347,6 +347,28 @@ int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
        return ret;
 }
 
+static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep)
+{
+       struct dwc3 *dwc = dep->dwc;
+       struct dwc3_gadget_ep_cmd_params params;
+       u32 cmd = DWC3_DEPCMD_CLEARSTALL;
+
+       /*
+        * As of core revision 2.60a the recommended programming model
+        * is to set the ClearPendIN bit when issuing a Clear Stall EP
+        * command for IN endpoints. This is to prevent an issue where
+        * some (non-compliant) hosts may not send ACK TPs for pending
+        * IN transfers due to a mishandled error condition. Synopsys
+        * STAR 9000614252.
+        */
+       if (dep->direction && (dwc->revision >= DWC3_REVISION_260A))
+               cmd |= DWC3_DEPCMD_CLEARPENDIN;
+
+       memset(&params, 0, sizeof(params));
+
+       return dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
+}
+
 static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
                struct dwc3_trb *trb)
 {
@@ -1314,8 +1336,7 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
                else
                        dep->flags |= DWC3_EP_STALL;
        } else {
-               ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                       DWC3_DEPCMD_CLEARSTALL, &params);
+               ret = dwc3_send_clear_stall_ep_cmd(dep);
                if (ret)
                        dev_err(dwc->dev, "failed to clear STALL on %s\n",
                                        dep->name);
@@ -2247,7 +2268,6 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
 
        for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
                struct dwc3_ep *dep;
-               struct dwc3_gadget_ep_cmd_params params;
                int ret;
 
                dep = dwc->eps[epnum];
@@ -2259,9 +2279,7 @@ static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
 
                dep->flags &= ~DWC3_EP_STALL;
 
-               memset(&params, 0, sizeof(params));
-               ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
-                               DWC3_DEPCMD_CLEARSTALL, &params);
+               ret = dwc3_send_clear_stall_ep_cmd(dep);
                WARN_ON_ONCE(ret);
        }
 }
index d67de0d..eb64848 100644 (file)
@@ -1868,14 +1868,19 @@ unknown:
                                }
                                break;
                        }
-                       req->length = value;
-                       req->context = cdev;
-                       req->zero = value < w_length;
-                       value = composite_ep0_queue(cdev, req, GFP_ATOMIC);
-                       if (value < 0) {
-                               DBG(cdev, "ep_queue --> %d\n", value);
-                               req->status = 0;
-                               composite_setup_complete(gadget->ep0, req);
+
+                       if (value >= 0) {
+                               req->length = value;
+                               req->context = cdev;
+                               req->zero = value < w_length;
+                               value = composite_ep0_queue(cdev, req,
+                                                           GFP_ATOMIC);
+                               if (value < 0) {
+                                       DBG(cdev, "ep_queue --> %d\n", value);
+                                       req->status = 0;
+                                       composite_setup_complete(gadget->ep0,
+                                                                req);
+                               }
                        }
                        return value;
                }
index b6f60ca..70cf347 100644 (file)
@@ -1401,6 +1401,7 @@ static const struct usb_gadget_driver configfs_driver_template = {
                .owner          = THIS_MODULE,
                .name           = "configfs-gadget",
        },
+       .match_existing_only = 1,
 };
 
 static struct config_group *gadgets_make(
index 73515d5..cc33d26 100644 (file)
@@ -2051,7 +2051,7 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
 
                if (len < sizeof(*d) ||
                    d->bFirstInterfaceNumber >= ffs->interfaces_count ||
-                   d->Reserved1)
+                   !d->Reserved1)
                        return -EINVAL;
                for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
                        if (d->Reserved2[i])
@@ -2729,6 +2729,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
                func->ffs->ss_descs_count;
 
        int fs_len, hs_len, ss_len, ret, i;
+       struct ffs_ep *eps_ptr;
 
        /* Make it a single chunk, less management later on */
        vla_group(d);
@@ -2777,12 +2778,9 @@ static int _ffs_func_bind(struct usb_configuration *c,
               ffs->raw_descs_length);
 
        memset(vla_ptr(vlabuf, d, inums), 0xff, d_inums__sz);
-       for (ret = ffs->eps_count; ret; --ret) {
-               struct ffs_ep *ptr;
-
-               ptr = vla_ptr(vlabuf, d, eps);
-               ptr[ret].num = -1;
-       }
+       eps_ptr = vla_ptr(vlabuf, d, eps);
+       for (i = 0; i < ffs->eps_count; i++)
+               eps_ptr[i].num = -1;
 
        /* Save pointers
         * d_eps == vlabuf, func->eps used to kfree vlabuf later
@@ -2851,7 +2849,7 @@ static int _ffs_func_bind(struct usb_configuration *c,
                goto error;
 
        func->function.os_desc_table = vla_ptr(vlabuf, d, os_desc_table);
-       if (c->cdev->use_os_string)
+       if (c->cdev->use_os_string) {
                for (i = 0; i < ffs->interfaces_count; ++i) {
                        struct usb_os_desc *desc;
 
@@ -2862,13 +2860,15 @@ static int _ffs_func_bind(struct usb_configuration *c,
                                vla_ptr(vlabuf, d, ext_compat) + i * 16;
                        INIT_LIST_HEAD(&desc->ext_prop);
                }
-       ret = ffs_do_os_descs(ffs->ms_os_descs_count,
-                             vla_ptr(vlabuf, d, raw_descs) +
-                             fs_len + hs_len + ss_len,
-                             d_raw_descs__sz - fs_len - hs_len - ss_len,
-                             __ffs_func_bind_do_os_desc, func);
-       if (unlikely(ret < 0))
-               goto error;
+               ret = ffs_do_os_descs(ffs->ms_os_descs_count,
+                                     vla_ptr(vlabuf, d, raw_descs) +
+                                     fs_len + hs_len + ss_len,
+                                     d_raw_descs__sz - fs_len - hs_len -
+                                     ss_len,
+                                     __ffs_func_bind_do_os_desc, func);
+               if (unlikely(ret < 0))
+                       goto error;
+       }
        func->function.os_desc_n =
                c->cdev->use_os_string ? ffs->interfaces_count : 0;
 
index c45104e..64706a7 100644 (file)
@@ -161,14 +161,6 @@ static struct usb_endpoint_descriptor hs_ep_out_desc = {
        .wMaxPacketSize =       cpu_to_le16(512)
 };
 
-static struct usb_qualifier_descriptor dev_qualifier = {
-       .bLength =              sizeof(dev_qualifier),
-       .bDescriptorType =      USB_DT_DEVICE_QUALIFIER,
-       .bcdUSB =               cpu_to_le16(0x0200),
-       .bDeviceClass =         USB_CLASS_PRINTER,
-       .bNumConfigurations =   1
-};
-
 static struct usb_descriptor_header *hs_printer_function[] = {
        (struct usb_descriptor_header *) &intf_desc,
        (struct usb_descriptor_header *) &hs_ep_in_desc,
index 35fe3c8..197f733 100644 (file)
@@ -1445,16 +1445,18 @@ static void usbg_drop_tpg(struct se_portal_group *se_tpg)
        for (i = 0; i < TPG_INSTANCES; ++i)
                if (tpg_instances[i].tpg == tpg)
                        break;
-       if (i < TPG_INSTANCES)
+       if (i < TPG_INSTANCES) {
                tpg_instances[i].tpg = NULL;
-       opts = container_of(tpg_instances[i].func_inst,
-               struct f_tcm_opts, func_inst);
-       mutex_lock(&opts->dep_lock);
-       if (opts->has_dep)
-               module_put(opts->dependent);
-       else
-               configfs_undepend_item_unlocked(&opts->func_inst.group.cg_item);
-       mutex_unlock(&opts->dep_lock);
+               opts = container_of(tpg_instances[i].func_inst,
+                       struct f_tcm_opts, func_inst);
+               mutex_lock(&opts->dep_lock);
+               if (opts->has_dep)
+                       module_put(opts->dependent);
+               else
+                       configfs_undepend_item_unlocked(
+                               &opts->func_inst.group.cg_item);
+               mutex_unlock(&opts->dep_lock);
+       }
        mutex_unlock(&tpg_instances_lock);
 
        kfree(tpg);
index 186d4b1..cd214ec 100644 (file)
@@ -598,18 +598,6 @@ static struct usb_gadget_strings *fn_strings[] = {
        NULL,
 };
 
-static struct usb_qualifier_descriptor devqual_desc = {
-       .bLength = sizeof devqual_desc,
-       .bDescriptorType = USB_DT_DEVICE_QUALIFIER,
-
-       .bcdUSB = cpu_to_le16(0x200),
-       .bDeviceClass = USB_CLASS_MISC,
-       .bDeviceSubClass = 0x02,
-       .bDeviceProtocol = 0x01,
-       .bNumConfigurations = 1,
-       .bRESERVED = 0,
-};
-
 static struct usb_interface_assoc_descriptor iad_desc = {
        .bLength = sizeof iad_desc,
        .bDescriptorType = USB_DT_INTERFACE_ASSOCIATION,
@@ -1292,6 +1280,7 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
 
        if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
                struct cntrl_cur_lay3 c;
+               memset(&c, 0, sizeof(struct cntrl_cur_lay3));
 
                if (entity_id == USB_IN_CLK_ID)
                        c.dCUR = p_srate;
index d626830..990df22 100644 (file)
@@ -83,9 +83,7 @@ EXPORT_SYMBOL_GPL(fsg_fs_function);
  * USB 2.0 devices need to expose both high speed and full speed
  * descriptors, unless they only run at full speed.
  *
- * That means alternate endpoint descriptors (bigger packets)
- * and a "device qualifier" ... plus more construction options
- * for the configuration descriptor.
+ * That means alternate endpoint descriptors (bigger packets).
  */
 struct usb_endpoint_descriptor fsg_hs_bulk_in_desc = {
        .bLength =              USB_DT_ENDPOINT_SIZE,
index e64479f..aa3707b 100644 (file)
@@ -938,8 +938,11 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
                        struct usb_ep           *ep = dev->gadget->ep0;
                        struct usb_request      *req = dev->req;
 
-                       if ((retval = setup_req (ep, req, 0)) == 0)
-                               retval = usb_ep_queue (ep, req, GFP_ATOMIC);
+                       if ((retval = setup_req (ep, req, 0)) == 0) {
+                               spin_unlock_irq (&dev->lock);
+                               retval = usb_ep_queue (ep, req, GFP_KERNEL);
+                               spin_lock_irq (&dev->lock);
+                       }
                        dev->state = STATE_DEV_CONNECTED;
 
                        /* assume that was SET_CONFIGURATION */
@@ -1457,8 +1460,11 @@ delegate:
                                                        w_length);
                                if (value < 0)
                                        break;
+
+                               spin_unlock (&dev->lock);
                                value = usb_ep_queue (gadget->ep0, dev->req,
-                                                       GFP_ATOMIC);
+                                                       GFP_KERNEL);
+                               spin_lock (&dev->lock);
                                if (value < 0) {
                                        clean_req (gadget->ep0, dev->req);
                                        break;
@@ -1481,11 +1487,14 @@ delegate:
        if (value >= 0 && dev->state != STATE_DEV_SETUP) {
                req->length = value;
                req->zero = value < w_length;
-               value = usb_ep_queue (gadget->ep0, req, GFP_ATOMIC);
+
+               spin_unlock (&dev->lock);
+               value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
                if (value < 0) {
                        DBG (dev, "ep_queue --> %d\n", value);
                        req->status = 0;
                }
+               return value;
        }
 
        /* device stalls when value < 0 */
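
The gadgetfs changes above all follow one pattern: usb_ep_queue() may sleep when called with GFP_KERNEL, so the driver lock is dropped around the call and re-taken afterwards. A reduced sketch of that pattern (the device structure and helper name are illustrative, not part of the driver):

#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/usb/gadget.h>

struct example_dev {
	spinlock_t lock;
};

static int queue_ep0_sleeping(struct example_dev *dev, struct usb_ep *ep,
			      struct usb_request *req)
{
	int ret;

	spin_unlock_irq(&dev->lock);		/* let the queue operation sleep */
	ret = usb_ep_queue(ep, req, GFP_KERNEL);
	spin_lock_irq(&dev->lock);		/* caller must re-check device state */

	return ret;
}
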
index 6e8300d..e1b2dce 100644 (file)
@@ -603,11 +603,15 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
                }
        }
 
-       list_add_tail(&driver->pending, &gadget_driver_pending_list);
-       pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
-               driver->function);
+       if (!driver->match_existing_only) {
+               list_add_tail(&driver->pending, &gadget_driver_pending_list);
+               pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+                       driver->function);
+               ret = 0;
+       }
+
        mutex_unlock(&udc_lock);
-       return 0;
+       return ret;
 found:
        ret = udc_bind_to_driver(udc, driver);
        mutex_unlock(&udc_lock);
index ae1b6e6..a962b89 100644 (file)
@@ -368,6 +368,15 @@ static void ehci_shutdown(struct usb_hcd *hcd)
 {
        struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 
+       /**
+        * Protect the system from crashing at system shutdown in cases where
+        * usb host is not added yet from OTG controller driver.
+        * As ehci_setup() not done yet, so stop accessing registers or
+        * variables initialized in ehci_setup()
+        */
+       if (!ehci->sbrn)
+               return;
+
        spin_lock_irq(&ehci->lock);
        ehci->shutdown = true;
        ehci->rh_state = EHCI_RH_STOPPING;
index ffc9029..74f62d6 100644 (file)
@@ -872,14 +872,22 @@ int ehci_hub_control(
 ) {
        struct ehci_hcd *ehci = hcd_to_ehci (hcd);
        int             ports = HCS_N_PORTS (ehci->hcs_params);
-       u32 __iomem     *status_reg = &ehci->regs->port_status[
-                               (wIndex & 0xff) - 1];
-       u32 __iomem     *hostpc_reg = &ehci->regs->hostpc[(wIndex & 0xff) - 1];
+       u32 __iomem     *status_reg, *hostpc_reg;
        u32             temp, temp1, status;
        unsigned long   flags;
        int             retval = 0;
        unsigned        selector;
 
+       /*
+        * Avoid underflow while calculating (wIndex & 0xff) - 1.
+        * The compiler might deduce that wIndex can never be 0 and then
+        * optimize away the tests for !wIndex below.
+        */
+       temp = wIndex & 0xff;
+       temp -= (temp > 0);
+       status_reg = &ehci->regs->port_status[temp];
+       hostpc_reg = &ehci->regs->hostpc[temp];
+
        /*
         * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
         * HCS_INDICATOR may say we can change LEDs to off/amber/green.
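
The clamp above, temp -= (temp > 0), decrements a non-zero wIndex without letting 0 wrap around, so the register pointers can be computed up front while the later !wIndex checks remain meaningful to the compiler. A small stand-alone illustration of the arithmetic:

#include <stdio.h>

int main(void)
{
	for (unsigned int wIndex = 0; wIndex <= 2; wIndex++) {
		unsigned int temp = wIndex & 0xff;

		temp -= (temp > 0);	/* 0 -> 0, 1 -> 0, 2 -> 1 */
		printf("wIndex=%u -> port array index %u\n", wIndex, temp);
	}
	return 0;
}
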
index d3afc89..2f8d3af 100644 (file)
@@ -179,22 +179,32 @@ static int ehci_msm_remove(struct platform_device *pdev)
 static int ehci_msm_pm_suspend(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
        bool do_wakeup = device_may_wakeup(dev);
 
        dev_dbg(dev, "ehci-msm PM suspend\n");
 
-       return ehci_suspend(hcd, do_wakeup);
+       /* Only call ehci_suspend if ehci_setup has been done */
+       if (ehci->sbrn)
+               return ehci_suspend(hcd, do_wakeup);
+
+       return 0;
 }
 
 static int ehci_msm_pm_resume(struct device *dev)
 {
        struct usb_hcd *hcd = dev_get_drvdata(dev);
+       struct ehci_hcd *ehci = hcd_to_ehci(hcd);
 
        dev_dbg(dev, "ehci-msm PM resume\n");
-       ehci_resume(hcd, false);
+
+       /* Only call ehci_resume if ehci_setup has been done */
+       if (ehci->sbrn)
+               ehci_resume(hcd, false);
 
        return 0;
 }
+
 #else
 #define ehci_msm_pm_suspend    NULL
 #define ehci_msm_pm_resume     NULL
index 4031b37..9a3d7db 100644 (file)
@@ -81,15 +81,23 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
        struct usb_hcd *hcd = platform_get_drvdata(pdev);
        struct tegra_ehci_hcd *tegra =
                (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
+       bool has_utmi_pad_registers = false;
 
        phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
        if (!phy_np)
                return -ENOENT;
 
+       if (of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers"))
+               has_utmi_pad_registers = true;
+
        if (!usb1_reset_attempted) {
                struct reset_control *usb1_reset;
 
-               usb1_reset = of_reset_control_get(phy_np, "usb");
+               if (!has_utmi_pad_registers)
+                       usb1_reset = of_reset_control_get(phy_np, "utmi-pads");
+               else
+                       usb1_reset = tegra->rst;
+
                if (IS_ERR(usb1_reset)) {
                        dev_warn(&pdev->dev,
                                 "can't get utmi-pads reset from the PHY\n");
@@ -99,13 +107,15 @@ static int tegra_reset_usb_controller(struct platform_device *pdev)
                        reset_control_assert(usb1_reset);
                        udelay(1);
                        reset_control_deassert(usb1_reset);
+
+                       if (!has_utmi_pad_registers)
+                               reset_control_put(usb1_reset);
                }
 
-               reset_control_put(usb1_reset);
                usb1_reset_attempted = true;
        }
 
-       if (!of_property_read_bool(phy_np, "nvidia,has-utmi-pad-registers")) {
+       if (!has_utmi_pad_registers) {
                reset_control_assert(tegra->rst);
                udelay(1);
                reset_control_deassert(tegra->rst);
index d029bbe..641fed6 100644 (file)
@@ -183,7 +183,6 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
 {
        int     branch;
 
-       ed->state = ED_OPER;
        ed->ed_prev = NULL;
        ed->ed_next = NULL;
        ed->hwNextED = 0;
@@ -259,6 +258,8 @@ static int ed_schedule (struct ohci_hcd *ohci, struct ed *ed)
        /* the HC may not see the schedule updates yet, but if it does
         * then they'll be properly ordered.
         */
+
+       ed->state = ED_OPER;
        return 0;
 }
 
index 48672fa..c10972f 100644 (file)
@@ -37,6 +37,7 @@
 /* Device for a quirk */
 #define PCI_VENDOR_ID_FRESCO_LOGIC     0x1b73
 #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
+#define PCI_DEVICE_ID_FRESCO_LOGIC_FL1009      0x1009
 #define PCI_DEVICE_ID_FRESCO_LOGIC_FL1400      0x1400
 
 #define PCI_VENDOR_ID_ETRON            0x1b6f
@@ -114,6 +115,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_TRUST_TX_LENGTH;
        }
 
+       if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
+                       pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1009)
+               xhci->quirks |= XHCI_BROKEN_STREAMS;
+
        if (pdev->vendor == PCI_VENDOR_ID_NEC)
                xhci->quirks |= XHCI_NEC_HOST;
 
index 676ea45..1f3f981 100644 (file)
@@ -196,6 +196,9 @@ static int xhci_plat_probe(struct platform_device *pdev)
                ret = clk_prepare_enable(clk);
                if (ret)
                        goto put_hcd;
+       } else if (PTR_ERR(clk) == -EPROBE_DEFER) {
+               ret = -EPROBE_DEFER;
+               goto put_hcd;
        }
 
        xhci = hcd_to_xhci(hcd);
index 52deae4..d7d5025 100644 (file)
@@ -290,6 +290,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
        temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
+
+       /*
+        * Writing the CMD_RING_ABORT bit should cause a cmd completion event,
+        * however on some host hw the CMD_RING_RUNNING bit is correctly cleared
+        * but the completion event is never sent. Use the cmd timeout timer to
+        * handle those cases. Use twice the time to cover the bit polling retry.
+        */
+       mod_timer(&xhci->cmd_timer, jiffies + (2 * XHCI_CMD_DEFAULT_TIMEOUT));
        xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
                        &xhci->op_regs->cmd_ring);
 
@@ -314,6 +322,7 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
 
                xhci_err(xhci, "Stopped the command ring failed, "
                                "maybe the host is dead\n");
+               del_timer(&xhci->cmd_timer);
                xhci->xhc_state |= XHCI_STATE_DYING;
                xhci_quiesce(xhci);
                xhci_halt(xhci);
@@ -1246,22 +1255,21 @@ void xhci_handle_command_timeout(unsigned long data)
        int ret;
        unsigned long flags;
        u64 hw_ring_state;
-       struct xhci_command *cur_cmd = NULL;
+       bool second_timeout = false;
        xhci = (struct xhci_hcd *) data;
 
        /* mark this command to be cancelled */
        spin_lock_irqsave(&xhci->lock, flags);
        if (xhci->current_cmd) {
-               cur_cmd = xhci->current_cmd;
-               cur_cmd->status = COMP_CMD_ABORT;
+               if (xhci->current_cmd->status == COMP_CMD_ABORT)
+                       second_timeout = true;
+               xhci->current_cmd->status = COMP_CMD_ABORT;
        }
 
-
        /* Make sure command ring is running before aborting it */
        hw_ring_state = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
        if ((xhci->cmd_ring_state & CMD_RING_STATE_RUNNING) &&
            (hw_ring_state & CMD_RING_RUNNING))  {
-
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg(xhci, "Command timeout\n");
                ret = xhci_abort_cmd_ring(xhci);
@@ -1273,6 +1281,15 @@ void xhci_handle_command_timeout(unsigned long data)
                }
                return;
        }
+
+       /* command ring failed to restart, or host removed. Bail out */
+       if (second_timeout || xhci->xhc_state & XHCI_STATE_REMOVING) {
+               spin_unlock_irqrestore(&xhci->lock, flags);
+               xhci_dbg(xhci, "command timed out twice, ring start fail?\n");
+               xhci_cleanup_command_queue(xhci);
+               return;
+       }
+
        /* command timeout on stopped ring, ring can't be aborted */
        xhci_dbg(xhci, "Command timeout on stopped ring\n");
        xhci_handle_stopped_cmd_ring(xhci, xhci->current_cmd);
@@ -2721,7 +2738,8 @@ hw_died:
                writel(irq_pending, &xhci->ir_set->irq_pending);
        }
 
-       if (xhci->xhc_state & XHCI_STATE_DYING) {
+       if (xhci->xhc_state & XHCI_STATE_DYING ||
+           xhci->xhc_state & XHCI_STATE_HALTED) {
                xhci_dbg(xhci, "xHCI dying, ignoring interrupt. "
                                "Shouldn't IRQs be disabled?\n");
                /* Clear the event handler busy flag (RW1C);
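
The command-timeout hunks above implement a simple escalation: the first time the timer fires, the current command is marked COMP_CMD_ABORT and an abort is attempted; if the timer fires again and the command is already marked aborted, the ring evidently never restarted and the whole command queue is cleaned up. A small stand-alone sketch of that decision, with illustrative names rather than the real xHCI structures, and ignoring the XHCI_STATE_REMOVING case:

#include <stdbool.h>
#include <stdio.h>

enum cmd_status { CMD_PENDING, CMD_ABORT_REQUESTED };

struct cmd { enum cmd_status status; };

/* Called on each timer expiry. Returns true when the caller should stop
 * retrying the abort and clean the whole queue (second expiry with the
 * abort already requested). */
static bool handle_timeout(struct cmd *current_cmd)
{
	bool second_timeout = false;

	if (current_cmd) {
		if (current_cmd->status == CMD_ABORT_REQUESTED)
			second_timeout = true;
		current_cmd->status = CMD_ABORT_REQUESTED;
	}

	return second_timeout;
}

int main(void)
{
	struct cmd c = { .status = CMD_PENDING };

	printf("first expiry:  give up? %d\n", handle_timeout(&c)); /* 0: try abort */
	printf("second expiry: give up? %d\n", handle_timeout(&c)); /* 1: clean queue */
	return 0;
}
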
index fa7e1ef..f2f9518 100644 (file)
@@ -685,20 +685,23 @@ void xhci_stop(struct usb_hcd *hcd)
        u32 temp;
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-       if (xhci->xhc_state & XHCI_STATE_HALTED)
-               return;
-
        mutex_lock(&xhci->mutex);
-       spin_lock_irq(&xhci->lock);
-       xhci->xhc_state |= XHCI_STATE_HALTED;
-       xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
 
-       /* Make sure the xHC is halted for a USB3 roothub
-        * (xhci_stop() could be called as part of failed init).
-        */
-       xhci_halt(xhci);
-       xhci_reset(xhci);
-       spin_unlock_irq(&xhci->lock);
+       if (!(xhci->xhc_state & XHCI_STATE_HALTED)) {
+               spin_lock_irq(&xhci->lock);
+
+               xhci->xhc_state |= XHCI_STATE_HALTED;
+               xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+               xhci_halt(xhci);
+               xhci_reset(xhci);
+
+               spin_unlock_irq(&xhci->lock);
+       }
+
+       if (!usb_hcd_is_primary_hcd(hcd)) {
+               mutex_unlock(&xhci->mutex);
+               return;
+       }
 
        xhci_cleanup_msix(xhci);
 
@@ -4886,7 +4889,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                xhci->hcc_params2 = readl(&xhci->cap_regs->hcc_params2);
        xhci_print_registers(xhci);
 
-       xhci->quirks = quirks;
+       xhci->quirks |= quirks;
 
        get_quirks(dev, xhci);
 
index 39fd958..f824336 100644 (file)
@@ -1090,29 +1090,6 @@ void musb_stop(struct musb *musb)
        musb_platform_try_idle(musb, 0);
 }
 
-static void musb_shutdown(struct platform_device *pdev)
-{
-       struct musb     *musb = dev_to_musb(&pdev->dev);
-       unsigned long   flags;
-
-       pm_runtime_get_sync(musb->controller);
-
-       musb_host_cleanup(musb);
-       musb_gadget_cleanup(musb);
-
-       spin_lock_irqsave(&musb->lock, flags);
-       musb_platform_disable(musb);
-       musb_generic_disable(musb);
-       spin_unlock_irqrestore(&musb->lock, flags);
-
-       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
-       musb_platform_exit(musb);
-
-       pm_runtime_put(musb->controller);
-       /* FIXME power down */
-}
-
-
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -1702,7 +1679,7 @@ EXPORT_SYMBOL_GPL(musb_dma_completion);
 #define use_dma                        0
 #endif
 
-static void (*musb_phy_callback)(enum musb_vbus_id_status status);
+static int (*musb_phy_callback)(enum musb_vbus_id_status status);
 
 /*
  * musb_mailbox - optional phy notifier function
@@ -1711,11 +1688,12 @@ static void (*musb_phy_callback)(enum musb_vbus_id_status status);
  * Optionally gets called from the USB PHY. Note that the USB PHY must be
  * disabled at the point the phy_callback is registered or unregistered.
  */
-void musb_mailbox(enum musb_vbus_id_status status)
+int musb_mailbox(enum musb_vbus_id_status status)
 {
        if (musb_phy_callback)
-               musb_phy_callback(status);
+               return musb_phy_callback(status);
 
+       return -ENODEV;
 };
 EXPORT_SYMBOL_GPL(musb_mailbox);
 
@@ -2028,11 +2006,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        musb_readl = musb_default_readl;
        musb_writel = musb_default_writel;
 
-       /* We need musb_read/write functions initialized for PM */
-       pm_runtime_use_autosuspend(musb->controller);
-       pm_runtime_set_autosuspend_delay(musb->controller, 200);
-       pm_runtime_enable(musb->controller);
-
        /* The musb_platform_init() call:
         *   - adjusts musb->mregs
         *   - sets the musb->isr
@@ -2134,6 +2107,16 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (musb->ops->phy_callback)
                musb_phy_callback = musb->ops->phy_callback;
 
+       /*
+        * We need musb_read/write functions initialized for PM.
+        * Note that at least the 2430 glue needs an autosuspend delay
+        * somewhere above 300 ms for the hardware to idle properly
+        * after disconnecting the cable in host mode. Let's use
+        * 500 ms for some margin.
+        */
+       pm_runtime_use_autosuspend(musb->controller);
+       pm_runtime_set_autosuspend_delay(musb->controller, 500);
+       pm_runtime_enable(musb->controller);
        pm_runtime_get_sync(musb->controller);
 
        status = usb_phy_init(musb->xceiv);
@@ -2237,13 +2220,8 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (status)
                goto fail5;
 
-       pm_runtime_put(musb->controller);
-
-       /*
-        * For why this is currently needed, see commit 3e43a0725637
-        * ("usb: musb: core: add pm_runtime_irq_safe()")
-        */
-       pm_runtime_irq_safe(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 
@@ -2265,7 +2243,9 @@ fail2_5:
        usb_phy_shutdown(musb->xceiv);
 
 err_usb_phy_init:
+       pm_runtime_dont_use_autosuspend(musb->controller);
        pm_runtime_put_sync(musb->controller);
+       pm_runtime_disable(musb->controller);
 
 fail2:
        if (musb->irq_wake)
@@ -2273,7 +2253,6 @@ fail2:
        musb_platform_exit(musb);
 
 fail1:
-       pm_runtime_disable(musb->controller);
        dev_err(musb->controller,
                "musb_init_controller failed with status %d\n", status);
 
@@ -2312,6 +2291,7 @@ static int musb_remove(struct platform_device *pdev)
 {
        struct device   *dev = &pdev->dev;
        struct musb     *musb = dev_to_musb(dev);
+       unsigned long   flags;
 
        /* this gets called on rmmod.
         *  - Host mode: host may still be active
@@ -2319,17 +2299,26 @@ static int musb_remove(struct platform_device *pdev)
         *  - OTG mode: both roles are deactivated (or never-activated)
         */
        musb_exit_debugfs(musb);
-       musb_shutdown(pdev);
-       musb_phy_callback = NULL;
-
-       if (musb->dma_controller)
-               musb_dma_controller_destroy(musb->dma_controller);
-
-       usb_phy_shutdown(musb->xceiv);
 
        cancel_work_sync(&musb->irq_work);
        cancel_delayed_work_sync(&musb->finish_resume_work);
        cancel_delayed_work_sync(&musb->deassert_reset_work);
+       pm_runtime_get_sync(musb->controller);
+       musb_host_cleanup(musb);
+       musb_gadget_cleanup(musb);
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_platform_disable(musb);
+       musb_generic_disable(musb);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+       pm_runtime_dont_use_autosuspend(musb->controller);
+       pm_runtime_put_sync(musb->controller);
+       pm_runtime_disable(musb->controller);
+       musb_platform_exit(musb);
+       musb_phy_callback = NULL;
+       if (musb->dma_controller)
+               musb_dma_controller_destroy(musb->dma_controller);
+       usb_phy_shutdown(musb->xceiv);
        musb_free(musb);
        device_init_wakeup(dev, 0);
        return 0;
@@ -2429,7 +2418,8 @@ static void musb_restore_context(struct musb *musb)
        musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
        musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
        musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
-       musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
+       if (musb->context.devctl & MUSB_DEVCTL_SESSION)
+               musb_writeb(musb_base, MUSB_DEVCTL, musb->context.devctl);
 
        for (i = 0; i < musb->config->num_eps; ++i) {
                struct musb_hw_ep       *hw_ep;
@@ -2612,7 +2602,6 @@ static struct platform_driver musb_driver = {
        },
        .probe          = musb_probe,
        .remove         = musb_remove,
-       .shutdown       = musb_shutdown,
 };
 
 module_platform_driver(musb_driver);
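
Taken together, the musb_core.c hunks above move runtime PM setup to after the register accessors are initialized, raise the autosuspend delay to 500 ms, and balance every pm_runtime_get_sync() with pm_runtime_mark_last_busy() plus pm_runtime_put_autosuspend(). The fragment below is a condensed, kernel-style sketch of that ordering only; it is not buildable on its own, and musb_hypothetical_setup() is an invented name, not a function in the driver.

#include <linux/pm_runtime.h>

/* Hypothetical helper showing the probe-path ordering used above. */
static int musb_hypothetical_setup(struct device *dev)
{
	int ret;

	/* The 2430 glue needs well over 300 ms to idle; 500 ms adds margin. */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 500);
	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_put;

	/* ... touch controller registers while the device is powered ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;

err_put:
	pm_runtime_dont_use_autosuspend(dev);
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
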
index b6afe9e..b55a776 100644 (file)
@@ -215,7 +215,7 @@ struct musb_platform_ops {
                                dma_addr_t *dma_addr, u32 *len);
        void    (*pre_root_reset_end)(struct musb *musb);
        void    (*post_root_reset_end)(struct musb *musb);
-       void    (*phy_callback)(enum musb_vbus_id_status status);
+       int     (*phy_callback)(enum musb_vbus_id_status status);
 };
 
 /*
@@ -312,6 +312,7 @@ struct musb {
        struct work_struct      irq_work;
        struct delayed_work     deassert_reset_work;
        struct delayed_work     finish_resume_work;
+       struct delayed_work     gadget_work;
        u16                     hwvers;
 
        u16                     intrrxe;
index 152865b..af2a3a7 100644 (file)
@@ -1656,6 +1656,20 @@ static int musb_gadget_vbus_draw(struct usb_gadget *gadget, unsigned mA)
        return usb_phy_set_power(musb->xceiv, mA);
 }
 
+static void musb_gadget_work(struct work_struct *work)
+{
+       struct musb *musb;
+       unsigned long flags;
+
+       musb = container_of(work, struct musb, gadget_work.work);
+       pm_runtime_get_sync(musb->controller);
+       spin_lock_irqsave(&musb->lock, flags);
+       musb_pullup(musb, musb->softconnect);
+       spin_unlock_irqrestore(&musb->lock, flags);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
+}
+
 static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
 {
        struct musb     *musb = gadget_to_musb(gadget);
@@ -1663,20 +1677,16 @@ static int musb_gadget_pullup(struct usb_gadget *gadget, int is_on)
 
        is_on = !!is_on;
 
-       pm_runtime_get_sync(musb->controller);
-
        /* NOTE: this assumes we are sensing vbus; we'd rather
         * not pullup unless the B-session is active.
         */
        spin_lock_irqsave(&musb->lock, flags);
        if (is_on != musb->softconnect) {
                musb->softconnect = is_on;
-               musb_pullup(musb, is_on);
+               schedule_delayed_work(&musb->gadget_work, 0);
        }
        spin_unlock_irqrestore(&musb->lock, flags);
 
-       pm_runtime_put(musb->controller);
-
        return 0;
 }
 
@@ -1845,7 +1855,7 @@ int musb_gadget_setup(struct musb *musb)
 #elif IS_ENABLED(CONFIG_USB_MUSB_GADGET)
        musb->g.is_otg = 0;
 #endif
-
+       INIT_DELAYED_WORK(&musb->gadget_work, musb_gadget_work);
        musb_g_init_endpoints(musb);
 
        musb->is_active = 0;
@@ -1866,6 +1876,8 @@ void musb_gadget_cleanup(struct musb *musb)
 {
        if (musb->port_mode == MUSB_PORT_MODE_HOST)
                return;
+
+       cancel_delayed_work_sync(&musb->gadget_work);
        usb_del_gadget_udc(&musb->g);
 }
 
@@ -1914,8 +1926,8 @@ static int musb_gadget_start(struct usb_gadget *g,
        if (musb->xceiv->last_event == USB_EVENT_ID)
                musb_platform_set_vbus(musb, 1);
 
-       if (musb->xceiv->last_event == USB_EVENT_NONE)
-               pm_runtime_put(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 
@@ -1934,8 +1946,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
        struct musb     *musb = gadget_to_musb(g);
        unsigned long   flags;
 
-       if (musb->xceiv->last_event == USB_EVENT_NONE)
-               pm_runtime_get_sync(musb->controller);
+       pm_runtime_get_sync(musb->controller);
 
        /*
         * REVISIT always use otg_set_peripheral() here too;
@@ -1963,7 +1974,8 @@ static int musb_gadget_stop(struct usb_gadget *g)
         * that currently misbehaves.
         */
 
-       pm_runtime_put(musb->controller);
+       pm_runtime_mark_last_busy(musb->controller);
+       pm_runtime_put_autosuspend(musb->controller);
 
        return 0;
 }
index 2f8ad7f..d227a71 100644 (file)
@@ -434,7 +434,13 @@ static void musb_advance_schedule(struct musb *musb, struct urb *urb,
                }
        }
 
-       if (qh != NULL && qh->is_ready) {
+       /*
+        * The pipe must be broken if the current urb->status is set, so don't
+        * start the next urb.
+        * TODO: to minimize the risk of regression, only check urb->status
+        * for RX, until we have a test case to understand the behavior of TX.
+        */
+       if ((!status || !is_in) && qh && qh->is_ready) {
                dev_dbg(musb->controller, "... next ep%d %cX urb %p\n",
                    hw_ep->epnum, is_in ? 'R' : 'T', next_urb(qh));
                musb_start_urb(musb, is_in, qh);
@@ -594,14 +600,13 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
                musb_writew(ep->regs, MUSB_TXCSR, 0);
 
        /* scrub all previous state, clearing toggle */
-       } else {
-               csr = musb_readw(ep->regs, MUSB_RXCSR);
-               if (csr & MUSB_RXCSR_RXPKTRDY)
-                       WARNING("rx%d, packet/%d ready?\n", ep->epnum,
-                               musb_readw(ep->regs, MUSB_RXCOUNT));
-
-               musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
        }
+       csr = musb_readw(ep->regs, MUSB_RXCSR);
+       if (csr & MUSB_RXCSR_RXPKTRDY)
+               WARNING("rx%d, packet/%d ready?\n", ep->epnum,
+                       musb_readw(ep->regs, MUSB_RXCOUNT));
+
+       musb_h_flush_rxfifo(ep, MUSB_RXCSR_CLRDATATOG);
 
        /* target addr and (for multipoint) hub addr/port */
        if (musb->is_multipoint) {
@@ -627,7 +632,7 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, u8 epnum)
        ep->rx_reinit = 0;
 }
 
-static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
+static void musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
                struct musb_hw_ep *hw_ep, struct musb_qh *qh,
                struct urb *urb, u32 offset,
                u32 *length, u8 *mode)
@@ -664,23 +669,18 @@ static int musb_tx_dma_set_mode_mentor(struct dma_controller *dma,
        }
        channel->desired_mode = *mode;
        musb_writew(epio, MUSB_TXCSR, csr);
-
-       return 0;
 }
 
-static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
-                                         struct musb_hw_ep *hw_ep,
-                                         struct musb_qh *qh,
-                                         struct urb *urb,
-                                         u32 offset,
-                                         u32 *length,
-                                         u8 *mode)
+static void musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
+                                          struct musb_hw_ep *hw_ep,
+                                          struct musb_qh *qh,
+                                          struct urb *urb,
+                                          u32 offset,
+                                          u32 *length,
+                                          u8 *mode)
 {
        struct dma_channel *channel = hw_ep->tx_channel;
 
-       if (!is_cppi_enabled(hw_ep->musb) && !tusb_dma_omap(hw_ep->musb))
-               return -ENODEV;
-
        channel->actual_len = 0;
 
        /*
@@ -688,8 +688,6 @@ static int musb_tx_dma_set_mode_cppi_tusb(struct dma_controller *dma,
         * to identify the zero-length-final-packet case.
         */
        *mode = (urb->transfer_flags & URB_ZERO_PACKET) ? 1 : 0;
-
-       return 0;
 }
 
 static bool musb_tx_dma_program(struct dma_controller *dma,
@@ -699,15 +697,14 @@ static bool musb_tx_dma_program(struct dma_controller *dma,
        struct dma_channel      *channel = hw_ep->tx_channel;
        u16                     pkt_size = qh->maxpacket;
        u8                      mode;
-       int                     res;
 
        if (musb_dma_inventra(hw_ep->musb) || musb_dma_ux500(hw_ep->musb))
-               res = musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb,
-                                                offset, &length, &mode);
+               musb_tx_dma_set_mode_mentor(dma, hw_ep, qh, urb, offset,
+                                           &length, &mode);
+       else if (is_cppi_enabled(hw_ep->musb) || tusb_dma_omap(hw_ep->musb))
+               musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb, offset,
+                                              &length, &mode);
        else
-               res = musb_tx_dma_set_mode_cppi_tusb(dma, hw_ep, qh, urb,
-                                                    offset, &length, &mode);
-       if (res)
                return false;
 
        qh->segsize = length;
@@ -995,9 +992,15 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
        if (is_in) {
                dma = is_dma_capable() ? ep->rx_channel : NULL;
 
-               /* clear nak timeout bit */
+               /*
+                * Need to stop the transaction by clearing REQPKT first,
+                * then the NAK Timeout bit; see the MUSBMHDRC USB 2.0 HIGH-SPEED
+                * DUAL-ROLE CONTROLLER Programmer's Guide, section 9.2.2.
+                */
                rx_csr = musb_readw(epio, MUSB_RXCSR);
                rx_csr |= MUSB_RXCSR_H_WZC_BITS;
+               rx_csr &= ~MUSB_RXCSR_H_REQPKT;
+               musb_writew(epio, MUSB_RXCSR, rx_csr);
                rx_csr &= ~MUSB_RXCSR_DATAERROR;
                musb_writew(epio, MUSB_RXCSR, rx_csr);
 
@@ -1551,7 +1554,7 @@ static int musb_rx_dma_iso_cppi41(struct dma_controller *dma,
                                  struct urb *urb,
                                  size_t len)
 {
-       struct dma_channel *channel = hw_ep->tx_channel;
+       struct dma_channel *channel = hw_ep->rx_channel;
        void __iomem *epio = hw_ep->regs;
        dma_addr_t *buf;
        u32 length, res;
@@ -1870,6 +1873,9 @@ void musb_host_rx(struct musb *musb, u8 epnum)
                status = -EPROTO;
                musb_writeb(epio, MUSB_RXINTERVAL, 0);
 
+               rx_csr &= ~MUSB_RXCSR_H_ERROR;
+               musb_writew(epio, MUSB_RXCSR, rx_csr);
+
        } else if (rx_csr & MUSB_RXCSR_DATAERROR) {
 
                if (USB_ENDPOINT_XFER_ISOC != qh->type) {
index c84e032..0b4cec9 100644 (file)
@@ -49,97 +49,14 @@ struct omap2430_glue {
        enum musb_vbus_id_status status;
        struct work_struct      omap_musb_mailbox_work;
        struct device           *control_otghs;
+       bool                    cable_connected;
+       bool                    enabled;
+       bool                    powered;
 };
 #define glue_to_musb(g)                platform_get_drvdata(g->musb)
 
 static struct omap2430_glue    *_glue;
 
-static struct timer_list musb_idle_timer;
-
-static void musb_do_idle(unsigned long _musb)
-{
-       struct musb     *musb = (void *)_musb;
-       unsigned long   flags;
-       u8      power;
-       u8      devctl;
-
-       spin_lock_irqsave(&musb->lock, flags);
-
-       switch (musb->xceiv->otg->state) {
-       case OTG_STATE_A_WAIT_BCON:
-
-               devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-               if (devctl & MUSB_DEVCTL_BDEVICE) {
-                       musb->xceiv->otg->state = OTG_STATE_B_IDLE;
-                       MUSB_DEV_MODE(musb);
-               } else {
-                       musb->xceiv->otg->state = OTG_STATE_A_IDLE;
-                       MUSB_HST_MODE(musb);
-               }
-               break;
-       case OTG_STATE_A_SUSPEND:
-               /* finish RESUME signaling? */
-               if (musb->port1_status & MUSB_PORT_STAT_RESUME) {
-                       power = musb_readb(musb->mregs, MUSB_POWER);
-                       power &= ~MUSB_POWER_RESUME;
-                       dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power);
-                       musb_writeb(musb->mregs, MUSB_POWER, power);
-                       musb->is_active = 1;
-                       musb->port1_status &= ~(USB_PORT_STAT_SUSPEND
-                                               | MUSB_PORT_STAT_RESUME);
-                       musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16;
-                       usb_hcd_poll_rh_status(musb->hcd);
-                       /* NOTE: it might really be A_WAIT_BCON ... */
-                       musb->xceiv->otg->state = OTG_STATE_A_HOST;
-               }
-               break;
-       case OTG_STATE_A_HOST:
-               devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-               if (devctl &  MUSB_DEVCTL_BDEVICE)
-                       musb->xceiv->otg->state = OTG_STATE_B_IDLE;
-               else
-                       musb->xceiv->otg->state = OTG_STATE_A_WAIT_BCON;
-       default:
-               break;
-       }
-       spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-
-static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout)
-{
-       unsigned long           default_timeout = jiffies + msecs_to_jiffies(3);
-       static unsigned long    last_timer;
-
-       if (timeout == 0)
-               timeout = default_timeout;
-
-       /* Never idle if active, or when VBUS timeout is not set as host */
-       if (musb->is_active || ((musb->a_wait_bcon == 0)
-                       && (musb->xceiv->otg->state == OTG_STATE_A_WAIT_BCON))) {
-               dev_dbg(musb->controller, "%s active, deleting timer\n",
-                       usb_otg_state_string(musb->xceiv->otg->state));
-               del_timer(&musb_idle_timer);
-               last_timer = jiffies;
-               return;
-       }
-
-       if (time_after(last_timer, timeout)) {
-               if (!timer_pending(&musb_idle_timer))
-                       last_timer = timeout;
-               else {
-                       dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n");
-                       return;
-               }
-       }
-       last_timer = timeout;
-
-       dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n",
-               usb_otg_state_string(musb->xceiv->otg->state),
-               (unsigned long)jiffies_to_msecs(timeout - jiffies));
-       mod_timer(&musb_idle_timer, timeout);
-}
-
 static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
 {
        struct usb_otg  *otg = musb->xceiv->otg;
@@ -205,16 +122,6 @@ static void omap2430_musb_set_vbus(struct musb *musb, int is_on)
                musb_readb(musb->mregs, MUSB_DEVCTL));
 }
 
-static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode)
-{
-       u8      devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
-
-       devctl |= MUSB_DEVCTL_SESSION;
-       musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
-
-       return 0;
-}
-
 static inline void omap2430_low_level_exit(struct musb *musb)
 {
        u32 l;
@@ -234,22 +141,63 @@ static inline void omap2430_low_level_init(struct musb *musb)
        musb_writel(musb->mregs, OTG_FORCESTDBY, l);
 }
 
-static void omap2430_musb_mailbox(enum musb_vbus_id_status status)
+/*
+ * We can get multiple cable events, so we need to keep track
+ * of the power state. Only keep power enabled if the USB cable is
+ * connected and a gadget is started.
+ */
+static void omap2430_set_power(struct musb *musb, bool enabled, bool cable)
+{
+       struct device *dev = musb->controller;
+       struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
+       bool power_up;
+       int res;
+
+       if (glue->enabled != enabled)
+               glue->enabled = enabled;
+
+       if (glue->cable_connected != cable)
+               glue->cable_connected = cable;
+
+       power_up = glue->enabled && glue->cable_connected;
+       if (power_up == glue->powered) {
+               dev_warn(musb->controller, "power state already %i\n",
+                        power_up);
+               return;
+       }
+
+       glue->powered = power_up;
+
+       if (power_up) {
+               res = pm_runtime_get_sync(musb->controller);
+               if (res < 0) {
+                       dev_err(musb->controller, "could not enable: %i", res);
+                       glue->powered = false;
+               }
+       } else {
+               pm_runtime_mark_last_busy(musb->controller);
+               pm_runtime_put_autosuspend(musb->controller);
+       }
+}
+
+static int omap2430_musb_mailbox(enum musb_vbus_id_status status)
 {
        struct omap2430_glue    *glue = _glue;
 
        if (!glue) {
                pr_err("%s: musb core is not yet initialized\n", __func__);
-               return;
+               return -EPROBE_DEFER;
        }
        glue->status = status;
 
        if (!glue_to_musb(glue)) {
                pr_err("%s: musb core is not yet ready\n", __func__);
-               return;
+               return -EPROBE_DEFER;
        }
 
        schedule_work(&glue->omap_musb_mailbox_work);
+
+       return 0;
 }
 
 static void omap_musb_set_mailbox(struct omap2430_glue *glue)
@@ -259,6 +207,13 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
        struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
        struct omap_musb_board_data *data = pdata->board_data;
        struct usb_otg *otg = musb->xceiv->otg;
+       bool cable_connected;
+
+       cable_connected = ((glue->status == MUSB_ID_GROUND) ||
+                          (glue->status == MUSB_VBUS_VALID));
+
+       if (cable_connected)
+               omap2430_set_power(musb, glue->enabled, cable_connected);
 
        switch (glue->status) {
        case MUSB_ID_GROUND:
@@ -268,7 +223,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                musb->xceiv->otg->state = OTG_STATE_A_IDLE;
                musb->xceiv->last_event = USB_EVENT_ID;
                if (musb->gadget_driver) {
-                       pm_runtime_get_sync(dev);
                        omap_control_usb_set_mode(glue->control_otghs,
                                USB_MODE_HOST);
                        omap2430_musb_set_vbus(musb, 1);
@@ -281,8 +235,6 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                otg->default_a = false;
                musb->xceiv->otg->state = OTG_STATE_B_IDLE;
                musb->xceiv->last_event = USB_EVENT_VBUS;
-               if (musb->gadget_driver)
-                       pm_runtime_get_sync(dev);
                omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE);
                break;
 
@@ -291,11 +243,8 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                dev_dbg(dev, "VBUS Disconnect\n");
 
                musb->xceiv->last_event = USB_EVENT_NONE;
-               if (musb->gadget_driver) {
+               if (musb->gadget_driver)
                        omap2430_musb_set_vbus(musb, 0);
-                       pm_runtime_mark_last_busy(dev);
-                       pm_runtime_put_autosuspend(dev);
-               }
 
                if (data->interface_type == MUSB_INTERFACE_UTMI)
                        otg_set_vbus(musb->xceiv->otg, 0);
@@ -307,6 +256,9 @@ static void omap_musb_set_mailbox(struct omap2430_glue *glue)
                dev_dbg(dev, "ID float\n");
        }
 
+       if (!cable_connected)
+               omap2430_set_power(musb, glue->enabled, cable_connected);
+
        atomic_notifier_call_chain(&musb->xceiv->notifier,
                        musb->xceiv->last_event, NULL);
 }
@@ -316,13 +268,8 @@ static void omap_musb_mailbox_work(struct work_struct *mailbox_work)
 {
        struct omap2430_glue *glue = container_of(mailbox_work,
                                struct omap2430_glue, omap_musb_mailbox_work);
-       struct musb *musb = glue_to_musb(glue);
-       struct device *dev = musb->controller;
 
-       pm_runtime_get_sync(dev);
        omap_musb_set_mailbox(glue);
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_put_autosuspend(dev);
 }
 
 static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci)
@@ -389,23 +336,7 @@ static int omap2430_musb_init(struct musb *musb)
                return PTR_ERR(musb->phy);
        }
        musb->isr = omap2430_musb_interrupt;
-
-       /*
-        * Enable runtime PM for musb parent (this driver). We can't
-        * do it earlier as struct musb is not yet allocated and we
-        * need to touch the musb registers for runtime PM.
-        */
-       pm_runtime_enable(glue->dev);
-       status = pm_runtime_get_sync(glue->dev);
-       if (status < 0)
-               goto err1;
-
-       status = pm_runtime_get_sync(dev);
-       if (status < 0) {
-               dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status);
-               pm_runtime_put_sync(glue->dev);
-               goto err1;
-       }
+       phy_init(musb->phy);
 
        l = musb_readl(musb->mregs, OTG_INTERFSEL);
 
@@ -427,20 +358,10 @@ static int omap2430_musb_init(struct musb *musb)
                        musb_readl(musb->mregs, OTG_INTERFSEL),
                        musb_readl(musb->mregs, OTG_SIMENABLE));
 
-       setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb);
-
        if (glue->status != MUSB_UNKNOWN)
                omap_musb_set_mailbox(glue);
 
-       phy_init(musb->phy);
-       phy_power_on(musb->phy);
-
-       pm_runtime_put_noidle(musb->controller);
-       pm_runtime_put_noidle(glue->dev);
        return 0;
-
-err1:
-       return status;
 }
 
 static void omap2430_musb_enable(struct musb *musb)
@@ -452,6 +373,11 @@ static void omap2430_musb_enable(struct musb *musb)
        struct musb_hdrc_platform_data *pdata = dev_get_platdata(dev);
        struct omap_musb_board_data *data = pdata->board_data;
 
+       if (!WARN_ON(!musb->phy))
+               phy_power_on(musb->phy);
+
+       omap2430_set_power(musb, true, glue->cable_connected);
+
        switch (glue->status) {
 
        case MUSB_ID_GROUND:
@@ -487,18 +413,25 @@ static void omap2430_musb_disable(struct musb *musb)
        struct device *dev = musb->controller;
        struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 
+       if (!WARN_ON(!musb->phy))
+               phy_power_off(musb->phy);
+
        if (glue->status != MUSB_UNKNOWN)
                omap_control_usb_set_mode(glue->control_otghs,
                        USB_MODE_DISCONNECT);
+
+       omap2430_set_power(musb, false, glue->cable_connected);
 }
 
 static int omap2430_musb_exit(struct musb *musb)
 {
-       del_timer_sync(&musb_idle_timer);
+       struct device *dev = musb->controller;
+       struct omap2430_glue *glue = dev_get_drvdata(dev->parent);
 
        omap2430_low_level_exit(musb);
-       phy_power_off(musb->phy);
        phy_exit(musb->phy);
+       musb->phy = NULL;
+       cancel_work_sync(&glue->omap_musb_mailbox_work);
 
        return 0;
 }
@@ -512,9 +445,6 @@ static const struct musb_platform_ops omap2430_ops = {
        .init           = omap2430_musb_init,
        .exit           = omap2430_musb_exit,
 
-       .set_mode       = omap2430_musb_set_mode,
-       .try_idle       = omap2430_musb_try_idle,
-
        .set_vbus       = omap2430_musb_set_vbus,
 
        .enable         = omap2430_musb_enable,
@@ -639,11 +569,9 @@ static int omap2430_probe(struct platform_device *pdev)
                goto err2;
        }
 
-       /*
-        * Note that we cannot enable PM runtime yet for this
-        * driver as we need struct musb initialized first.
-        * See omap2430_musb_init above.
-        */
+       pm_runtime_enable(glue->dev);
+       pm_runtime_use_autosuspend(glue->dev);
+       pm_runtime_set_autosuspend_delay(glue->dev, 500);
 
        ret = platform_device_add(musb);
        if (ret) {
@@ -662,12 +590,14 @@ err0:
 
 static int omap2430_remove(struct platform_device *pdev)
 {
-       struct omap2430_glue            *glue = platform_get_drvdata(pdev);
+       struct omap2430_glue *glue = platform_get_drvdata(pdev);
+       struct musb *musb = glue_to_musb(glue);
 
        pm_runtime_get_sync(glue->dev);
-       cancel_work_sync(&glue->omap_musb_mailbox_work);
        platform_device_unregister(glue->musb);
+       omap2430_set_power(musb, false, false);
        pm_runtime_put_sync(glue->dev);
+       pm_runtime_dont_use_autosuspend(glue->dev);
        pm_runtime_disable(glue->dev);
 
        return 0;
@@ -680,12 +610,13 @@ static int omap2430_runtime_suspend(struct device *dev)
        struct omap2430_glue            *glue = dev_get_drvdata(dev);
        struct musb                     *musb = glue_to_musb(glue);
 
-       if (musb) {
-               musb->context.otg_interfsel = musb_readl(musb->mregs,
-                               OTG_INTERFSEL);
+       if (!musb)
+               return 0;
 
-               omap2430_low_level_exit(musb);
-       }
+       musb->context.otg_interfsel = musb_readl(musb->mregs,
+                                                OTG_INTERFSEL);
+
+       omap2430_low_level_exit(musb);
 
        return 0;
 }
@@ -696,7 +627,7 @@ static int omap2430_runtime_resume(struct device *dev)
        struct musb                     *musb = glue_to_musb(glue);
 
        if (!musb)
-               return -EPROBE_DEFER;
+               return 0;
 
        omap2430_low_level_init(musb);
        musb_writel(musb->mregs, OTG_INTERFSEL,
@@ -738,18 +669,8 @@ static struct platform_driver omap2430_driver = {
        },
 };
 
+module_platform_driver(omap2430_driver);
+
 MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
 MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
 MODULE_LICENSE("GPL v2");
-
-static int __init omap2430_init(void)
-{
-       return platform_driver_register(&omap2430_driver);
-}
-subsys_initcall(omap2430_init);
-
-static void __exit omap2430_exit(void)
-{
-       platform_driver_unregister(&omap2430_driver);
-}
-module_exit(omap2430_exit);
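
omap2430_set_power() above boils the runtime-PM handling down to one rule: the controller is powered exactly when a gadget is started and a cable is connected, and a get/put is issued only when that derived state actually changes, so duplicate cable events are ignored. A runnable sketch of that bookkeeping, with counters standing in for the pm_runtime_get_sync()/pm_runtime_put_autosuspend() calls:

#include <stdbool.h>
#include <stdio.h>

struct glue_state {
	bool enabled;          /* gadget started */
	bool cable_connected;  /* cable event seen */
	bool powered;          /* current power state */
	int  gets, puts;       /* stand-ins for runtime-PM get/put calls */
};

static void set_power(struct glue_state *g, bool enabled, bool cable)
{
	bool power_up;

	g->enabled = enabled;
	g->cable_connected = cable;

	power_up = g->enabled && g->cable_connected;
	if (power_up == g->powered)
		return;            /* no transition: no get/put issued */

	g->powered = power_up;
	if (power_up)
		g->gets++;         /* pm_runtime_get_sync() in the driver */
	else
		g->puts++;         /* pm_runtime_put_autosuspend() in the driver */
}

int main(void)
{
	struct glue_state g = { 0 };

	set_power(&g, true, false);  /* gadget started, no cable: stays off */
	set_power(&g, true, true);   /* cable plugged in: one get           */
	set_power(&g, true, true);   /* duplicate cable event: ignored      */
	set_power(&g, false, true);  /* gadget stopped: one put             */
	printf("gets=%d puts=%d\n", g.gets, g.puts);  /* prints gets=1 puts=1 */
	return 0;
}
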
index fdab423..7650051 100644 (file)
@@ -80,7 +80,8 @@ static struct musb *sunxi_musb;
 
 struct sunxi_glue {
        struct device           *dev;
-       struct platform_device  *musb;
+       struct musb             *musb;
+       struct platform_device  *musb_pdev;
        struct clk              *clk;
        struct reset_control    *rst;
        struct phy              *phy;
@@ -102,7 +103,7 @@ static void sunxi_musb_work(struct work_struct *work)
                return;
 
        if (test_and_clear_bit(SUNXI_MUSB_FL_HOSTMODE_PEND, &glue->flags)) {
-               struct musb *musb = platform_get_drvdata(glue->musb);
+               struct musb *musb = glue->musb;
                unsigned long flags;
                u8 devctl;
 
@@ -112,7 +113,7 @@ static void sunxi_musb_work(struct work_struct *work)
                if (test_bit(SUNXI_MUSB_FL_HOSTMODE, &glue->flags)) {
                        set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
                        musb->xceiv->otg->default_a = 1;
-                       musb->xceiv->otg->state = OTG_STATE_A_IDLE;
+                       musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
                        MUSB_HST_MODE(musb);
                        devctl |= MUSB_DEVCTL_SESSION;
                } else {
@@ -145,10 +146,12 @@ static void sunxi_musb_set_vbus(struct musb *musb, int is_on)
 {
        struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
 
-       if (is_on)
+       if (is_on) {
                set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
-       else
+               musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+       } else {
                clear_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+       }
 
        schedule_work(&glue->work);
 }
@@ -264,15 +267,6 @@ static int sunxi_musb_init(struct musb *musb)
        if (ret)
                goto error_unregister_notifier;
 
-       if (musb->port_mode == MUSB_PORT_MODE_HOST) {
-               ret = phy_power_on(glue->phy);
-               if (ret)
-                       goto error_phy_exit;
-               set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
-               /* Stop musb work from turning vbus off again */
-               set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
-       }
-
        musb->isr = sunxi_musb_interrupt;
 
        /* Stop the musb-core from doing runtime pm (not supported on sunxi) */
@@ -280,8 +274,6 @@ static int sunxi_musb_init(struct musb *musb)
 
        return 0;
 
-error_phy_exit:
-       phy_exit(glue->phy);
 error_unregister_notifier:
        if (musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
                extcon_unregister_notifier(glue->extcon, EXTCON_USB_HOST,
@@ -323,10 +315,31 @@ static int sunxi_musb_exit(struct musb *musb)
        return 0;
 }
 
+static int sunxi_set_mode(struct musb *musb, u8 mode)
+{
+       struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
+       int ret;
+
+       if (mode == MUSB_HOST) {
+               ret = phy_power_on(glue->phy);
+               if (ret)
+                       return ret;
+
+               set_bit(SUNXI_MUSB_FL_PHY_ON, &glue->flags);
+               /* Stop musb work from turning vbus off again */
+               set_bit(SUNXI_MUSB_FL_VBUS_ON, &glue->flags);
+               musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE;
+       }
+
+       return 0;
+}
+
 static void sunxi_musb_enable(struct musb *musb)
 {
        struct sunxi_glue *glue = dev_get_drvdata(musb->controller->parent);
 
+       glue->musb = musb;
+
        /* musb_core does not call us in a balanced manner */
        if (test_and_set_bit(SUNXI_MUSB_FL_ENABLED, &glue->flags))
                return;
@@ -569,6 +582,7 @@ static const struct musb_platform_ops sunxi_musb_ops = {
        .exit           = sunxi_musb_exit,
        .enable         = sunxi_musb_enable,
        .disable        = sunxi_musb_disable,
+       .set_mode       = sunxi_set_mode,
        .fifo_offset    = sunxi_musb_fifo_offset,
        .ep_offset      = sunxi_musb_ep_offset,
        .busctl_offset  = sunxi_musb_busctl_offset,
@@ -721,9 +735,9 @@ static int sunxi_musb_probe(struct platform_device *pdev)
        pinfo.data      = &pdata;
        pinfo.size_data = sizeof(pdata);
 
-       glue->musb = platform_device_register_full(&pinfo);
-       if (IS_ERR(glue->musb)) {
-               ret = PTR_ERR(glue->musb);
+       glue->musb_pdev = platform_device_register_full(&pinfo);
+       if (IS_ERR(glue->musb_pdev)) {
+               ret = PTR_ERR(glue->musb_pdev);
                dev_err(&pdev->dev, "Error registering musb dev: %d\n", ret);
                goto err_unregister_usb_phy;
        }
@@ -740,7 +754,7 @@ static int sunxi_musb_remove(struct platform_device *pdev)
        struct sunxi_glue *glue = platform_get_drvdata(pdev);
        struct platform_device *usb_phy = glue->usb_phy;
 
-       platform_device_unregister(glue->musb); /* Frees glue ! */
+       platform_device_unregister(glue->musb_pdev);
        usb_phy_generic_unregister(usb_phy);
 
        return 0;
index 24e2b3c..a72e8d6 100644 (file)
@@ -97,6 +97,9 @@ struct twl6030_usb {
 
        struct regulator                *usb3v3;
 
+       /* used to check initial cable status after probe */
+       struct delayed_work     get_status_work;
+
        /* used to set vbus, in atomic path */
        struct work_struct      set_vbus_work;
 
@@ -227,12 +230,16 @@ static irqreturn_t twl6030_usb_irq(int irq, void *_twl)
                        twl->asleep = 1;
                        status = MUSB_VBUS_VALID;
                        twl->linkstat = status;
-                       musb_mailbox(status);
+                       ret = musb_mailbox(status);
+                       if (ret)
+                               twl->linkstat = MUSB_UNKNOWN;
                } else {
                        if (twl->linkstat != MUSB_UNKNOWN) {
                                status = MUSB_VBUS_OFF;
                                twl->linkstat = status;
-                               musb_mailbox(status);
+                               ret = musb_mailbox(status);
+                               if (ret)
+                                       twl->linkstat = MUSB_UNKNOWN;
                                if (twl->asleep) {
                                        regulator_disable(twl->usb3v3);
                                        twl->asleep = 0;
@@ -264,7 +271,9 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
                twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_SET);
                status = MUSB_ID_GROUND;
                twl->linkstat = status;
-               musb_mailbox(status);
+               ret = musb_mailbox(status);
+               if (ret)
+                       twl->linkstat = MUSB_UNKNOWN;
        } else  {
                twl6030_writeb(twl, TWL_MODULE_USB, 0x10, USB_ID_INT_EN_HI_CLR);
                twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -274,6 +283,15 @@ static irqreturn_t twl6030_usbotg_irq(int irq, void *_twl)
        return IRQ_HANDLED;
 }
 
+static void twl6030_status_work(struct work_struct *work)
+{
+       struct twl6030_usb *twl = container_of(work, struct twl6030_usb,
+                                              get_status_work.work);
+
+       twl6030_usb_irq(twl->irq2, twl);
+       twl6030_usbotg_irq(twl->irq1, twl);
+}
+
 static int twl6030_enable_irq(struct twl6030_usb *twl)
 {
        twl6030_writeb(twl, TWL_MODULE_USB, 0x1, USB_ID_INT_EN_HI_SET);
@@ -284,8 +302,6 @@ static int twl6030_enable_irq(struct twl6030_usb *twl)
                                REG_INT_MSK_LINE_C);
        twl6030_interrupt_unmask(TWL6030_CHARGER_CTRL_INT_MASK,
                                REG_INT_MSK_STS_C);
-       twl6030_usb_irq(twl->irq2, twl);
-       twl6030_usbotg_irq(twl->irq1, twl);
 
        return 0;
 }
@@ -371,6 +387,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
                dev_warn(&pdev->dev, "could not create sysfs file\n");
 
        INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);
+       INIT_DELAYED_WORK(&twl->get_status_work, twl6030_status_work);
 
        status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
                        IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING | IRQF_ONESHOT,
@@ -395,6 +412,7 @@ static int twl6030_usb_probe(struct platform_device *pdev)
 
        twl->asleep = 0;
        twl6030_enable_irq(twl);
+       schedule_delayed_work(&twl->get_status_work, HZ);
        dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");
 
        return 0;
@@ -404,6 +422,7 @@ static int twl6030_usb_remove(struct platform_device *pdev)
 {
        struct twl6030_usb *twl = platform_get_drvdata(pdev);
 
+       cancel_delayed_work(&twl->get_status_work);
        twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
                REG_INT_MSK_LINE_C);
        twl6030_interrupt_mask(TWL6030_USBOTG_INT_MASK,
index 2eddbe5..5608af4 100644 (file)
@@ -2007,6 +2007,7 @@ static void mos7720_release(struct usb_serial *serial)
                                    urblist_entry)
                        usb_unlink_urb(urbtrack->urb);
                spin_unlock_irqrestore(&mos_parport->listlock, flags);
+               parport_del_port(mos_parport->pp);
 
                kref_put(&mos_parport->ref_count, destroy_mos_parport);
        }
index 4d49fce..5ef014b 100644 (file)
@@ -836,6 +836,7 @@ static int uas_slave_configure(struct scsi_device *sdev)
        if (devinfo->flags & US_FL_BROKEN_FUA)
                sdev->broken_fua = 1;
 
+       scsi_change_queue_depth(sdev, devinfo->qdepth - 2);
        return 0;
 }
 
@@ -848,7 +849,6 @@ static struct scsi_host_template uas_host_template = {
        .slave_configure = uas_slave_configure,
        .eh_abort_handler = uas_eh_abort_handler,
        .eh_bus_reset_handler = uas_eh_bus_reset_handler,
-       .can_queue = MAX_CMNDS,
        .this_id = -1,
        .sg_tablesize = SG_NONE,
        .skip_settle_delay = 1,
index fca5110..2e0450b 100644 (file)
@@ -941,7 +941,7 @@ static void vhci_stop(struct usb_hcd *hcd)
 
 static int vhci_get_frame_number(struct usb_hcd *hcd)
 {
-       pr_err("Not yet implemented\n");
+       dev_err_ratelimited(&hcd->self.root_hub->dev, "Not yet implemented\n");
        return 0;
 }
 
index b54f26c..b4b3e25 100644 (file)
@@ -746,7 +746,7 @@ config ALIM7101_WDT
 
 config EBC_C384_WDT
        tristate "WinSystems EBC-C384 Watchdog Timer"
-       depends on X86 && ISA
+       depends on X86 && ISA_BUS_API
        select WATCHDOG_CORE
        help
          Enables watchdog timer support for the watchdog timer on the
index d46839f..e4db19e 100644 (file)
@@ -151,8 +151,6 @@ static DECLARE_WAIT_QUEUE_HEAD(balloon_wq);
 static void balloon_process(struct work_struct *work);
 static DECLARE_DELAYED_WORK(balloon_worker, balloon_process);
 
-static void release_memory_resource(struct resource *resource);
-
 /* When ballooning out (allocating memory to return to Xen) we don't really
    want the kernel to try too hard since that can trigger the oom killer. */
 #define GFP_BALLOON \
@@ -248,6 +246,19 @@ static enum bp_state update_schedule(enum bp_state state)
 }
 
 #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG
+static void release_memory_resource(struct resource *resource)
+{
+       if (!resource)
+               return;
+
+       /*
+        * No need to reset region to identity mapped since we now
+        * know that no I/O can be in this region
+        */
+       release_resource(resource);
+       kfree(resource);
+}
+
 static struct resource *additional_memory_resource(phys_addr_t size)
 {
        struct resource *res;
@@ -286,19 +297,6 @@ static struct resource *additional_memory_resource(phys_addr_t size)
        return res;
 }
 
-static void release_memory_resource(struct resource *resource)
-{
-       if (!resource)
-               return;
-
-       /*
-        * No need to reset region to identity mapped since we now
-        * know that no I/O can be in this region
-        */
-       release_resource(resource);
-       kfree(resource);
-}
-
 static enum bp_state reserve_additional_memory(void)
 {
        long credit;
index 8e67336..6a25533 100644 (file)
@@ -183,8 +183,7 @@ int xen_pcibk_config_read(struct pci_dev *dev, int offset, int size,
                field_start = OFFSET(cfg_entry);
                field_end = OFFSET(cfg_entry) + field->size;
 
-               if ((req_start >= field_start && req_start < field_end)
-                   || (req_end > field_start && req_end <= field_end)) {
+                if (req_end > field_start && field_end > req_start) {
                        err = conf_space_read(dev, cfg_entry, field_start,
                                              &tmp_val);
                        if (err)
@@ -230,8 +229,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
                field_start = OFFSET(cfg_entry);
                field_end = OFFSET(cfg_entry) + field->size;
 
-               if ((req_start >= field_start && req_start < field_end)
-                   || (req_end > field_start && req_end <= field_end)) {
+                if (req_end > field_start && field_end > req_start) {
                        tmp_val = 0;
 
                        err = xen_pcibk_config_read(dev, field_start,
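
The rewritten condition above is the standard half-open interval overlap test: [req_start, req_end) and [field_start, field_end) intersect iff req_end > field_start and field_end > req_start. The old two-clause form missed the case where the request completely encloses the field, which the short runnable check below demonstrates:

#include <stdbool.h>
#include <stdio.h>

/* Half-open ranges [rs, re) and [fs, fe). */
static bool old_check(int rs, int re, int fs, int fe)
{
	return (rs >= fs && rs < fe) || (re > fs && re <= fe);
}

static bool new_check(int rs, int re, int fs, int fe)
{
	return re > fs && fe > rs;
}

int main(void)
{
	/* Request [0, 8) fully covers field [2, 4): the old check misses it. */
	printf("old: %d  new: %d\n", old_check(0, 8, 2, 4), new_check(0, 8, 2, 4));
	return 0;
}
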
index ad3d17d..9ead1c2 100644 (file)
@@ -145,7 +145,7 @@ static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
        /* A write to obtain the length must happen as a 32-bit write.
         * This does not (yet) support writing individual bytes
         */
-       if (value == ~PCI_ROM_ADDRESS_ENABLE)
+       if ((value | ~PCI_ROM_ADDRESS_MASK) == ~0U)
                bar->which = 1;
        else {
                u32 tmpval;
@@ -225,38 +225,42 @@ static inline void read_dev_bar(struct pci_dev *dev,
                           (PCI_BASE_ADDRESS_SPACE_MEMORY |
                                PCI_BASE_ADDRESS_MEM_TYPE_64))) {
                        bar_info->val = res[pos - 1].start >> 32;
-                       bar_info->len_val = res[pos - 1].end >> 32;
+                       bar_info->len_val = -resource_size(&res[pos - 1]) >> 32;
                        return;
                }
        }
 
+       if (!res[pos].flags ||
+           (res[pos].flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET |
+                              IORESOURCE_BUSY)))
+               return;
+
        bar_info->val = res[pos].start |
                        (res[pos].flags & PCI_REGION_FLAG_MASK);
-       bar_info->len_val = resource_size(&res[pos]);
+       bar_info->len_val = -resource_size(&res[pos]) |
+                           (res[pos].flags & PCI_REGION_FLAG_MASK);
 }
 
 static void *bar_init(struct pci_dev *dev, int offset)
 {
-       struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+       struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
        if (!bar)
                return ERR_PTR(-ENOMEM);
 
        read_dev_bar(dev, bar, offset, ~0);
-       bar->which = 0;
 
        return bar;
 }
 
 static void *rom_init(struct pci_dev *dev, int offset)
 {
-       struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
+       struct pci_bar_info *bar = kzalloc(sizeof(*bar), GFP_KERNEL);
 
        if (!bar)
                return ERR_PTR(-ENOMEM);
 
        read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
-       bar->which = 0;
 
        return bar;
 }
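
The len_val changes above follow the usual PCI BAR sizing convention: after software writes all ones to a BAR, the read-back value exposes the writable address bits, and since BAR sizes are powers of two that value is simply -size in the address field, optionally OR'd with the read-only flag bits. A runnable sketch of that encoding and of how a guest would decode it; the decode helper and the local flag-mask macro are illustrative, not part of xen-pciback:

#include <stdint.h>
#include <stdio.h>

#define REGION_FLAG_MASK 0x0fU  /* low, read-only flag bits */

/* What the backend reports after an all-ones write: -size | flags. */
static uint32_t encode_len_val(uint32_t size, uint32_t flags)
{
	return (uint32_t)-size | (flags & REGION_FLAG_MASK);
}

/* How a guest recovers the size: mask off the flags, then negate. */
static uint32_t decode_size(uint32_t len_val)
{
	return (uint32_t)-(len_val & ~REGION_FLAG_MASK);
}

int main(void)
{
	uint32_t len_val = encode_len_val(0x1000, 0x4);  /* 4 KiB memory BAR */

	printf("len_val=0x%08x decoded size=0x%x\n", len_val, decode_size(len_val));
	return 0;
}
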
index f0d268b..a439548 100644 (file)
@@ -70,9 +70,13 @@ struct autofs_info {
 };
 
 #define AUTOFS_INF_EXPIRING    (1<<0) /* dentry in the process of expiring */
-#define AUTOFS_INF_NO_RCU      (1<<1) /* the dentry is being considered
+#define AUTOFS_INF_WANT_EXPIRE (1<<1) /* the dentry is being considered
                                        * for expiry, so RCU_walk is
-                                       * not permitted
+                                       * not permitted.  If it progresses to
+                                       * an actual expiry attempt, the flag is
+                                       * not cleared when EXPIRING is set; in
+                                       * that case it is cleared only when
+                                       * EXPIRING itself is cleared.
                                        */
 #define AUTOFS_INF_PENDING     (1<<2) /* dentry pending mount */
 
index 9510d8d..b493909 100644 (file)
@@ -316,19 +316,17 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
        if (ino->flags & AUTOFS_INF_PENDING)
                goto out;
        if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
-               ino->flags |= AUTOFS_INF_NO_RCU;
+               ino->flags |= AUTOFS_INF_WANT_EXPIRE;
                spin_unlock(&sbi->fs_lock);
                synchronize_rcu();
                spin_lock(&sbi->fs_lock);
                if (!autofs4_direct_busy(mnt, root, timeout, do_now)) {
                        ino->flags |= AUTOFS_INF_EXPIRING;
-                       smp_mb();
-                       ino->flags &= ~AUTOFS_INF_NO_RCU;
                        init_completion(&ino->expire_complete);
                        spin_unlock(&sbi->fs_lock);
                        return root;
                }
-               ino->flags &= ~AUTOFS_INF_NO_RCU;
+               ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
        }
 out:
        spin_unlock(&sbi->fs_lock);
@@ -446,7 +444,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
        while ((dentry = get_next_positive_subdir(dentry, root))) {
                spin_lock(&sbi->fs_lock);
                ino = autofs4_dentry_ino(dentry);
-               if (ino->flags & AUTOFS_INF_NO_RCU)
+               if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
                        expired = NULL;
                else
                        expired = should_expire(dentry, mnt, timeout, how);
@@ -455,7 +453,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
                        continue;
                }
                ino = autofs4_dentry_ino(expired);
-               ino->flags |= AUTOFS_INF_NO_RCU;
+               ino->flags |= AUTOFS_INF_WANT_EXPIRE;
                spin_unlock(&sbi->fs_lock);
                synchronize_rcu();
                spin_lock(&sbi->fs_lock);
@@ -465,7 +463,7 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
                        goto found;
                }
 
-               ino->flags &= ~AUTOFS_INF_NO_RCU;
+               ino->flags &= ~AUTOFS_INF_WANT_EXPIRE;
                if (expired != dentry)
                        dput(expired);
                spin_unlock(&sbi->fs_lock);
@@ -475,17 +473,8 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb,
 found:
        pr_debug("returning %p %pd\n", expired, expired);
        ino->flags |= AUTOFS_INF_EXPIRING;
-       smp_mb();
-       ino->flags &= ~AUTOFS_INF_NO_RCU;
        init_completion(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
-       spin_lock(&sbi->lookup_lock);
-       spin_lock(&expired->d_parent->d_lock);
-       spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
-       list_move(&expired->d_parent->d_subdirs, &expired->d_child);
-       spin_unlock(&expired->d_lock);
-       spin_unlock(&expired->d_parent->d_lock);
-       spin_unlock(&sbi->lookup_lock);
        return expired;
 }
 
@@ -496,7 +485,7 @@ int autofs4_expire_wait(struct dentry *dentry, int rcu_walk)
        int status;
 
        /* Block on any pending expire */
-       if (!(ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU)))
+       if (!(ino->flags & AUTOFS_INF_WANT_EXPIRE))
                return 0;
        if (rcu_walk)
                return -ECHILD;
@@ -554,7 +543,7 @@ int autofs4_expire_run(struct super_block *sb,
        ino = autofs4_dentry_ino(dentry);
        /* avoid rapid-fire expire attempts if expiry fails */
        ino->last_used = now;
-       ino->flags &= ~AUTOFS_INF_EXPIRING;
+       ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
        complete_all(&ino->expire_complete);
        spin_unlock(&sbi->fs_lock);
 
@@ -583,7 +572,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
                spin_lock(&sbi->fs_lock);
                /* avoid rapid-fire expire attempts if expiry fails */
                ino->last_used = now;
-               ino->flags &= ~AUTOFS_INF_EXPIRING;
+               ino->flags &= ~(AUTOFS_INF_EXPIRING|AUTOFS_INF_WANT_EXPIRE);
                complete_all(&ino->expire_complete);
                spin_unlock(&sbi->fs_lock);
                dput(dentry);
index 78bd802..3767f66 100644 (file)
@@ -458,7 +458,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
                 */
                struct inode *inode;
 
-               if (ino->flags & (AUTOFS_INF_EXPIRING | AUTOFS_INF_NO_RCU))
+               if (ino->flags & AUTOFS_INF_WANT_EXPIRE)
                        return 0;
                if (d_mountpoint(dentry))
                        return 0;
index 0146d91..631f155 100644 (file)
@@ -66,11 +66,12 @@ static int autofs4_write(struct autofs_sb_info *sbi,
        set_fs(KERNEL_DS);
 
        mutex_lock(&sbi->pipe_mutex);
-       wr = __vfs_write(file, data, bytes, &file->f_pos);
-       while (bytes && wr) {
+       while (bytes) {
+               wr = __vfs_write(file, data, bytes, &file->f_pos);
+               if (wr <= 0)
+                       break;
                data += wr;
                bytes -= wr;
-               wr = __vfs_write(file, data, bytes, &file->f_pos);
        }
        mutex_unlock(&sbi->pipe_mutex);
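The rewritten pipe-write loop above issues the first __vfs_write() inside the loop and bails out on a zero or negative return, so a short write is retried with the remaining bytes instead of being handled by a duplicated call before the loop. A rough userspace equivalent using plain write(2) (illustrative sketch, not the autofs code):

#include <unistd.h>
#include <errno.h>
#include <stddef.h>

/* Write all of `bytes`, retrying after partial writes; returns 0 or -errno. */
static int write_all(int fd, const char *data, size_t bytes)
{
	while (bytes) {
		ssize_t wr = write(fd, data, bytes);

		if (wr <= 0)
			return wr < 0 ? -errno : -EIO;
		data += wr;
		bytes -= wr;
	}
	return 0;
}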
 
index b677a6e..7706c8d 100644 (file)
@@ -2645,7 +2645,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
         * This algorithm is recursive because the amount of used stack space
         * is very small and the max recursion depth is limited.
         */
-       indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
+       indent_add = sprintf(buf, "%c-%llu(%s/%llu/%u)",
                             btrfsic_get_block_type(state, block),
                             block->logical_bytenr, block->dev_state->name,
                             block->dev_bytenr, block->mirror_num);
index 4602568..a85cf7d 100644 (file)
@@ -1554,6 +1554,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
                       trans->transid, root->fs_info->generation);
 
        if (!should_cow_block(trans, root, buf)) {
+               trans->dirty = true;
                *cow_ret = buf;
                return 0;
        }
@@ -1785,10 +1786,12 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
                        if (!err) {
                                tmp = (struct btrfs_disk_key *)(kaddr + offset -
                                                        map_start);
-                       } else {
+                       } else if (err == 1) {
                                read_extent_buffer(eb, &unaligned,
                                                   offset, sizeof(unaligned));
                                tmp = &unaligned;
+                       } else {
+                               return err;
                        }
 
                } else {
@@ -2512,6 +2515,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
                if (!btrfs_buffer_uptodate(tmp, 0, 0))
                        ret = -EIO;
                free_extent_buffer(tmp);
+       } else {
+               ret = PTR_ERR(tmp);
        }
        return ret;
 }
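Several hunks in this series stop treating a NULL extent buffer as the only failure mode and instead propagate an encoded error pointer, as in the PTR_ERR(tmp) added above. A small userspace re-creation of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention, showing why a bare `if (!buf)` test is no longer enough (toy allocator, hypothetical names):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long err)       { return (void *)err; }
static inline long  PTR_ERR(const void *p)  { return (long)p; }
static inline int   IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

/* Toy stand-in for a tree-block lookup that can fail two different ways. */
static void *find_create_block(int misaligned)
{
	void *b;

	if (misaligned)
		return ERR_PTR(-EINVAL);   /* bad start offset */
	b = malloc(64);
	return b ? b : ERR_PTR(-ENOMEM);
}

int main(void)
{
	void *buf = find_create_block(1);

	if (IS_ERR(buf)) {                 /* a NULL check alone would miss this */
		printf("error: %ld\n", PTR_ERR(buf));
		return 1;
	}
	free(buf);
	return 0;
}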
@@ -2775,8 +2780,10 @@ again:
                         * then we don't want to set the path blocking,
                         * so we test it here
                         */
-                       if (!should_cow_block(trans, root, b))
+                       if (!should_cow_block(trans, root, b)) {
+                               trans->dirty = true;
                                goto cow_done;
+                       }
 
                        /*
                         * must have write locks on this node and the
@@ -2825,6 +2832,8 @@ cow_done:
                }
 
                ret = key_search(b, key, level, &prev_cmp, &slot);
+               if (ret < 0)
+                       goto done;
 
                if (level != 0) {
                        int dec = 0;
index 101c3cf..4274a7b 100644 (file)
@@ -2518,7 +2518,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, unsigned long count);
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-                                unsigned long count, int wait);
+                                unsigned long count, u64 transid, int wait);
 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 bytenr,
index 61561c2..d3aaabb 100644 (file)
@@ -1606,15 +1606,23 @@ int btrfs_inode_delayed_dir_index_count(struct inode *inode)
        return 0;
 }
 
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
-                            struct list_head *del_list)
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list)
 {
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_delayed_item *item;
 
        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
-               return;
+               return false;
+
+       /*
+        * We can only do one readdir with delayed items at a time because of
+        * item->readdir_list.
+        */
+       inode_unlock_shared(inode);
+       inode_lock(inode);
 
        mutex_lock(&delayed_node->mutex);
        item = __btrfs_first_delayed_insertion_item(delayed_node);
@@ -1641,10 +1649,13 @@ void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
         * requeue or dequeue this delayed node.
         */
        atomic_dec(&delayed_node->refs);
+
+       return true;
 }
 
-void btrfs_put_delayed_items(struct list_head *ins_list,
-                            struct list_head *del_list)
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list)
 {
        struct btrfs_delayed_item *curr, *next;
 
@@ -1659,6 +1670,12 @@ void btrfs_put_delayed_items(struct list_head *ins_list,
                if (atomic_dec_and_test(&curr->refs))
                        kfree(curr);
        }
+
+       /*
+        * The VFS is going to do up_read(), so we need to downgrade back to a
+        * read lock.
+        */
+       downgrade_write(&inode->i_rwsem);
 }
 
 int btrfs_should_delete_dir_index(struct list_head *del_list,
index 0167853..2495b3d 100644 (file)
@@ -137,10 +137,12 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
 void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
 
 /* Used for readdir() */
-void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
-                            struct list_head *del_list);
-void btrfs_put_delayed_items(struct list_head *ins_list,
-                            struct list_head *del_list);
+bool btrfs_readdir_get_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list);
+void btrfs_readdir_put_delayed_items(struct inode *inode,
+                                    struct list_head *ins_list,
+                                    struct list_head *del_list);
 int btrfs_should_delete_dir_index(struct list_head *del_list,
                                  u64 index);
 int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
index 1142127..60ce119 100644 (file)
@@ -1098,7 +1098,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
        struct inode *btree_inode = root->fs_info->btree_inode;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
+       if (IS_ERR(buf))
                return;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
                                 buf, 0, WAIT_NONE, btree_get_extent, 0);
@@ -1114,7 +1114,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
        int ret;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
+       if (IS_ERR(buf))
                return 0;
 
        set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);
@@ -1172,8 +1172,8 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
        int ret;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(buf))
+               return buf;
 
        ret = btree_read_extent_buffer_pages(root, buf, 0, parent_transid);
        if (ret) {
@@ -1806,6 +1806,13 @@ static int cleaner_kthread(void *arg)
                if (btrfs_need_cleaner_sleep(root))
                        goto sleep;
 
+               /*
+                * Do not do anything if we might cause open_ctree() to block
+                * before we have finished mounting the filesystem.
+                */
+               if (!root->fs_info->open)
+                       goto sleep;
+
                if (!mutex_trylock(&root->fs_info->cleaner_mutex))
                        goto sleep;
 
@@ -2520,7 +2527,6 @@ int open_ctree(struct super_block *sb,
        int num_backups_tried = 0;
        int backup_index = 0;
        int max_active;
-       bool cleaner_mutex_locked = false;
 
        tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
        chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL);
@@ -2800,7 +2806,7 @@ int open_ctree(struct super_block *sb,
 
        nodesize = btrfs_super_nodesize(disk_super);
        sectorsize = btrfs_super_sectorsize(disk_super);
-       stripesize = btrfs_super_stripesize(disk_super);
+       stripesize = sectorsize;
        fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
        fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
 
@@ -2999,13 +3005,6 @@ retry_root_backup:
                goto fail_sysfs;
        }
 
-       /*
-        * Hold the cleaner_mutex thread here so that we don't block
-        * for a long time on btrfs_recover_relocation.  cleaner_kthread
-        * will wait for us to finish mounting the filesystem.
-        */
-       mutex_lock(&fs_info->cleaner_mutex);
-       cleaner_mutex_locked = true;
        fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root,
                                               "btrfs-cleaner");
        if (IS_ERR(fs_info->cleaner_kthread))
@@ -3065,8 +3064,10 @@ retry_root_backup:
                ret = btrfs_cleanup_fs_roots(fs_info);
                if (ret)
                        goto fail_qgroup;
-               /* We locked cleaner_mutex before creating cleaner_kthread. */
+
+               mutex_lock(&fs_info->cleaner_mutex);
                ret = btrfs_recover_relocation(tree_root);
+               mutex_unlock(&fs_info->cleaner_mutex);
                if (ret < 0) {
                        btrfs_warn(fs_info, "failed to recover relocation: %d",
                                        ret);
@@ -3074,8 +3075,6 @@ retry_root_backup:
                        goto fail_qgroup;
                }
        }
-       mutex_unlock(&fs_info->cleaner_mutex);
-       cleaner_mutex_locked = false;
 
        location.objectid = BTRFS_FS_TREE_OBJECTID;
        location.type = BTRFS_ROOT_ITEM_KEY;
@@ -3189,10 +3188,6 @@ fail_cleaner:
        filemap_write_and_wait(fs_info->btree_inode->i_mapping);
 
 fail_sysfs:
-       if (cleaner_mutex_locked) {
-               mutex_unlock(&fs_info->cleaner_mutex);
-               cleaner_mutex_locked = false;
-       }
        btrfs_sysfs_remove_mounted(fs_info);
 
 fail_fsdev_sysfs:
@@ -4138,8 +4133,7 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                       btrfs_super_bytes_used(sb));
                ret = -EINVAL;
        }
-       if (!is_power_of_2(btrfs_super_stripesize(sb)) ||
-           btrfs_super_stripesize(sb) != sectorsize) {
+       if (!is_power_of_2(btrfs_super_stripesize(sb))) {
                btrfs_err(fs_info, "invalid stripesize %u",
                       btrfs_super_stripesize(sb));
                ret = -EINVAL;
index 689d25a..82b912a 100644 (file)
@@ -2835,6 +2835,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
 
 struct async_delayed_refs {
        struct btrfs_root *root;
+       u64 transid;
        int count;
        int error;
        int sync;
@@ -2850,6 +2851,10 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 
        async = container_of(work, struct async_delayed_refs, work);
 
+       /* if the commit is already started, we don't need to wait here */
+       if (btrfs_transaction_blocked(async->root->fs_info))
+               goto done;
+
        trans = btrfs_join_transaction(async->root);
        if (IS_ERR(trans)) {
                async->error = PTR_ERR(trans);
@@ -2861,10 +2866,15 @@ static void delayed_ref_async_start(struct btrfs_work *work)
         * wait on delayed refs
         */
        trans->sync = true;
+
+       /* Don't bother flushing if we got into a different transaction */
+       if (trans->transid > async->transid)
+               goto end;
+
        ret = btrfs_run_delayed_refs(trans, async->root, async->count);
        if (ret)
                async->error = ret;
-
+end:
        ret = btrfs_end_transaction(trans, async->root);
        if (ret && !async->error)
                async->error = ret;
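The async worker above now records which transaction queued the flush and, per the "Don't bother flushing" comment, gives up when it ends up joining a later one. A compact sketch of that generation-check pattern (userspace, hypothetical names):

#include <stdio.h>

static unsigned long long current_transid = 42;

struct async_work {
	unsigned long long transid;  /* transaction that queued the work */
};

static void run_async(const struct async_work *w)
{
	/* joined a newer transaction than the one that queued us: skip */
	if (current_transid > w->transid) {
		printf("skipping stale flush for transid %llu\n", w->transid);
		return;
	}
	printf("flushing delayed refs for transid %llu\n", w->transid);
}

int main(void)
{
	struct async_work w = { .transid = 41 };

	run_async(&w);          /* stale, skipped */
	w.transid = 42;
	run_async(&w);          /* current, runs */
	return 0;
}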
@@ -2876,7 +2886,7 @@ done:
 }
 
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-                                unsigned long count, int wait)
+                                unsigned long count, u64 transid, int wait)
 {
        struct async_delayed_refs *async;
        int ret;
@@ -2888,6 +2898,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
        async->root = root->fs_info->tree_root;
        async->count = count;
        async->error = 0;
+       async->transid = transid;
        if (wait)
                async->sync = 1;
        else
@@ -8016,8 +8027,9 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
        struct extent_buffer *buf;
 
        buf = btrfs_find_create_tree_block(root, bytenr);
-       if (!buf)
-               return ERR_PTR(-ENOMEM);
+       if (IS_ERR(buf))
+               return buf;
+
        btrfs_set_header_generation(buf, trans->transid);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
        btrfs_tree_lock(buf);
@@ -8044,7 +8056,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
                         buf->start + buf->len - 1, GFP_NOFS);
        }
-       trans->blocks_used++;
+       trans->dirty = true;
        /* this returns a buffer locked for blocking */
        return buf;
 }
@@ -8659,8 +8671,9 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
        next = btrfs_find_tree_block(root->fs_info, bytenr);
        if (!next) {
                next = btrfs_find_create_tree_block(root, bytenr);
-               if (!next)
-                       return -ENOMEM;
+               if (IS_ERR(next))
+                       return PTR_ERR(next);
+
                btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
                                               level - 1);
                reada = 1;
index a3412d6..75533ad 100644 (file)
@@ -4892,18 +4892,25 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        int uptodate = 1;
        int ret;
 
+       if (!IS_ALIGNED(start, fs_info->tree_root->sectorsize)) {
+               btrfs_err(fs_info, "bad tree block start %llu", start);
+               return ERR_PTR(-EINVAL);
+       }
+
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
 
        eb = __alloc_extent_buffer(fs_info, start, len);
        if (!eb)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        for (i = 0; i < num_pages; i++, index++) {
                p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
-               if (!p)
+               if (!p) {
+                       exists = ERR_PTR(-ENOMEM);
                        goto free_eb;
+               }
 
                spin_lock(&mapping->private_lock);
                if (PagePrivate(p)) {
@@ -4948,8 +4955,10 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
                set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
 again:
        ret = radix_tree_preload(GFP_NOFS);
-       if (ret)
+       if (ret) {
+               exists = ERR_PTR(ret);
                goto free_eb;
+       }
 
        spin_lock(&fs_info->buffer_lock);
        ret = radix_tree_insert(&fs_info->buffer_radix,
@@ -5333,6 +5342,11 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
        return ret;
 }
 
+/*
+ * return 0 if the item is found within a page.
+ * return 1 if the item spans two pages.
+ * return -EINVAL otherwise.
+ */
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                               unsigned long min_len, char **map,
                               unsigned long *map_start,
@@ -5347,7 +5361,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
                PAGE_SHIFT;
 
        if (i != end_i)
-               return -EINVAL;
+               return 1;
 
        if (i == 0) {
                offset = start_offset;
index e0c9bd3..2234e88 100644 (file)
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                reserve_bytes = round_up(write_bytes + sector_offset,
                                root->sectorsize);
 
-               if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
-                                             BTRFS_INODE_PREALLOC)) &&
-                   check_can_nocow(inode, pos, &write_bytes) > 0) {
-                       /*
-                        * For nodata cow case, no need to reserve
-                        * data space.
-                        */
-                       only_release_metadata = true;
-                       /*
-                        * our prealloc extent may be smaller than
-                        * write_bytes, so scale down.
-                        */
-                       num_pages = DIV_ROUND_UP(write_bytes + offset,
-                                                PAGE_SIZE);
-                       reserve_bytes = round_up(write_bytes + sector_offset,
-                                       root->sectorsize);
-                       goto reserve_metadata;
-               }
-
                ret = btrfs_check_data_free_space(inode, pos, write_bytes);
-               if (ret < 0)
-                       break;
+               if (ret < 0) {
+                       if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+                                                     BTRFS_INODE_PREALLOC)) &&
+                           check_can_nocow(inode, pos, &write_bytes) > 0) {
+                               /*
+                                * For nodata cow case, no need to reserve
+                                * data space.
+                                */
+                               only_release_metadata = true;
+                               /*
+                                * our prealloc extent may be smaller than
+                                * write_bytes, so scale down.
+                                */
+                               num_pages = DIV_ROUND_UP(write_bytes + offset,
+                                                        PAGE_SIZE);
+                               reserve_bytes = round_up(write_bytes +
+                                                        sector_offset,
+                                                        root->sectorsize);
+                       } else {
+                               break;
+                       }
+               }
 
-reserve_metadata:
                ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
                if (ret) {
                        if (!only_release_metadata)
index 8b1212e..4421954 100644 (file)
@@ -3271,7 +3271,16 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
        /* grab metadata reservation from transaction handle */
        if (reserve) {
                ret = btrfs_orphan_reserve_metadata(trans, inode);
-               BUG_ON(ret); /* -ENOSPC in reservation; Logic error? JDM */
+               ASSERT(!ret);
+               if (ret) {
+                       atomic_dec(&root->orphan_inodes);
+                       clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
+                                 &BTRFS_I(inode)->runtime_flags);
+                       if (insert)
+                               clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
+                                         &BTRFS_I(inode)->runtime_flags);
+                       return ret;
+               }
        }
 
        /* insert an orphan item to track this unlinked/truncated file */
@@ -4549,6 +4558,7 @@ delete:
                        BUG_ON(ret);
                        if (btrfs_should_throttle_delayed_refs(trans, root))
                                btrfs_async_run_delayed_refs(root,
+                                                            trans->transid,
                                        trans->delayed_ref_updates * 2, 0);
                        if (be_nice) {
                                if (truncate_space_check(trans, root,
@@ -5748,6 +5758,7 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
        int name_len;
        int is_curr = 0;        /* ctx->pos points to the current index? */
        bool emitted;
+       bool put = false;
 
        /* FIXME, use a real flag for deciding about the key type */
        if (root->fs_info->tree_root == root)
@@ -5765,7 +5776,8 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
        if (key_type == BTRFS_DIR_INDEX_KEY) {
                INIT_LIST_HEAD(&ins_list);
                INIT_LIST_HEAD(&del_list);
-               btrfs_get_delayed_items(inode, &ins_list, &del_list);
+               put = btrfs_readdir_get_delayed_items(inode, &ins_list,
+                                                     &del_list);
        }
 
        key.type = key_type;
@@ -5912,8 +5924,8 @@ next:
 nopos:
        ret = 0;
 err:
-       if (key_type == BTRFS_DIR_INDEX_KEY)
-               btrfs_put_delayed_items(&ins_list, &del_list);
+       if (put)
+               btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
        btrfs_free_path(path);
        return ret;
 }
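Because btrfs_readdir_get_delayed_items() can bail out before switching the directory lock, it now reports whether it actually acquired anything, and the caller above only calls the matching put when that is true. A minimal sketch of that acquire-returns-bool / conditional-release pairing (toy resource, not the btrfs structures):

#include <stdbool.h>
#include <stdio.h>

static bool get_delayed_items(bool have_delayed_node)
{
	if (!have_delayed_node)
		return false;           /* nothing acquired, nothing to release */
	/* ...upgrade the lock and snapshot the delayed items here... */
	return true;
}

static void put_delayed_items(void)
{
	/* ...drop the snapshot and downgrade the lock again... */
	printf("released\n");
}

static void emit_dir(bool have_delayed_node)
{
	bool put = get_delayed_items(have_delayed_node);

	/* ...emit directory entries... */

	if (put)                        /* release only what was acquired */
		put_delayed_items();
}

int main(void)
{
	emit_dir(false);
	emit_dir(true);
	return 0;
}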
@@ -10525,7 +10537,7 @@ static const struct inode_operations btrfs_dir_ro_inode_operations = {
 static const struct file_operations btrfs_dir_file_operations = {
        .llseek         = generic_file_llseek,
        .read           = generic_read_dir,
-       .iterate        = btrfs_real_readdir,
+       .iterate_shared = btrfs_real_readdir,
        .unlocked_ioctl = btrfs_ioctl,
 #ifdef CONFIG_COMPAT
        .compat_ioctl   = btrfs_compat_ioctl,
index e96634a..aca8264 100644 (file)
@@ -968,6 +968,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
        struct rb_node *prev = NULL;
        struct btrfs_ordered_extent *test;
        int ret = 1;
+       u64 orig_offset = offset;
 
        spin_lock_irq(&tree->lock);
        if (ordered) {
@@ -983,7 +984,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 
        /* truncate file */
        if (disk_i_size > i_size) {
-               BTRFS_I(inode)->disk_i_size = i_size;
+               BTRFS_I(inode)->disk_i_size = orig_offset;
                ret = 0;
                goto out;
        }
index 4339b66..60e7179 100644 (file)
@@ -235,7 +235,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
        trans->aborted = errno;
        /* Nothing used. The other threads that have joined this
         * transaction may be able to continue. */
-       if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
+       if (!trans->dirty && list_empty(&trans->new_bgs)) {
                const char *errstr;
 
                errstr = btrfs_decode_error(errno);
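The check above now keys off a single boolean that the ctree.c and extent-tree.c hunks set whenever a handle CoWs or allocates a block, replacing the blocks_used counter. A tiny sketch of using a "did we dirty anything" marker to decide how severe an aborted handle is (hypothetical names, behaviour simplified from the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct trans_handle {
	bool dirty;      /* set whenever the handle CoWs or allocates a block */
	int aborted;
};

static void touch_block(struct trans_handle *t)
{
	/* ...allocate or CoW a tree block... */
	t->dirty = true;
}

static void abort_transaction(struct trans_handle *t, int err)
{
	t->aborted = err;
	if (!t->dirty) {
		/* nothing was modified: other joiners may carry on */
		printf("clean abort (%d), taking the gentle path\n", err);
		return;
	}
	printf("dirty abort (%d), taking the full error path\n", err);
}

int main(void)
{
	struct trans_handle t = { 0 };

	abort_transaction(&t, -5);
	touch_block(&t);
	abort_transaction(&t, -5);
	return 0;
}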
@@ -1807,6 +1807,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                        }
                }
                sb->s_flags &= ~MS_RDONLY;
+
+               fs_info->open = 1;
        }
 out:
        wake_up_process(fs_info->transaction_kthread);
index 10eb249..02223f3 100644 (file)
@@ -68,7 +68,7 @@ int btrfs_init_test_fs(void)
        if (IS_ERR(test_mnt)) {
                printk(KERN_ERR "btrfs: cannot mount test file system\n");
                unregister_filesystem(&test_type);
-               return ret;
+               return PTR_ERR(test_mnt);
        }
        return 0;
 }
index f6e24cb..948aa18 100644 (file)
@@ -818,6 +818,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
+       u64 transid = trans->transid;
        unsigned long cur = trans->delayed_ref_updates;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;
@@ -905,7 +906,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        if (must_run_delayed_refs) {
-               btrfs_async_run_delayed_refs(root, cur,
+               btrfs_async_run_delayed_refs(root, cur, transid,
                                             must_run_delayed_refs == 1);
        }
        return err;
@@ -1311,11 +1312,6 @@ int btrfs_defrag_root(struct btrfs_root *root)
        return ret;
 }
 
-/* Bisesctability fixup, remove in 4.8 */
-#ifndef btrfs_std_error
-#define btrfs_std_error btrfs_handle_fs_error
-#endif
-
 /*
  * Do all special snapshot related qgroup dirty hack.
  *
@@ -1385,7 +1381,7 @@ static int qgroup_account_snapshot(struct btrfs_trans_handle *trans,
        switch_commit_roots(trans->transaction, fs_info);
        ret = btrfs_write_and_wait_transaction(trans, src);
        if (ret)
-               btrfs_std_error(fs_info, ret,
+               btrfs_handle_fs_error(fs_info, ret,
                        "Error while writing out transaction for qgroup");
 
 out:
index 9fe0ec2..c5abee4 100644 (file)
@@ -110,7 +110,6 @@ struct btrfs_trans_handle {
        u64 chunk_bytes_reserved;
        unsigned long use_count;
        unsigned long blocks_reserved;
-       unsigned long blocks_used;
        unsigned long delayed_ref_updates;
        struct btrfs_transaction *transaction;
        struct btrfs_block_rsv *block_rsv;
@@ -121,6 +120,7 @@ struct btrfs_trans_handle {
        bool can_flush_pending_bgs;
        bool reloc_reserved;
        bool sync;
+       bool dirty;
        unsigned int type;
        /*
         * this root is only needed to validate that the root passed to
index b7665af..c05f69a 100644 (file)
@@ -2422,8 +2422,8 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                root_owner = btrfs_header_owner(parent);
 
                next = btrfs_find_create_tree_block(root, bytenr);
-               if (!next)
-                       return -ENOMEM;
+               if (IS_ERR(next))
+                       return PTR_ERR(next);
 
                if (*level == 1) {
                        ret = wc->process_func(root, next, wc, ptr_gen);
index 548faaa..589f128 100644 (file)
@@ -4694,12 +4694,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
        if (type & BTRFS_BLOCK_GROUP_RAID5) {
                raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
-                                btrfs_super_stripesize(info->super_copy));
+                                               extent_root->stripesize);
                data_stripes = num_stripes - 1;
        }
        if (type & BTRFS_BLOCK_GROUP_RAID6) {
                raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
-                                btrfs_super_stripesize(info->super_copy));
+                                               extent_root->stripesize);
                data_stripes = num_stripes - 2;
        }
 
@@ -6607,8 +6607,8 @@ int btrfs_read_sys_array(struct btrfs_root *root)
         * overallocate but we can keep it as-is, only the first page is used.
         */
        sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
-       if (!sb)
-               return -ENOMEM;
+       if (IS_ERR(sb))
+               return PTR_ERR(sb);
        set_extent_buffer_uptodate(sb);
        btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
        /*
index 817c243..d6847d7 100644 (file)
@@ -507,6 +507,44 @@ void d_drop(struct dentry *dentry)
 }
 EXPORT_SYMBOL(d_drop);
 
+static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
+{
+       struct dentry *next;
+       /*
+        * Inform d_walk() and shrink_dentry_list() that we are no longer
+        * attached to the dentry tree
+        */
+       dentry->d_flags |= DCACHE_DENTRY_KILLED;
+       if (unlikely(list_empty(&dentry->d_child)))
+               return;
+       __list_del_entry(&dentry->d_child);
+       /*
+        * Cursors can move around the list of children.  While we'd been
+        * a normal list member, it didn't matter - ->d_child.next would've
+        * been updated.  However, from now on it won't be and for the
+        * things like d_walk() it might end up with a nasty surprise.
+        * Normally d_walk() doesn't care about cursors moving around -
+        * ->d_lock on parent prevents that and since a cursor has no children
+        * of its own, we get through it without ever unlocking the parent.
+        * There is one exception, though - if we ascend from a child that
+        * gets killed as soon as we unlock it, the next sibling is found
+        * using the value left in its ->d_child.next.  And if _that_
+        * pointed to a cursor, and cursor got moved (e.g. by lseek())
+        * before d_walk() regains parent->d_lock, we'll end up skipping
+        * everything the cursor had been moved past.
+        *
+        * Solution: make sure that the pointer left behind in ->d_child.next
+        * points to something that won't be moving around.  I.e. skip the
+        * cursors.
+        */
+       while (dentry->d_child.next != &parent->d_subdirs) {
+               next = list_entry(dentry->d_child.next, struct dentry, d_child);
+               if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
+                       break;
+               dentry->d_child.next = next->d_child.next;
+       }
+}
+
 static void __dentry_kill(struct dentry *dentry)
 {
        struct dentry *parent = NULL;
@@ -532,12 +570,7 @@ static void __dentry_kill(struct dentry *dentry)
        }
        /* if it was on the hash then remove it */
        __d_drop(dentry);
-       __list_del_entry(&dentry->d_child);
-       /*
-        * Inform d_walk() that we are no longer attached to the
-        * dentry tree
-        */
-       dentry->d_flags |= DCACHE_DENTRY_KILLED;
+       dentry_unlist(dentry, parent);
        if (parent)
                spin_unlock(&parent->d_lock);
        dentry_iput(dentry);
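The dentry_unlist() comment above describes the hazard: a detached node's saved ->d_child.next may point at a cursor that can be moved around while the parent lock is dropped. A stripped-down illustration on an ordinary doubly linked list, where unlinking advances the saved next pointer past any cursor entries (editor's sketch of the idea, not the dcache code):

#include <stdio.h>
#include <stdbool.h>

struct node {
	struct node *prev, *next;
	bool cursor;
	const char *name;
};

/* Unlink n, but leave n->next pointing past any movable cursor entries,
 * so a walker that resumes from n->next cannot be led astray later. */
static void unlist(struct node *n, struct node *head)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	while (n->next != head && n->next->cursor)
		n->next = n->next->next;
}

int main(void)
{
	struct node head = { .name = "head" };
	struct node a = { .name = "a" };
	struct node cur = { .cursor = true, .name = "cursor" };
	struct node b = { .name = "b" };

	/* head <-> a <-> cursor <-> b <-> head */
	head.next = &a;  a.prev = &head;
	a.next = &cur;   cur.prev = &a;
	cur.next = &b;   b.prev = &cur;
	b.next = &head;  head.prev = &b;

	unlist(&a, &head);
	printf("resume after 'a' at: %s\n", a.next->name);  /* "b", not "cursor" */
	return 0;
}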
@@ -1203,6 +1236,9 @@ resume:
                struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
                next = tmp->next;
 
+               if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
+                       continue;
+
                spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
 
                ret = enter(data, dentry);
@@ -1651,6 +1687,16 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 }
 EXPORT_SYMBOL(d_alloc);
 
+struct dentry *d_alloc_cursor(struct dentry * parent)
+{
+       struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
+       if (dentry) {
+               dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
+               dentry->d_parent = dget(parent);
+       }
+       return dentry;
+}
+
 /**
  * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
  * @sb: the superblock
@@ -2457,7 +2503,6 @@ retry:
                rcu_read_unlock();
                goto retry;
        }
-       rcu_read_unlock();
        /*
         * No changes for the parent since the beginning of d_lookup().
         * Since all removals from the chain happen with hlist_bl_lock(),
@@ -2470,8 +2515,6 @@ retry:
                        continue;
                if (dentry->d_parent != parent)
                        continue;
-               if (d_unhashed(dentry))
-                       continue;
                if (parent->d_flags & DCACHE_OP_COMPARE) {
                        int tlen = dentry->d_name.len;
                        const char *tname = dentry->d_name.name;
@@ -2483,9 +2526,18 @@ retry:
                        if (dentry_cmp(dentry, str, len))
                                continue;
                }
-               dget(dentry);
                hlist_bl_unlock(b);
-               /* somebody is doing lookup for it right now; wait for it */
+               /* now we can try to grab a reference */
+               if (!lockref_get_not_dead(&dentry->d_lockref)) {
+                       rcu_read_unlock();
+                       goto retry;
+               }
+
+               rcu_read_unlock();
+               /*
+                * somebody is likely to be still doing lookup for it;
+                * wait for them to finish
+                */
                spin_lock(&dentry->d_lock);
                d_wait_lookup(dentry);
                /*
@@ -2516,6 +2568,7 @@ retry:
                dput(new);
                return dentry;
        }
+       rcu_read_unlock();
        /* we can't take ->d_lock here; it's OK, though. */
        new->d_flags |= DCACHE_PAR_LOOKUP;
        new->d_wait = wq;
index 9c1c9a0..592059f 100644 (file)
@@ -127,7 +127,6 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
                r = real_fops->open(inode, filp);
 
 out:
-       fops_put(real_fops);
        debugfs_use_file_finish(srcu_idx);
        return r;
 }
@@ -262,8 +261,10 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
 
        if (real_fops->open) {
                r = real_fops->open(inode, filp);
-
-               if (filp->f_op != proxy_fops) {
+               if (r) {
+                       replace_fops(filp, d_inode(dentry)->i_fop);
+                       goto free_proxy;
+               } else if (filp->f_op != proxy_fops) {
                        /* No protection against file removal anymore. */
                        WARN(1, "debugfs file owner replaced proxy fops: %pd",
                                dentry);
index b71deee..f57ced5 100644 (file)
@@ -130,6 +130,7 @@ extern int invalidate_inodes(struct super_block *, bool);
 extern struct dentry *__d_alloc(struct super_block *, const struct qstr *);
 extern int d_set_mounted(struct dentry *dentry);
 extern long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc);
+extern struct dentry *d_alloc_cursor(struct dentry *);
 
 /*
  * read_write.c
index b31852f..e3ca4b4 100644 (file)
@@ -2329,18 +2329,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
 
        BUG_ON(size & (size-1)); /* Must be a power of 2 */
 
-       flags |= __GFP_REPEAT;
-       if (size == PAGE_SIZE)
-               ptr = (void *)__get_free_pages(flags, 0);
-       else if (size > PAGE_SIZE) {
-               int order = get_order(size);
-
-               if (order < 3)
-                       ptr = (void *)__get_free_pages(flags, order);
-               else
-                       ptr = vmalloc(size);
-       } else
+       if (size < PAGE_SIZE)
                ptr = kmem_cache_alloc(get_slab(size), flags);
+       else
+               ptr = (void *)__get_free_pages(flags, get_order(size));
 
        /* Check alignment; SLUB has gotten this wrong in the past,
         * and this can lead to user data corruption! */
@@ -2351,20 +2343,10 @@ void *jbd2_alloc(size_t size, gfp_t flags)
 
 void jbd2_free(void *ptr, size_t size)
 {
-       if (size == PAGE_SIZE) {
-               free_pages((unsigned long)ptr, 0);
-               return;
-       }
-       if (size > PAGE_SIZE) {
-               int order = get_order(size);
-
-               if (order < 3)
-                       free_pages((unsigned long)ptr, order);
-               else
-                       vfree(ptr);
-               return;
-       }
-       kmem_cache_free(get_slab(size), ptr);
+       if (size < PAGE_SIZE)
+               kmem_cache_free(get_slab(size), ptr);
+       else
+               free_pages((unsigned long)ptr, get_order(size));
 };
 
 /*
index 3db2721..cedeacb 100644 (file)
@@ -71,9 +71,7 @@ EXPORT_SYMBOL(simple_lookup);
 
 int dcache_dir_open(struct inode *inode, struct file *file)
 {
-       static struct qstr cursor_name = QSTR_INIT(".", 1);
-
-       file->private_data = d_alloc(file->f_path.dentry, &cursor_name);
+       file->private_data = d_alloc_cursor(file->f_path.dentry);
 
        return file->private_data ? 0 : -ENOMEM;
 }
index a7ec92c..783004a 100644 (file)
@@ -3247,6 +3247,10 @@ static bool fs_fully_visible(struct file_system_type *type, int *new_mnt_flags)
                if (mnt->mnt.mnt_sb->s_iflags & SB_I_NOEXEC)
                        mnt_flags &= ~(MNT_LOCK_NOSUID | MNT_LOCK_NOEXEC);
 
+               /* Don't miss readonly hidden in the superblock flags */
+               if (mnt->mnt.mnt_sb->s_flags & MS_RDONLY)
+                       mnt_flags |= MNT_LOCK_READONLY;
+
                /* Verify the mount flags are equal to or more permissive
                 * than the proposed new mount.
                 */
index e55b524..31f3df1 100644 (file)
@@ -290,7 +290,7 @@ out_free_buf:
        return error;
 }
 
-#define NFSD_MDS_PR_KEY                0x0100000000000000
+#define NFSD_MDS_PR_KEY                0x0100000000000000ULL
 
 /*
  * We use the client ID as a unique key for the reservations.
index 1580ea6..d08cd88 100644 (file)
@@ -104,22 +104,21 @@ static __be32 nfsacld_proc_setacl(struct svc_rqst * rqstp,
                goto out;
 
        inode = d_inode(fh->fh_dentry);
-       if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
-               error = -EOPNOTSUPP;
-               goto out_errno;
-       }
 
        error = fh_want_write(fh);
        if (error)
                goto out_errno;
 
-       error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+       fh_lock(fh);
+
+       error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
        if (error)
-               goto out_drop_write;
-       error = inode->i_op->set_acl(inode, argp->acl_default,
-                                    ACL_TYPE_DEFAULT);
+               goto out_drop_lock;
+       error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
        if (error)
-               goto out_drop_write;
+               goto out_drop_lock;
+
+       fh_unlock(fh);
 
        fh_drop_write(fh);
 
@@ -131,7 +130,8 @@ out:
        posix_acl_release(argp->acl_access);
        posix_acl_release(argp->acl_default);
        return nfserr;
-out_drop_write:
+out_drop_lock:
+       fh_unlock(fh);
        fh_drop_write(fh);
 out_errno:
        nfserr = nfserrno(error);
index 01df4cd..0c89034 100644 (file)
@@ -95,22 +95,20 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst * rqstp,
                goto out;
 
        inode = d_inode(fh->fh_dentry);
-       if (!IS_POSIXACL(inode) || !inode->i_op->set_acl) {
-               error = -EOPNOTSUPP;
-               goto out_errno;
-       }
 
        error = fh_want_write(fh);
        if (error)
                goto out_errno;
 
-       error = inode->i_op->set_acl(inode, argp->acl_access, ACL_TYPE_ACCESS);
+       fh_lock(fh);
+
+       error = set_posix_acl(inode, ACL_TYPE_ACCESS, argp->acl_access);
        if (error)
-               goto out_drop_write;
-       error = inode->i_op->set_acl(inode, argp->acl_default,
-                                    ACL_TYPE_DEFAULT);
+               goto out_drop_lock;
+       error = set_posix_acl(inode, ACL_TYPE_DEFAULT, argp->acl_default);
 
-out_drop_write:
+out_drop_lock:
+       fh_unlock(fh);
        fh_drop_write(fh);
 out_errno:
        nfserr = nfserrno(error);
index 6adabd6..71292a0 100644 (file)
@@ -770,9 +770,6 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
        dentry = fhp->fh_dentry;
        inode = d_inode(dentry);
 
-       if (!inode->i_op->set_acl || !IS_POSIXACL(inode))
-               return nfserr_attrnotsupp;
-
        if (S_ISDIR(inode->i_mode))
                flags = NFS4_ACL_DIR;
 
@@ -782,16 +779,19 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
        if (host_error < 0)
                goto out_nfserr;
 
-       host_error = inode->i_op->set_acl(inode, pacl, ACL_TYPE_ACCESS);
+       fh_lock(fhp);
+
+       host_error = set_posix_acl(inode, ACL_TYPE_ACCESS, pacl);
        if (host_error < 0)
-               goto out_release;
+               goto out_drop_lock;
 
        if (S_ISDIR(inode->i_mode)) {
-               host_error = inode->i_op->set_acl(inode, dpacl,
-                                                 ACL_TYPE_DEFAULT);
+               host_error = set_posix_acl(inode, ACL_TYPE_DEFAULT, dpacl);
        }
 
-out_release:
+out_drop_lock:
+       fh_unlock(fhp);
+
        posix_acl_release(pacl);
        posix_acl_release(dpacl);
 out_nfserr:
index 7389cb1..04c68d9 100644 (file)
@@ -710,22 +710,6 @@ static struct rpc_cred *get_backchannel_cred(struct nfs4_client *clp, struct rpc
        }
 }
 
-static struct rpc_clnt *create_backchannel_client(struct rpc_create_args *args)
-{
-       struct rpc_xprt *xprt;
-
-       if (args->protocol != XPRT_TRANSPORT_BC_TCP)
-               return rpc_create(args);
-
-       xprt = args->bc_xprt->xpt_bc_xprt;
-       if (xprt) {
-               xprt_get(xprt);
-               return rpc_create_xprt(args, xprt);
-       }
-
-       return rpc_create(args);
-}
-
 static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *conn, struct nfsd4_session *ses)
 {
        int maxtime = max_cb_time(clp->net);
@@ -768,7 +752,7 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
                args.authflavor = ses->se_cb_sec.flavor;
        }
        /* Create RPC client */
-       client = create_backchannel_client(&args);
+       client = rpc_create(&args);
        if (IS_ERR(client)) {
                dprintk("NFSD: couldn't create callback client: %ld\n",
                        PTR_ERR(client));
index f5f82e1..70d0b9b 100644 (file)
@@ -3480,12 +3480,17 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
 }
 
 static struct nfs4_ol_stateid *
-init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
-               struct nfsd4_open *open)
+init_open_stateid(struct nfs4_file *fp, struct nfsd4_open *open)
 {
 
        struct nfs4_openowner *oo = open->op_openowner;
        struct nfs4_ol_stateid *retstp = NULL;
+       struct nfs4_ol_stateid *stp;
+
+       stp = open->op_stp;
+       /* We are moving these outside of the spinlocks to avoid the warnings */
+       mutex_init(&stp->st_mutex);
+       mutex_lock(&stp->st_mutex);
 
        spin_lock(&oo->oo_owner.so_client->cl_lock);
        spin_lock(&fp->fi_lock);
@@ -3493,6 +3498,8 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
        retstp = nfsd4_find_existing_open(fp, open);
        if (retstp)
                goto out_unlock;
+
+       open->op_stp = NULL;
        atomic_inc(&stp->st_stid.sc_count);
        stp->st_stid.sc_type = NFS4_OPEN_STID;
        INIT_LIST_HEAD(&stp->st_locks);
@@ -3502,14 +3509,19 @@ init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = 0;
        stp->st_openstp = NULL;
-       init_rwsem(&stp->st_rwsem);
        list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
        list_add(&stp->st_perfile, &fp->fi_stateids);
 
 out_unlock:
        spin_unlock(&fp->fi_lock);
        spin_unlock(&oo->oo_owner.so_client->cl_lock);
-       return retstp;
+       if (retstp) {
+               mutex_lock(&retstp->st_mutex);
+               /* To keep mutex tracking happy */
+               mutex_unlock(&stp->st_mutex);
+               stp = retstp;
+       }
+       return stp;
 }
 
 /*
@@ -4305,7 +4317,6 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
        struct nfs4_file *fp = NULL;
        struct nfs4_ol_stateid *stp = NULL;
-       struct nfs4_ol_stateid *swapstp = NULL;
        struct nfs4_delegation *dp = NULL;
        __be32 status;
 
@@ -4335,32 +4346,28 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
         */
        if (stp) {
                /* Stateid was found, this is an OPEN upgrade */
-               down_read(&stp->st_rwsem);
+               mutex_lock(&stp->st_mutex);
                status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       up_read(&stp->st_rwsem);
+                       mutex_unlock(&stp->st_mutex);
                        goto out;
                }
        } else {
-               stp = open->op_stp;
-               open->op_stp = NULL;
-               swapstp = init_open_stateid(stp, fp, open);
-               if (swapstp) {
-                       nfs4_put_stid(&stp->st_stid);
-                       stp = swapstp;
-                       down_read(&stp->st_rwsem);
+               /* stp is returned locked. */
+               stp = init_open_stateid(fp, open);
+               /* See if we lost the race to some other thread */
+               if (stp->st_access_bmap != 0) {
                        status = nfs4_upgrade_open(rqstp, fp, current_fh,
                                                stp, open);
                        if (status) {
-                               up_read(&stp->st_rwsem);
+                               mutex_unlock(&stp->st_mutex);
                                goto out;
                        }
                        goto upgrade_out;
                }
-               down_read(&stp->st_rwsem);
                status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
                if (status) {
-                       up_read(&stp->st_rwsem);
+                       mutex_unlock(&stp->st_mutex);
                        release_open_stateid(stp);
                        goto out;
                }
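init_open_stateid() above now prepares and locks the candidate stateid outside the spinlocks, then either publishes it or, when another thread raced in first, returns that existing stateid locked instead. A simplified pthreads sketch of the "optimistically prepare, then insert-or-adopt under the lock" shape (toy one-slot table, hypothetical names; the kernel releases the unused candidate later, here it is freed inline):

#include <pthread.h>
#include <stdlib.h>

struct stateid {
	pthread_mutex_t lock;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct stateid *table_slot;        /* one-entry stand-in for the table */

/* Return a stateid with ->lock held: either the freshly published candidate
 * or the one another thread published first. */
static struct stateid *open_stateid_get(void)
{
	struct stateid *st = calloc(1, sizeof(*st));
	struct stateid *existing;

	if (!st)
		return NULL;
	pthread_mutex_init(&st->lock, NULL);
	pthread_mutex_lock(&st->lock);        /* lock before racing for the slot */

	pthread_mutex_lock(&table_lock);
	existing = table_slot;
	if (!existing)
		table_slot = st;              /* we won: publish our candidate */
	pthread_mutex_unlock(&table_lock);

	if (existing) {                       /* we lost: adopt the winner, locked */
		pthread_mutex_lock(&existing->lock);
		pthread_mutex_unlock(&st->lock);
		free(st);
		st = existing;
	}
	return st;
}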
@@ -4372,7 +4379,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
        }
 upgrade_out:
        nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
-       up_read(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
 
        if (nfsd4_has_session(&resp->cstate)) {
                if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
@@ -4977,12 +4984,12 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
                 * revoked delegations are kept only for free_stateid.
                 */
                return nfserr_bad_stateid;
-       down_write(&stp->st_rwsem);
+       mutex_lock(&stp->st_mutex);
        status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
        if (status == nfs_ok)
                status = nfs4_check_fh(current_fh, &stp->st_stid);
        if (status != nfs_ok)
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
        return status;
 }
 
@@ -5030,7 +5037,7 @@ static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cs
                return status;
        oo = openowner(stp->st_stateowner);
        if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
                nfs4_put_stid(&stp->st_stid);
                return nfserr_bad_stateid;
        }
@@ -5062,12 +5069,12 @@ nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        oo = openowner(stp->st_stateowner);
        status = nfserr_bad_stateid;
        if (oo->oo_flags & NFS4_OO_CONFIRMED) {
-               up_write(&stp->st_rwsem);
+               mutex_unlock(&stp->st_mutex);
                goto put_stateid;
        }
        oo->oo_flags |= NFS4_OO_CONFIRMED;
        nfs4_inc_and_copy_stateid(&oc->oc_resp_stateid, &stp->st_stid);
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
                __func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
 
@@ -5143,7 +5150,7 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
        nfs4_inc_and_copy_stateid(&od->od_stateid, &stp->st_stid);
        status = nfs_ok;
 put_stateid:
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
@@ -5196,7 +5203,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        if (status)
                goto out; 
        nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
 
        nfsd4_close_open_stateid(stp);
 
@@ -5422,7 +5429,7 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
        stp->st_access_bmap = 0;
        stp->st_deny_bmap = open_stp->st_deny_bmap;
        stp->st_openstp = open_stp;
-       init_rwsem(&stp->st_rwsem);
+       mutex_init(&stp->st_mutex);
        list_add(&stp->st_locks, &open_stp->st_locks);
        list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
        spin_lock(&fp->fi_lock);
@@ -5591,7 +5598,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                                        &open_stp, nn);
                if (status)
                        goto out;
-               up_write(&open_stp->st_rwsem);
+               mutex_unlock(&open_stp->st_mutex);
                open_sop = openowner(open_stp->st_stateowner);
                status = nfserr_bad_stateid;
                if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
@@ -5600,7 +5607,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
                status = lookup_or_create_lock_state(cstate, open_stp, lock,
                                                        &lock_stp, &new);
                if (status == nfs_ok)
-                       down_write(&lock_stp->st_rwsem);
+                       mutex_lock(&lock_stp->st_mutex);
        } else {
                status = nfs4_preprocess_seqid_op(cstate,
                                       lock->lk_old_lock_seqid,
@@ -5704,7 +5711,7 @@ out:
                    seqid_mutating_err(ntohl(status)))
                        lock_sop->lo_owner.so_seqid++;
 
-               up_write(&lock_stp->st_rwsem);
+               mutex_unlock(&lock_stp->st_mutex);
 
                /*
                 * If this is a new, never-before-used stateid, and we are
@@ -5874,7 +5881,7 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 fput:
        fput(filp);
 put_stateid:
-       up_write(&stp->st_rwsem);
+       mutex_unlock(&stp->st_mutex);
        nfs4_put_stid(&stp->st_stid);
 out:
        nfsd4_bump_seqid(cstate, status);
index 986e51e..64053ea 100644 (file)
@@ -535,7 +535,7 @@ struct nfs4_ol_stateid {
        unsigned char                   st_access_bmap;
        unsigned char                   st_deny_bmap;
        struct nfs4_ol_stateid          *st_openstp;
-       struct rw_semaphore             st_rwsem;
+       struct mutex                    st_mutex;
 };
 
 static inline struct nfs4_ol_stateid *openlockstateid(struct nfs4_stid *s)
index 809bd2d..e9fd241 100644 (file)
@@ -439,7 +439,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
        if (!sbp || le16_to_cpu(sbp->s_magic) != NILFS_SUPER_MAGIC)
                return 0;
        bytes = le16_to_cpu(sbp->s_bytes);
-       if (bytes > BLOCK_SIZE)
+       if (bytes < sumoff + 4 || bytes > BLOCK_SIZE)
                return 0;
        crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
                       sumoff);
index e27e652..4342c7e 100644 (file)
@@ -1,7 +1,5 @@
 ccflags-y := -Ifs/ocfs2
 
-ccflags-y += -DCATCH_BH_JBD_RACES
-
 obj-$(CONFIG_OCFS2_FS) +=      \
        ocfs2.o                 \
        ocfs2_stackglue.o
index fe50ded..498641e 100644 (file)
@@ -139,11 +139,16 @@ int ocfs2_read_blocks_sync(struct ocfs2_super *osb, u64 block,
 
                lock_buffer(bh);
                if (buffer_jbd(bh)) {
+#ifdef CATCH_BH_JBD_RACES
                        mlog(ML_ERROR,
                             "block %llu had the JBD bit set "
                             "while I was in lock_buffer!",
                             (unsigned long long)bh->b_blocknr);
                        BUG();
+#else
+                       unlock_buffer(bh);
+                       continue;
+#endif
                }
 
                clear_buffer_uptodate(bh);
index 22f0253..c2a6b08 100644 (file)
@@ -405,12 +405,21 @@ static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
                err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
        } else {
                const struct cred *old_cred;
+               struct cred *override_cred;
 
                old_cred = ovl_override_creds(dentry->d_sb);
 
-               err = ovl_create_over_whiteout(dentry, inode, &stat, link,
-                                              hardlink);
+               err = -ENOMEM;
+               override_cred = prepare_creds();
+               if (override_cred) {
+                       override_cred->fsuid = old_cred->fsuid;
+                       override_cred->fsgid = old_cred->fsgid;
+                       put_cred(override_creds(override_cred));
+                       put_cred(override_cred);
 
+                       err = ovl_create_over_whiteout(dentry, inode, &stat,
+                                                      link, hardlink);
+               }
                revert_creds(old_cred);
        }
 
index 0ed7c40..1dbeab6 100644 (file)
@@ -238,41 +238,27 @@ out:
        return err;
 }
 
-static bool ovl_need_xattr_filter(struct dentry *dentry,
-                                 enum ovl_path_type type)
-{
-       if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
-               return S_ISDIR(dentry->d_inode->i_mode);
-       else
-               return false;
-}
-
 ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode,
                     const char *name, void *value, size_t size)
 {
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+       struct dentry *realdentry = ovl_dentry_real(dentry);
 
-       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+       if (ovl_is_private_xattr(name))
                return -ENODATA;
 
-       return vfs_getxattr(realpath.dentry, name, value, size);
+       return vfs_getxattr(realdentry, name, value, size);
 }
 
 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
 {
-       struct path realpath;
-       enum ovl_path_type type = ovl_path_real(dentry, &realpath);
+       struct dentry *realdentry = ovl_dentry_real(dentry);
        ssize_t res;
        int off;
 
-       res = vfs_listxattr(realpath.dentry, list, size);
+       res = vfs_listxattr(realdentry, list, size);
        if (res <= 0 || size == 0)
                return res;
 
-       if (!ovl_need_xattr_filter(dentry, type))
-               return res;
-
        /* filter out private xattrs */
        for (off = 0; off < res;) {
                char *s = list + off;
@@ -302,7 +288,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
                goto out;
 
        err = -ENODATA;
-       if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
+       if (ovl_is_private_xattr(name))
                goto out_drop_write;
 
        if (!OVL_TYPE_UPPER(type)) {
index 8a4a266..edc452c 100644 (file)
@@ -820,39 +820,43 @@ posix_acl_xattr_get(const struct xattr_handler *handler,
        return error;
 }
 
-static int
-posix_acl_xattr_set(const struct xattr_handler *handler,
-                   struct dentry *unused, struct inode *inode,
-                   const char *name, const void *value,
-                   size_t size, int flags)
+int
+set_posix_acl(struct inode *inode, int type, struct posix_acl *acl)
 {
-       struct posix_acl *acl = NULL;
-       int ret;
-
        if (!IS_POSIXACL(inode))
                return -EOPNOTSUPP;
        if (!inode->i_op->set_acl)
                return -EOPNOTSUPP;
 
-       if (handler->flags == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
-               return value ? -EACCES : 0;
+       if (type == ACL_TYPE_DEFAULT && !S_ISDIR(inode->i_mode))
+               return acl ? -EACCES : 0;
        if (!inode_owner_or_capable(inode))
                return -EPERM;
 
+       if (acl) {
+               int ret = posix_acl_valid(acl);
+               if (ret)
+                       return ret;
+       }
+       return inode->i_op->set_acl(inode, acl, type);
+}
+EXPORT_SYMBOL(set_posix_acl);
+
+static int
+posix_acl_xattr_set(const struct xattr_handler *handler,
+                   struct dentry *unused, struct inode *inode,
+                   const char *name, const void *value,
+                   size_t size, int flags)
+{
+       struct posix_acl *acl = NULL;
+       int ret;
+
        if (value) {
                acl = posix_acl_from_xattr(&init_user_ns, value, size);
                if (IS_ERR(acl))
                        return PTR_ERR(acl);
-
-               if (acl) {
-                       ret = posix_acl_valid(acl);
-                       if (ret)
-                               goto out;
-               }
        }
-
-       ret = inode->i_op->set_acl(inode, acl, handler->flags);
-out:
+       ret = set_posix_acl(inode, handler->flags, acl);
        posix_acl_release(acl);
        return ret;
 }
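
The net effect of the posix_acl hunks is that the ownership, type and validity checks now live in an exported set_posix_acl() helper, while the xattr handler only parses its raw buffer before delegating. A hedged user-space sketch of that split (types and names invented for illustration):

/*
 * set_acl_checked() plays the role of set_posix_acl(): validate, then
 * dispatch.  set_acl_from_blob() plays the role of the xattr handler:
 * parse, then reuse the helper.
 */
#include <errno.h>
#include <stddef.h>

struct acl { int valid; };
struct inode_ops { int (*set_acl)(struct acl *acl, int type); };

static int set_acl_checked(const struct inode_ops *ops, int type,
                           struct acl *acl)
{
        if (!ops->set_acl)
                return -EOPNOTSUPP;
        if (acl && !acl->valid)
                return -EINVAL;
        return ops->set_acl(acl, type);
}

static int set_acl_from_blob(const struct inode_ops *ops, int type,
                             const void *value, size_t size)
{
        struct acl parsed = { .valid = (value != NULL && size > 0) };

        return set_acl_checked(ops, type, value ? &parsed : NULL);
}
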
index b8f2d1e..c72c16c 100644 (file)
@@ -1393,7 +1393,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        unsigned long safe_mask = 0;
        unsigned int commit_max_age = (unsigned int)-1;
        struct reiserfs_journal *journal = SB_JOURNAL(s);
-       char *new_opts = kstrdup(arg, GFP_KERNEL);
+       char *new_opts;
        int err;
        char *qf_names[REISERFS_MAXQUOTAS];
        unsigned int qfmt = 0;
@@ -1401,6 +1401,10 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        int i;
 #endif
 
+       new_opts = kstrdup(arg, GFP_KERNEL);
+       if (arg && !new_opts)
+               return -ENOMEM;
+
        sync_filesystem(s);
        reiserfs_write_lock(s);
 
@@ -1546,7 +1550,8 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        }
 
 out_ok_unlocked:
-       replace_mount_options(s, new_opts);
+       if (new_opts)
+               replace_mount_options(s, new_opts);
        return 0;
 
 out_err_unlock:
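
The reiserfs_remount() hunks handle a subtlety of kstrdup(): it returns NULL both on allocation failure and when the input itself is NULL, so only the former should be reported as -ENOMEM. The same guard in plain C (strdup() additionally needs the explicit NULL test, since strdup(NULL) is undefined in user space):

/*
 * Sketch of the "NULL input vs. failed allocation" guard added above.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>

static int copy_options(const char *arg, char **out)
{
        char *copy = arg ? strdup(arg) : NULL;

        if (arg && !copy)
                return -ENOMEM;         /* allocation really failed */

        *out = copy;                    /* may legitimately be NULL */
        return 0;
}
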
index 0831697..7bbf420 100644 (file)
@@ -52,6 +52,7 @@
 #include "ubifs.h"
 #include <linux/mount.h>
 #include <linux/slab.h>
+#include <linux/migrate.h>
 
 static int read_block(struct inode *inode, void *addr, unsigned int block,
                      struct ubifs_data_node *dn)
@@ -1452,6 +1453,26 @@ static int ubifs_set_page_dirty(struct page *page)
        return ret;
 }
 
+#ifdef CONFIG_MIGRATION
+static int ubifs_migrate_page(struct address_space *mapping,
+               struct page *newpage, struct page *page, enum migrate_mode mode)
+{
+       int rc;
+
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
+       if (rc != MIGRATEPAGE_SUCCESS)
+               return rc;
+
+       if (PagePrivate(page)) {
+               ClearPagePrivate(page);
+               SetPagePrivate(newpage);
+       }
+
+       migrate_page_copy(newpage, page);
+       return MIGRATEPAGE_SUCCESS;
+}
+#endif
+
 static int ubifs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 {
        /*
@@ -1591,6 +1612,9 @@ const struct address_space_operations ubifs_file_address_operations = {
        .write_end      = ubifs_write_end,
        .invalidatepage = ubifs_invalidatepage,
        .set_page_dirty = ubifs_set_page_dirty,
+#ifdef CONFIG_MIGRATION
+       .migratepage    = ubifs_migrate_page,
+#endif
        .releasepage    = ubifs_releasepage,
 };
 
index 5f861ed..888c364 100644 (file)
@@ -295,7 +295,8 @@ static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
                map = &UDF_SB(sb)->s_partmaps[partition];
                /* map to sparable/physical partition desc */
                phyblock = udf_get_pblock(sb, eloc.logicalBlockNum,
-                       map->s_partition_num, ext_offset + offset);
+                       map->s_type_specific.s_metadata.s_phys_partition_ref,
+                       ext_offset + offset);
        }
 
        brelse(epos.bh);
@@ -317,14 +318,18 @@ uint32_t udf_get_pblock_meta25(struct super_block *sb, uint32_t block,
        mdata = &map->s_type_specific.s_metadata;
        inode = mdata->s_metadata_fe ? : mdata->s_mirror_fe;
 
-       /* We shouldn't mount such media... */
-       BUG_ON(!inode);
+       if (!inode)
+               return 0xFFFFFFFF;
+
        retblk = udf_try_read_meta(inode, block, partition, offset);
        if (retblk == 0xFFFFFFFF && mdata->s_metadata_fe) {
                udf_warn(sb, "error reading from METADATA, trying to read from MIRROR\n");
                if (!(mdata->s_flags & MF_MIRROR_FE_LOADED)) {
                        mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb,
-                               mdata->s_mirror_file_loc, map->s_partition_num);
+                               mdata->s_mirror_file_loc,
+                               mdata->s_phys_partition_ref);
+                       if (IS_ERR(mdata->s_mirror_fe))
+                               mdata->s_mirror_fe = NULL;
                        mdata->s_flags |= MF_MIRROR_FE_LOADED;
                }
 
index 5e2c8c8..4942549 100644 (file)
@@ -951,13 +951,13 @@ out2:
 }
 
 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
-                                       u32 meta_file_loc, u32 partition_num)
+                                       u32 meta_file_loc, u32 partition_ref)
 {
        struct kernel_lb_addr addr;
        struct inode *metadata_fe;
 
        addr.logicalBlockNum = meta_file_loc;
-       addr.partitionReferenceNum = partition_num;
+       addr.partitionReferenceNum = partition_ref;
 
        metadata_fe = udf_iget_special(sb, &addr);
 
@@ -974,7 +974,8 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
        return metadata_fe;
 }
 
-static int udf_load_metadata_files(struct super_block *sb, int partition)
+static int udf_load_metadata_files(struct super_block *sb, int partition,
+                                  int type1_index)
 {
        struct udf_sb_info *sbi = UDF_SB(sb);
        struct udf_part_map *map;
@@ -984,20 +985,21 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
 
        map = &sbi->s_partmaps[partition];
        mdata = &map->s_type_specific.s_metadata;
+       mdata->s_phys_partition_ref = type1_index;
 
        /* metadata address */
        udf_debug("Metadata file location: block = %d part = %d\n",
-                 mdata->s_meta_file_loc, map->s_partition_num);
+                 mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
 
        fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
-                                        map->s_partition_num);
+                                        mdata->s_phys_partition_ref);
        if (IS_ERR(fe)) {
                /* mirror file entry */
                udf_debug("Mirror metadata file location: block = %d part = %d\n",
-                         mdata->s_mirror_file_loc, map->s_partition_num);
+                         mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
 
                fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
-                                                map->s_partition_num);
+                                                mdata->s_phys_partition_ref);
 
                if (IS_ERR(fe)) {
                        udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n");
@@ -1015,7 +1017,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
        */
        if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
                addr.logicalBlockNum = mdata->s_bitmap_file_loc;
-               addr.partitionReferenceNum = map->s_partition_num;
+               addr.partitionReferenceNum = mdata->s_phys_partition_ref;
 
                udf_debug("Bitmap file location: block = %d part = %d\n",
                          addr.logicalBlockNum, addr.partitionReferenceNum);
@@ -1283,7 +1285,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
        p = (struct partitionDesc *)bh->b_data;
        partitionNumber = le16_to_cpu(p->partitionNumber);
 
-       /* First scan for TYPE1, SPARABLE and METADATA partitions */
+       /* First scan for TYPE1 and SPARABLE partitions */
        for (i = 0; i < sbi->s_partitions; i++) {
                map = &sbi->s_partmaps[i];
                udf_debug("Searching map: (%d == %d)\n",
@@ -1333,7 +1335,7 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
                goto out_bh;
 
        if (map->s_partition_type == UDF_METADATA_MAP25) {
-               ret = udf_load_metadata_files(sb, i);
+               ret = udf_load_metadata_files(sb, i, type1_idx);
                if (ret < 0) {
                        udf_err(sb, "error loading MetaData partition map %d\n",
                                i);
index 27b5335..c13875d 100644 (file)
@@ -61,6 +61,11 @@ struct udf_meta_data {
        __u32   s_bitmap_file_loc;
        __u32   s_alloc_unit_size;
        __u16   s_align_unit_size;
+       /*
+        * Partition Reference Number of the associated physical / sparable
+        * partition
+        */
+       __u16   s_phys_partition_ref;
        int     s_flags;
        struct inode *s_metadata_fe;
        struct inode *s_mirror_fe;
index 484c879..f53fa05 100644 (file)
@@ -212,6 +212,7 @@ struct dentry_operations {
 #define DCACHE_OP_REAL                 0x08000000
 
 #define DCACHE_PAR_LOOKUP              0x10000000 /* being looked up (with parent locked shared) */
+#define DCACHE_DENTRY_CURSOR           0x20000000
 
 extern seqlock_t rename_lock;
 
@@ -575,5 +576,17 @@ static inline struct inode *vfs_select_inode(struct dentry *dentry,
        return inode;
 }
 
+/**
+ * d_real_inode - Return the real inode
+ * @dentry: The dentry to query
+ *
+ * If dentry is on a union/overlay, then return the underlying, real inode.
+ * Otherwise return d_inode().
+ */
+static inline struct inode *d_real_inode(struct dentry *dentry)
+{
+       return d_backing_inode(d_real(dentry));
+}
+
 
 #endif /* __LINUX_DCACHE_H */
index d029ffa..99403b1 100644 (file)
@@ -223,6 +223,8 @@ struct st_sensor_settings {
  * @get_irq_data_ready: Function to get the IRQ used for data ready signal.
  * @tf: Transfer function structure used by I/O operations.
  * @tb: Transfer buffers and mutex used by I/O operations.
+ * @hw_irq_trigger: if we're using the hardware interrupt on the sensor.
+ * @hw_timestamp: Latest timestamp from the interrupt handler, when in use.
  */
 struct st_sensor_data {
        struct device *dev;
@@ -247,6 +249,9 @@ struct st_sensor_data {
 
        const struct st_sensor_transfer_function *tf;
        struct st_sensor_transfer_buffer tb;
+
+       bool hw_irq_trigger;
+       s64 hw_timestamp;
 };
 
 #ifdef CONFIG_IIO_BUFFER
@@ -260,7 +265,8 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops);
 
 void st_sensors_deallocate_trigger(struct iio_dev *indio_dev);
-
+int st_sensors_validate_device(struct iio_trigger *trig,
+                              struct iio_dev *indio_dev);
 #else
 static inline int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
                                const struct iio_trigger_ops *trigger_ops)
@@ -271,6 +277,7 @@ static inline void st_sensors_deallocate_trigger(struct iio_dev *indio_dev)
 {
        return;
 }
+#define st_sensors_validate_device NULL
 #endif
 
 int st_sensors_init_sensor(struct iio_dev *indio_dev,
index f2cb8d4..f8834f8 100644 (file)
@@ -190,7 +190,7 @@ extern struct task_group root_task_group;
 #define INIT_TASK(tsk) \
 {                                                                      \
        .state          = 0,                                            \
-       .stack          = &init_thread_info,                            \
+       .stack          = init_stack,                                   \
        .usage          = ATOMIC_INIT(2),                               \
        .flags          = PF_KTHREAD,                                   \
        .prio           = MAX_PRIO-20,                                  \
index 5ab8528..f2d0258 100644 (file)
@@ -6,6 +6,7 @@
 #define __LINUX_ISA_H
 
 #include <linux/device.h>
+#include <linux/errno.h>
 #include <linux/kernel.h>
 
 struct isa_driver {
@@ -22,13 +23,13 @@ struct isa_driver {
 
 #define to_isa_driver(x) container_of((x), struct isa_driver, driver)
 
-#ifdef CONFIG_ISA
+#ifdef CONFIG_ISA_BUS_API
 int isa_register_driver(struct isa_driver *, unsigned int);
 void isa_unregister_driver(struct isa_driver *);
 #else
 static inline int isa_register_driver(struct isa_driver *d, unsigned int i)
 {
-       return 0;
+       return -ENODEV;
 }
 
 static inline void isa_unregister_driver(struct isa_driver *d)
index 0536524..6890446 100644 (file)
@@ -117,13 +117,18 @@ struct module;
 
 #include <linux/atomic.h>
 
+#ifdef HAVE_JUMP_LABEL
+
 static inline int static_key_count(struct static_key *key)
 {
-       return atomic_read(&key->enabled);
+       /*
+        * -1 means the first static_key_slow_inc() is in progress.
+        *  static_key_enabled() must return true, so return 1 here.
+        */
+       int n = atomic_read(&key->enabled);
+       return n >= 0 ? n : 1;
 }
 
-#ifdef HAVE_JUMP_LABEL
-
 #define JUMP_TYPE_FALSE        0UL
 #define JUMP_TYPE_TRUE 1UL
 #define JUMP_TYPE_MASK 1UL
@@ -162,6 +167,11 @@ extern void jump_label_apply_nops(struct module *mod);
 
 #else  /* !HAVE_JUMP_LABEL */
 
+static inline int static_key_count(struct static_key *key)
+{
+       return atomic_read(&key->enabled);
+}
+
 static __always_inline void jump_label_init(void)
 {
        static_key_initialized = true;
index 611927f..ac4b3c4 100644 (file)
@@ -59,14 +59,13 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
 
 void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
 void kasan_kfree_large(const void *ptr);
-void kasan_kfree(void *ptr);
+void kasan_poison_kfree(void *ptr);
 void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
                  gfp_t flags);
 void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);
 
 void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
 bool kasan_slab_free(struct kmem_cache *s, void *object);
-void kasan_poison_slab_free(struct kmem_cache *s, void *object);
 
 struct kasan_cache {
        int alloc_meta_offset;
@@ -76,6 +75,9 @@ struct kasan_cache {
 int kasan_module_alloc(void *addr, size_t size);
 void kasan_free_shadow(const struct vm_struct *vm);
 
+size_t ksize(const void *);
+static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
+
 #else /* CONFIG_KASAN */
 
 static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
@@ -102,7 +104,7 @@ static inline void kasan_poison_object_data(struct kmem_cache *cache,
 
 static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
 static inline void kasan_kfree_large(const void *ptr) {}
-static inline void kasan_kfree(void *ptr) {}
+static inline void kasan_poison_kfree(void *ptr) {}
 static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
                                size_t size, gfp_t flags) {}
 static inline void kasan_krealloc(const void *object, size_t new_size,
@@ -114,11 +116,12 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
 {
        return false;
 }
-static inline void kasan_poison_slab_free(struct kmem_cache *s, void *object) {}
 
 static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
+static inline void kasan_unpoison_slab(const void *ptr) { }
+
 #endif /* CONFIG_KASAN */
 
 #endif /* LINUX_KASAN_H */
index d2b1306..e5e7f2e 100644 (file)
@@ -42,15 +42,16 @@ struct led_classdev {
 #define LED_UNREGISTERING      (1 << 1)
        /* Upper 16 bits reflect control information */
 #define LED_CORE_SUSPENDRESUME (1 << 16)
-#define LED_BLINK_ONESHOT      (1 << 17)
-#define LED_BLINK_ONESHOT_STOP (1 << 18)
-#define LED_BLINK_INVERT       (1 << 19)
-#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 20)
-#define LED_BLINK_DISABLE      (1 << 21)
-#define LED_SYSFS_DISABLE      (1 << 22)
-#define LED_DEV_CAP_FLASH      (1 << 23)
-#define LED_HW_PLUGGABLE       (1 << 24)
-#define LED_PANIC_INDICATOR    (1 << 25)
+#define LED_BLINK_SW           (1 << 17)
+#define LED_BLINK_ONESHOT      (1 << 18)
+#define LED_BLINK_ONESHOT_STOP (1 << 19)
+#define LED_BLINK_INVERT       (1 << 20)
+#define LED_BLINK_BRIGHTNESS_CHANGE (1 << 21)
+#define LED_BLINK_DISABLE      (1 << 22)
+#define LED_SYSFS_DISABLE      (1 << 23)
+#define LED_DEV_CAP_FLASH      (1 << 24)
+#define LED_HW_PLUGGABLE       (1 << 25)
+#define LED_PANIC_INDICATOR    (1 << 26)
 
        /* Set LED brightness level
         * Must not sleep. Use brightness_set_blocking for drivers
@@ -72,8 +73,8 @@ struct led_classdev {
         * and if both are zero then a sensible default should be chosen.
         * The call should adjust the timings in that case and if it can't
         * match the values specified exactly.
-        * Deactivate blinking again when the brightness is set to a fixed
-        * value via the brightness_set() callback.
+        * Deactivate blinking again when the brightness is set to LED_OFF
+        * via the brightness_set() callback.
         */
        int             (*blink_set)(struct led_classdev *led_cdev,
                                     unsigned long *delay_on,
index 266320f..ab31081 100644 (file)
@@ -172,6 +172,7 @@ enum {
 enum {
        MLX5_FENCE_MODE_NONE                    = 0 << 5,
        MLX5_FENCE_MODE_INITIATOR_SMALL         = 1 << 5,
+       MLX5_FENCE_MODE_FENCE                   = 2 << 5,
        MLX5_FENCE_MODE_STRONG_ORDERING         = 3 << 5,
        MLX5_FENCE_MODE_SMALL_AND_FENCE         = 4 << 5,
 };
index 5df5feb..ece042d 100644 (file)
@@ -602,7 +602,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 }
 
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-               struct page *page, pte_t *pte, bool write, bool anon, bool old);
+               struct page *page, pte_t *pte, bool write, bool anon);
 #endif
 
 /*
index 17018f3..908b67c 100644 (file)
@@ -235,6 +235,9 @@ static inline int pwm_config(struct pwm_device *pwm, int duty_ns,
        if (!pwm)
                return -EINVAL;
 
+       if (duty_ns < 0 || period_ns < 0)
+               return -EINVAL;
+
        pwm_get_state(pwm, &state);
        if (state.duty_cycle == duty_ns && state.period == period_ns)
                return 0;
index dee41bf..b45acfd 100644 (file)
@@ -3010,7 +3010,7 @@ static inline int object_is_on_stack(void *obj)
        return (obj >= stack) && (obj < (stack + THREAD_SIZE));
 }
 
-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);
 
 #ifdef CONFIG_DEBUG_STACK_USAGE
 static inline unsigned long stack_not_used(struct task_struct *p)
index 19c659d..b6810c9 100644 (file)
@@ -137,8 +137,6 @@ struct rpc_create_args {
 #define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT     (1UL << 9)
 
 struct rpc_clnt *rpc_create(struct rpc_create_args *args);
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
-                                       struct rpc_xprt *xprt);
 struct rpc_clnt        *rpc_bind_new_program(struct rpc_clnt *,
                                const struct rpc_program *, u32);
 struct rpc_clnt *rpc_clone_client(struct rpc_clnt *);
index b7dabc4..79ba508 100644 (file)
@@ -84,6 +84,7 @@ struct svc_xprt {
 
        struct net              *xpt_net;
        struct rpc_xprt         *xpt_bc_xprt;   /* NFSv4.1 backchannel */
+       struct rpc_xprt_switch  *xpt_bc_xps;    /* NFSv4.1 backchannel */
 };
 
 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
index 5aa3834..5e3e1b6 100644 (file)
@@ -297,6 +297,7 @@ struct xprt_create {
        size_t                  addrlen;
        const char              *servername;
        struct svc_xprt         *bc_xprt;       /* NFSv4.1 backchannel */
+       struct rpc_xprt_switch  *bc_xps;
        unsigned int            flags;
 };
 
index 457651b..fefe8b0 100644 (file)
@@ -1034,6 +1034,8 @@ static inline int usb_gadget_activate(struct usb_gadget *gadget)
  * @udc_name: A name of UDC this driver should be bound to. If udc_name is NULL,
  *     this driver will be bound to any available UDC.
  * @pending: UDC core private data used for deferred probe of this driver.
+ * @match_existing_only: If udc is not found, return an error and don't add this
+ *      gadget driver to the list of pending drivers
  *
  * Devices are disabled till a gadget driver successfully bind()s, which
  * means the driver will handle setup() requests needed to enumerate (and
@@ -1097,6 +1099,7 @@ struct usb_gadget_driver {
 
        char                    *udc_name;
        struct list_head        pending;
+       unsigned                match_existing_only:1;
 };
 
 
index 0b3da40..d315c89 100644 (file)
@@ -142,10 +142,11 @@ enum musb_vbus_id_status {
 };
 
 #if IS_ENABLED(CONFIG_USB_MUSB_HDRC)
-void musb_mailbox(enum musb_vbus_id_status status);
+int musb_mailbox(enum musb_vbus_id_status status);
 #else
-static inline void musb_mailbox(enum musb_vbus_id_status status)
+static inline int musb_mailbox(enum musb_vbus_id_status status)
 {
+       return 0;
 }
 #endif
 
index 98a938a..7a8d603 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * v4l2-mc.h - Media Controller V4L2 types and prototypes
  *
- * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@osg.samsung.com>
+ * Copyright (C) 2016 Mauro Carvalho Chehab <mchehab@kernel.org>
  * Copyright (C) 2006-2010 Nokia Corporation
  * Copyright (c) 2016 Intel Corporation.
  *
index 16274e2..9c9a27d 100644 (file)
@@ -203,7 +203,9 @@ struct rvt_driver_provided {
 
        /*
         * Allocate a private queue pair data structure for driver specific
-        * information which is opaque to rdmavt.
+        * information which is opaque to rdmavt.  Errors are returned via
+        * ERR_PTR(err).  The driver is free to return NULL or a valid
+        * pointer.
         */
        void * (*qp_priv_alloc)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
                                gfp_t gfp);
index 4c17fda..eae02aa 100644 (file)
@@ -453,7 +453,7 @@ void __init __weak smp_setup_processor_id(void)
 }
 
 # if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
 {
 }
 #endif
@@ -627,7 +627,7 @@ asmlinkage __visible void __init start_kernel(void)
        /* Should be run before the first non-init thread is created */
        init_espfix_bsp();
 #endif
-       thread_info_cache_init();
+       thread_stack_cache_init();
        cred_init();
        fork_init();
        proc_caches_init();
@@ -708,11 +708,13 @@ static bool __init_or_module initcall_blacklisted(initcall_t fn)
 {
        struct blacklist_entry *entry;
        char fn_name[KSYM_SYMBOL_LEN];
+       unsigned long addr;
 
        if (list_empty(&blacklisted_initcalls))
                return false;
 
-       sprint_symbol_no_offset(fn_name, (unsigned long)fn);
+       addr = (unsigned long) dereference_function_descriptor(fn);
+       sprint_symbol_no_offset(fn_name, addr);
 
        list_for_each_entry(entry, &blacklisted_initcalls, next) {
                if (!strcmp(fn_name, entry->buf)) {
index 5c2c355..4a7ec0c 100644 (file)
@@ -148,18 +148,18 @@ static inline void free_task_struct(struct task_struct *tsk)
 }
 #endif
 
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
 {
 }
 
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
 
 /*
  * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
  * kmemcache based allocator.
  */
 # if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
                                                  int node)
 {
        struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -172,33 +172,33 @@ static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
        return page ? page_address(page) : NULL;
 }
 
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
 {
-       struct page *page = virt_to_page(ti);
+       struct page *page = virt_to_page(stack);
 
        memcg_kmem_update_page_stat(page, MEMCG_KERNEL_STACK,
                                    -(1 << THREAD_SIZE_ORDER));
        __free_kmem_pages(page, THREAD_SIZE_ORDER);
 }
 # else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
 
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
                                                  int node)
 {
-       return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+       return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
 }
 
-static void free_thread_info(struct thread_info *ti)
+static void free_thread_stack(unsigned long *stack)
 {
-       kmem_cache_free(thread_info_cache, ti);
+       kmem_cache_free(thread_stack_cache, stack);
 }
 
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
 {
-       thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+       thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
                                              THREAD_SIZE, 0, NULL);
-       BUG_ON(thread_info_cache == NULL);
+       BUG_ON(thread_stack_cache == NULL);
 }
 # endif
 #endif
@@ -221,9 +221,9 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
 {
-       struct zone *zone = page_zone(virt_to_page(ti));
+       struct zone *zone = page_zone(virt_to_page(stack));
 
        mod_zone_page_state(zone, NR_KERNEL_STACK, account);
 }
@@ -231,8 +231,8 @@ static void account_kernel_stack(struct thread_info *ti, int account)
 void free_task(struct task_struct *tsk)
 {
        account_kernel_stack(tsk->stack, -1);
-       arch_release_thread_info(tsk->stack);
-       free_thread_info(tsk->stack);
+       arch_release_thread_stack(tsk->stack);
+       free_thread_stack(tsk->stack);
        rt_mutex_debug_task_free(tsk);
        ftrace_graph_exit_task(tsk);
        put_seccomp_filter(tsk);
@@ -343,7 +343,7 @@ void set_task_stack_end_magic(struct task_struct *tsk)
 static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 {
        struct task_struct *tsk;
-       struct thread_info *ti;
+       unsigned long *stack;
        int err;
 
        if (node == NUMA_NO_NODE)
@@ -352,15 +352,15 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        if (!tsk)
                return NULL;
 
-       ti = alloc_thread_info_node(tsk, node);
-       if (!ti)
+       stack = alloc_thread_stack_node(tsk, node);
+       if (!stack)
                goto free_tsk;
 
        err = arch_dup_task_struct(tsk, orig);
        if (err)
-               goto free_ti;
+               goto free_stack;
 
-       tsk->stack = ti;
+       tsk->stack = stack;
 #ifdef CONFIG_SECCOMP
        /*
         * We must handle setting up seccomp filters once we're under
@@ -392,14 +392,14 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
        tsk->task_frag.page = NULL;
        tsk->wake_q.next = NULL;
 
-       account_kernel_stack(ti, 1);
+       account_kernel_stack(stack, 1);
 
        kcov_task_init(tsk);
 
        return tsk;
 
-free_ti:
-       free_thread_info(ti);
+free_stack:
+       free_thread_stack(stack);
 free_tsk:
        free_task_struct(tsk);
        return NULL;
index 05254ee..4b353e0 100644 (file)
@@ -58,13 +58,36 @@ static void jump_label_update(struct static_key *key);
 
 void static_key_slow_inc(struct static_key *key)
 {
+       int v, v1;
+
        STATIC_KEY_CHECK_USE();
-       if (atomic_inc_not_zero(&key->enabled))
-               return;
+
+       /*
+        * Careful if we get concurrent static_key_slow_inc() calls;
+        * later calls must wait for the first one to _finish_ the
+        * jump_label_update() process.  At the same time, however,
+        * the jump_label_update() call below wants to see
+        * static_key_enabled(&key) for jumps to be updated properly.
+        *
+        * So give a special meaning to negative key->enabled: it sends
+        * static_key_slow_inc() down the slow path, and it is non-zero
+        * so it counts as "enabled" in jump_label_update().  Note that
+        * atomic_inc_unless_negative() checks >= 0, so roll our own.
+        */
+       for (v = atomic_read(&key->enabled); v > 0; v = v1) {
+               v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
+               if (likely(v1 == v))
+                       return;
+       }
 
        jump_label_lock();
-       if (atomic_inc_return(&key->enabled) == 1)
+       if (atomic_read(&key->enabled) == 0) {
+               atomic_set(&key->enabled, -1);
                jump_label_update(key);
+               atomic_set(&key->enabled, 1);
+       } else {
+               atomic_inc(&key->enabled);
+       }
        jump_label_unlock();
 }
 EXPORT_SYMBOL_GPL(static_key_slow_inc);
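
The rewritten static_key_slow_inc() gives key->enabled a transient third state: -1 while the first enabler is still patching the branch sites, which pushes every concurrent increment onto the slow path until patching finishes. A hedged user-space rendering of that protocol, using C11 atomics and a pthread mutex in place of jump_label_lock():

/*
 * Sketch only: patch_sites() stands in for jump_label_update().  The fast
 * path increments the count only while it is already positive; the first
 * enabler publishes -1, patches, then stores 1.
 */
#include <pthread.h>
#include <stdatomic.h>

static atomic_int enabled;
static pthread_mutex_t patch_lock = PTHREAD_MUTEX_INITIALIZER;

static void patch_sites(void) { /* ... */ }

void key_slow_inc(void)
{
        int v = atomic_load(&enabled);

        while (v > 0) {         /* fast path: key already enabled */
                if (atomic_compare_exchange_weak(&enabled, &v, v + 1))
                        return;
        }

        pthread_mutex_lock(&patch_lock);
        if (atomic_load(&enabled) == 0) {
                atomic_store(&enabled, -1);     /* "update in progress" */
                patch_sites();
                atomic_store(&enabled, 1);
        } else {
                atomic_fetch_add(&enabled, 1);
        }
        pthread_mutex_unlock(&patch_lock);
}
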
@@ -72,6 +95,13 @@ EXPORT_SYMBOL_GPL(static_key_slow_inc);
 static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
 {
+       /*
+        * The negative count check is valid even when a negative
+        * key->enabled is in use by static_key_slow_inc(); a
+        * __static_key_slow_dec() before the first static_key_slow_inc()
+        * returns is unbalanced, because all other static_key_slow_inc()
+        * instances block while the update is in progress.
+        */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
index a02f2dd..8d44b3f 100644 (file)
@@ -264,7 +264,12 @@ static const struct file_operations kcov_fops = {
 
 static int __init kcov_init(void)
 {
-       if (!debugfs_create_file("kcov", 0600, NULL, NULL, &kcov_fops)) {
+       /*
+        * The kcov debugfs file won't ever get removed and thus,
+        * there is no need to protect it against removal races. The
+        * use of debugfs_create_file_unsafe() is actually safe here.
+        */
+       if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
                pr_err("failed to create kcov in debugfs\n");
                return -ENOMEM;
        }
index 3ef3736..9c951fa 100644 (file)
@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
 }
 
 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                           struct thread_info *ti)
+                           struct task_struct *task)
 {
        SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
 
        /* Mark the current thread as blocked on the lock: */
-       ti->task->blocked_on = waiter;
+       task->blocked_on = waiter;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                        struct thread_info *ti)
+                        struct task_struct *task)
 {
        DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
-       DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
-       DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
-       ti->task->blocked_on = NULL;
+       DEBUG_LOCKS_WARN_ON(waiter->task != task);
+       DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+       task->blocked_on = NULL;
 
        list_del_init(&waiter->list);
        waiter->task = NULL;
index 0799fd3..d06ae3b 100644 (file)
@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
                                   struct mutex_waiter *waiter,
-                                  struct thread_info *ti);
+                                  struct task_struct *task);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-                               struct thread_info *ti);
+                               struct task_struct *task);
 extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
                             struct lock_class_key *key);
index 79d2d76..a70b90d 100644 (file)
@@ -537,7 +537,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                goto skip_wait;
 
        debug_mutex_lock_common(lock, &waiter);
-       debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+       debug_mutex_add_waiter(lock, &waiter, task);
 
        /* add waiting tasks to the end of the waitqueue (FIFO): */
        list_add_tail(&waiter.list, &lock->wait_list);
@@ -584,7 +584,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        }
        __set_task_state(task, TASK_RUNNING);
 
-       mutex_remove_waiter(lock, &waiter, current_thread_info());
+       mutex_remove_waiter(lock, &waiter, task);
        /* set it to 0 if there are no waiters left: */
        if (likely(list_empty(&lock->wait_list)))
                atomic_set(&lock->count, 0);
@@ -605,7 +605,7 @@ skip_wait:
        return 0;
 
 err:
-       mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+       mutex_remove_waiter(lock, &waiter, task);
        spin_unlock_mutex(&lock->wait_lock, flags);
        debug_mutex_free_waiter(&waiter);
        mutex_release(&lock->dep_map, 1, ip);
index 5cda397..a68bae5 100644 (file)
@@ -13,7 +13,7 @@
                do { spin_lock(lock); (void)(flags); } while (0)
 #define spin_unlock_mutex(lock, flags) \
                do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
                __list_del((waiter)->list.prev, (waiter)->list.next)
 
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
index df058be..0c2ee97 100644 (file)
@@ -146,6 +146,18 @@ int freeze_processes(void)
        if (!error && !oom_killer_disable())
                error = -EBUSY;
 
+       /*
+        * There is a hard to fix race between oom_reaper kernel thread
+        * and oom_killer_disable. oom_reaper calls exit_oom_victim
+        * before the victim reaches exit_mm so try to freeze all the tasks
+        * again and catch such a left over task.
+        */
+       if (!error) {
+               pr_info("Double checking all user space processes after OOM killer disable... ");
+               error = try_to_freeze_tasks(true);
+               pr_cont("\n");
+       }
+
        if (error)
                thaw_processes();
        return error;
index c1b537b..e406ba0 100644 (file)
@@ -1536,7 +1536,9 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
        for (;;) {
                /* Any allowed, online CPU? */
                for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
-                       if (!cpu_active(dest_cpu))
+                       if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+                               continue;
+                       if (!cpu_online(dest_cpu))
                                continue;
                        goto out;
                }
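
The extra test above lets kernel threads fall back to a CPU that is online but no longer active (for example one on its way out), while ordinary tasks still require an active CPU. The filter reads naturally as two predicates:

/*
 * Sketch of the fallback filter, with stub predicates in place of
 * cpu_online()/cpu_active().
 */
#include <stdbool.h>

struct task { bool is_kthread; };

static bool cpu_online_stub(int cpu) { (void)cpu; return true; }
static bool cpu_active_stub(int cpu) { (void)cpu; return true; }

static bool cpu_usable_for(const struct task *p, int cpu)
{
        if (!p->is_kthread && !cpu_active_stub(cpu))
                return false;           /* user tasks need an active CPU */
        return cpu_online_stub(cpu);    /* everyone needs an online CPU */
}
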
@@ -5147,14 +5149,16 @@ void show_state_filter(unsigned long state_filter)
                /*
                 * reset the NMI-timeout, listing all files on a slow
                 * console might take a lot of time:
+                * Also, reset softlockup watchdogs on all CPUs, because
+                * another CPU might be blocked waiting for us to process
+                * an IPI.
                 */
                touch_nmi_watchdog();
+               touch_all_softlockup_watchdogs();
                if (!state_filter || (p->state & state_filter))
                        sched_show_task(p);
        }
 
-       touch_all_softlockup_watchdogs();
-
 #ifdef CONFIG_SCHED_DEBUG
        if (!state_filter)
                sysrq_sched_debug_show();
index 40d5ace..7306356 100644 (file)
@@ -735,8 +735,6 @@ void post_init_entity_util_avg(struct sched_entity *se)
        }
 }
 
-static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq);
-static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq);
 #else
 void init_entity_runnable_average(struct sched_entity *se)
 {
@@ -2473,28 +2471,22 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 # ifdef CONFIG_SMP
-static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-       long tg_weight;
+       long tg_weight, load, shares;
 
        /*
-        * Use this CPU's real-time load instead of the last load contribution
-        * as the updating of the contribution is delayed, and we will use the
-        * the real-time load to calc the share. See update_tg_load_avg().
+        * This really should be: cfs_rq->avg.load_avg, but instead we use
+        * cfs_rq->load.weight, which is its upper bound. This helps ramp up
+        * the shares for small weight interactive tasks.
         */
-       tg_weight = atomic_long_read(&tg->load_avg);
-       tg_weight -= cfs_rq->tg_load_avg_contrib;
-       tg_weight += cfs_rq->load.weight;
+       load = scale_load_down(cfs_rq->load.weight);
 
-       return tg_weight;
-}
-
-static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
-{
-       long tg_weight, load, shares;
+       tg_weight = atomic_long_read(&tg->load_avg);
 
-       tg_weight = calc_tg_weight(tg, cfs_rq);
-       load = cfs_rq->load.weight;
+       /* Ensure tg_weight >= load */
+       tg_weight -= cfs_rq->tg_load_avg_contrib;
+       tg_weight += load;
 
        shares = (tg->shares * load);
        if (tg_weight)
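
The rewritten calc_cfs_shares() replaces this cfs_rq's (possibly stale) contribution in the group sum with its current load.weight, which keeps tg_weight >= load and therefore shares <= tg->shares. A small numeric illustration with invented values:

/*
 * Worked example only; the numbers are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
        long tg_shares   = 1024;   /* tg->shares */
        long tg_load_avg = 300;    /* sum of all contributions */
        long contrib     = 200;    /* this cfs_rq's stale contribution */
        long load        = 600;    /* current cfs_rq->load.weight */

        long tg_weight = tg_load_avg - contrib + load;  /* 700, never below load */
        long shares = tg_weight ? tg_shares * load / tg_weight : tg_shares;

        printf("tg_weight=%ld shares=%ld\n", tg_weight, shares);  /* 700, 877 */
        return 0;
}
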
@@ -2513,6 +2505,7 @@ static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
        return tg->shares;
 }
 # endif /* CONFIG_SMP */
+
 static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
                            unsigned long weight)
 {
@@ -2878,6 +2871,23 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq)
        }
 }
 
+/*
+ * Unsigned subtract and clamp on underflow.
+ *
+ * Explicitly do a load-store to ensure the intermediate value never hits
+ * memory. This allows lockless observations without ever seeing the negative
+ * values.
+ */
+#define sub_positive(_ptr, _val) do {                          \
+       typeof(_ptr) ptr = (_ptr);                              \
+       typeof(*ptr) val = (_val);                              \
+       typeof(*ptr) res, var = READ_ONCE(*ptr);                \
+       res = var - val;                                        \
+       if (res > var)                                          \
+               res = 0;                                        \
+       WRITE_ONCE(*ptr, res);                                  \
+} while (0)
+
 /* Group cfs_rq's load_avg is used for task_h_load and update_cfs_share */
 static inline int
 update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
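
sub_positive() above is an unsigned subtraction clamped at zero: the arithmetic happens in a local, and only the final value is stored back, so a lockless reader never sees a wrapped-around intermediate. The same macro in user-space form, with plain loads and stores where the kernel uses READ_ONCE()/WRITE_ONCE():

/*
 * Clamped unsigned subtraction, user-space rendering of the macro above.
 * typeof() is the GNU extension the kernel version relies on as well.
 */
#include <stdio.h>

#define sub_positive(_ptr, _val) do {                           \
        typeof(_ptr) ptr = (_ptr);                              \
        typeof(*ptr) val = (_val);                              \
        typeof(*ptr) res, var = *ptr;                           \
        res = var - val;                                        \
        if (res > var)          /* wrapped: clamp to zero */    \
                res = 0;                                        \
        *ptr = res;                                             \
} while (0)

int main(void)
{
        unsigned long avg = 100;

        sub_positive(&avg, 40);         /* 100 - 40 -> 60 */
        sub_positive(&avg, 400);        /* 60 - 400 -> clamped to 0 */
        printf("%lu\n", avg);
        return 0;
}
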
@@ -2887,15 +2897,15 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 
        if (atomic_long_read(&cfs_rq->removed_load_avg)) {
                s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
-               sa->load_avg = max_t(long, sa->load_avg - r, 0);
-               sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
+               sub_positive(&sa->load_avg, r);
+               sub_positive(&sa->load_sum, r * LOAD_AVG_MAX);
                removed_load = 1;
        }
 
        if (atomic_long_read(&cfs_rq->removed_util_avg)) {
                long r = atomic_long_xchg(&cfs_rq->removed_util_avg, 0);
-               sa->util_avg = max_t(long, sa->util_avg - r, 0);
-               sa->util_sum = max_t(s32, sa->util_sum - r * LOAD_AVG_MAX, 0);
+               sub_positive(&sa->util_avg, r);
+               sub_positive(&sa->util_sum, r * LOAD_AVG_MAX);
                removed_util = 1;
        }
 
@@ -2968,10 +2978,10 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
                          &se->avg, se->on_rq * scale_load_down(se->load.weight),
                          cfs_rq->curr == se, NULL);
 
-       cfs_rq->avg.load_avg = max_t(long, cfs_rq->avg.load_avg - se->avg.load_avg, 0);
-       cfs_rq->avg.load_sum = max_t(s64,  cfs_rq->avg.load_sum - se->avg.load_sum, 0);
-       cfs_rq->avg.util_avg = max_t(long, cfs_rq->avg.util_avg - se->avg.util_avg, 0);
-       cfs_rq->avg.util_sum = max_t(s32,  cfs_rq->avg.util_sum - se->avg.util_sum, 0);
+       sub_positive(&cfs_rq->avg.load_avg, se->avg.load_avg);
+       sub_positive(&cfs_rq->avg.load_sum, se->avg.load_sum);
+       sub_positive(&cfs_rq->avg.util_avg, se->avg.util_avg);
+       sub_positive(&cfs_rq->avg.util_sum, se->avg.util_sum);
 
        cfs_rq_util_change(cfs_rq);
 }
@@ -3220,7 +3230,7 @@ static inline void check_schedstat_required(void)
                        trace_sched_stat_iowait_enabled()  ||
                        trace_sched_stat_blocked_enabled() ||
                        trace_sched_stat_runtime_enabled())  {
-               pr_warn_once("Scheduler tracepoints stat_sleep, stat_iowait, "
+               printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, "
                             "stat_blocked and stat_runtime require the "
                             "kernel parameter schedstats=enabled or "
                             "kernel.sched_schedstats=1\n");
@@ -4157,6 +4167,26 @@ static void check_enqueue_throttle(struct cfs_rq *cfs_rq)
        if (!cfs_bandwidth_used())
                return;
 
+       /* Synchronize hierarchical throttle counter: */
+       if (unlikely(!cfs_rq->throttle_uptodate)) {
+               struct rq *rq = rq_of(cfs_rq);
+               struct cfs_rq *pcfs_rq;
+               struct task_group *tg;
+
+               cfs_rq->throttle_uptodate = 1;
+
+               /* Get closest up-to-date node, because leaves go first: */
+               for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+                       pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+                       if (pcfs_rq->throttle_uptodate)
+                               break;
+               }
+               if (tg) {
+                       cfs_rq->throttle_count = pcfs_rq->throttle_count;
+                       cfs_rq->throttled_clock_task = rq_clock_task(rq);
+               }
+       }
+
        /* an active group must be handled by the update_curr()->put() path */
        if (!cfs_rq->runtime_enabled || cfs_rq->curr)
                return;
@@ -4472,15 +4502,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight) {
+                       /* Avoid re-evaluating load for this entity: */
+                       se = parent_entity(se);
                        /*
                         * Bias pick_next to pick a task from this cfs_rq, as
                         * p is sleeping when it is within its sched_slice.
                         */
-                       if (task_sleep && parent_entity(se))
-                               set_next_buddy(parent_entity(se));
-
-                       /* avoid re-evaluating load for this entity */
-                       se = parent_entity(se);
+                       if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+                               set_next_buddy(se);
                        break;
                }
                flags |= DEQUEUE_SLEEP;
@@ -4882,19 +4911,24 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
                return wl;
 
        for_each_sched_entity(se) {
-               long w, W;
+               struct cfs_rq *cfs_rq = se->my_q;
+               long W, w = cfs_rq_load_avg(cfs_rq);
 
-               tg = se->my_q->tg;
+               tg = cfs_rq->tg;
 
                /*
                 * W = @wg + \Sum rw_j
                 */
-               W = wg + calc_tg_weight(tg, se->my_q);
+               W = wg + atomic_long_read(&tg->load_avg);
+
+               /* Ensure \Sum rw_j >= rw_i */
+               W -= cfs_rq->tg_load_avg_contrib;
+               W += w;
 
                /*
                 * w = rw_i + @wl
                 */
-               w = cfs_rq_load_avg(se->my_q) + wl;
+               w += wl;
 
                /*
                 * wl = S * s'_i; see (2)
index de607e4..71ce986 100644 (file)
@@ -437,7 +437,7 @@ struct cfs_rq {
 
        u64 throttled_clock, throttled_clock_task;
        u64 throttled_clock_task_time;
-       int throttled, throttle_count;
+       int throttled, throttle_count, throttle_uptodate;
        struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
index f96f038..ad1d616 100644 (file)
@@ -36,6 +36,10 @@ struct trace_bprintk_fmt {
 static inline struct trace_bprintk_fmt *lookup_format(const char *fmt)
 {
        struct trace_bprintk_fmt *pos;
+
+       if (!fmt)
+               return ERR_PTR(-EINVAL);
+
        list_for_each_entry(pos, &trace_bprintk_fmt_list, list) {
                if (!strcmp(pos->fmt, fmt))
                        return pos;
@@ -57,7 +61,8 @@ void hold_module_trace_bprintk_format(const char **start, const char **end)
        for (iter = start; iter < end; iter++) {
                struct trace_bprintk_fmt *tb_fmt = lookup_format(*iter);
                if (tb_fmt) {
-                       *iter = tb_fmt->fmt;
+                       if (!IS_ERR(tb_fmt))
+                               *iter = tb_fmt->fmt;
                        continue;
                }
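
lookup_format() above now distinguishes "not found" (NULL, which still means the format should be added to the list) from "invalid input" (an ERR_PTR value that must not be dereferenced or stored). A small user-space sketch of that convention; err_ptr()/is_err() are toy re-implementations, not the kernel macros:

/*
 * NULL means "absent"; a small negative errno encoded in the pointer
 * means "error".  Toy versions of ERR_PTR()/IS_ERR() for illustration.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(long err)     { return (void *)err; }
static int   is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

static void *lookup_entry(const char *fmt)
{
        if (!fmt)
                return err_ptr(-EINVAL);   /* invalid input, not "missing" */
        /* ... search the list; return the entry, or NULL if absent ... */
        return NULL;
}

int main(void)
{
        printf("is_err=%d\n", is_err(lookup_entry(NULL)));
        return 0;
}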
 
index 1427366..79bfe0e 100644 (file)
@@ -441,25 +441,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
+               if (!isolated)
+                       break;
+
                total_isolated += isolated;
+               cc->nr_freepages += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }
-
-               /* If a page was split, advance to the end of it */
-               if (isolated) {
-                       cc->nr_freepages += isolated;
-                       if (!strict &&
-                               cc->nr_migratepages <= cc->nr_freepages) {
-                               blockpfn += isolated;
-                               break;
-                       }
-
-                       blockpfn += isolated - 1;
-                       cursor += isolated - 1;
-                       continue;
+               if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+                       blockpfn += isolated;
+                       break;
                }
+               /* Advance to the end of split page */
+               blockpfn += isolated - 1;
+               cursor += isolated - 1;
+               continue;
 
 isolate_fail:
                if (strict)
@@ -469,6 +467,9 @@ isolate_fail:
 
        }
 
+       if (locked)
+               spin_unlock_irqrestore(&cc->zone->lock, flags);
+
        /*
         * There is a tiny chance that we have read bogus compound_order(),
         * so be careful to not go outside of the pageblock.
@@ -490,9 +491,6 @@ isolate_fail:
        if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
-       if (locked)
-               spin_unlock_irqrestore(&cc->zone->lock, flags);
-
        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -1011,6 +1009,7 @@ static void isolate_freepages(struct compact_control *cc)
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
+               unsigned long isolated;
 
                /*
                 * This can iterate a massively long zone without finding any
@@ -1035,8 +1034,12 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolate_freepages_block(cc, &isolate_start_pfn,
-                                       block_end_pfn, freelist, false);
+               isolated = isolate_freepages_block(cc, &isolate_start_pfn,
+                                               block_end_pfn, freelist, false);
+               /* If isolation failed early, do not continue needlessly */
+               if (!isolated && isolate_start_pfn < block_end_pfn &&
+                   cc->nr_migratepages > cc->nr_freepages)
+                       break;
 
                /*
                 * If we isolated enough freepages, or aborted due to async
index 00ae878..20f3b1f 100644 (file)
@@ -2186,7 +2186,7 @@ repeat:
                if (file->f_ra.mmap_miss > 0)
                        file->f_ra.mmap_miss--;
                addr = address + (page->index - vmf->pgoff) * PAGE_SIZE;
-               do_set_pte(vma, addr, page, pte, false, false, true);
+               do_set_pte(vma, addr, page, pte, false, false);
                unlock_page(page);
                goto next;
 unlock:
index 388c2bb..c1f3c0b 100644 (file)
@@ -1030,6 +1030,7 @@ static void destroy_compound_gigantic_page(struct page *page,
        int nr_pages = 1 << order;
        struct page *p = page + 1;
 
+       atomic_set(compound_mapcount_ptr(page), 0);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                clear_compound_head(p);
                set_page_refcounted(p);
@@ -4228,7 +4229,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
                if (saddr) {
                        spte = huge_pte_offset(svma->vm_mm, saddr);
                        if (spte) {
-                               mm_inc_nr_pmds(mm);
                                get_page(virt_to_page(spte));
                                break;
                        }
@@ -4243,9 +4243,9 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
        if (pud_none(*pud)) {
                pud_populate(mm, pud,
                                (pmd_t *)((unsigned long)spte & PAGE_MASK));
+               mm_inc_nr_pmds(mm);
        } else {
                put_page(virt_to_page(spte));
-               mm_inc_nr_pmds(mm);
        }
        spin_unlock(ptl);
 out:
index a37e5b6..2524ec8 100644 (file)
@@ -24,7 +24,8 @@
  */
 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
                        __GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
-                       __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC)
+                       __GFP_NORETRY|__GFP_MEMALLOC|__GFP_NOMEMALLOC|\
+                       __GFP_ATOMIC)
 
 /* The GFP flags allowed during early boot */
 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
index 28439ac..6845f92 100644 (file)
@@ -508,7 +508,7 @@ void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
        kasan_kmalloc(cache, object, cache->object_size, flags);
 }
 
-void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
+static void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
 {
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
@@ -626,7 +626,7 @@ void kasan_krealloc(const void *object, size_t size, gfp_t flags)
                kasan_kmalloc(page->slab_cache, object, size, flags);
 }
 
-void kasan_kfree(void *ptr)
+void kasan_poison_kfree(void *ptr)
 {
        struct page *page;
 
@@ -636,7 +636,7 @@ void kasan_kfree(void *ptr)
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
-               kasan_slab_free(page->slab_cache, ptr);
+               kasan_poison_slab_free(page->slab_cache, ptr);
 }
 
 void kasan_kfree_large(const void *ptr)
index e642992..04320d3 100644 (file)
@@ -307,8 +307,10 @@ static void hex_dump_object(struct seq_file *seq,
        len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 
        seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
+       kasan_disable_current();
        seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
                     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
+       kasan_enable_current();
 }
 
 /*
index 75e7440..ac8664d 100644 (file)
@@ -4203,7 +4203,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
        return &memcg->css;
 fail:
        mem_cgroup_free(memcg);
-       return NULL;
+       return ERR_PTR(-ENOMEM);
 }
 
 static int
@@ -5544,6 +5544,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
        struct mem_cgroup *memcg;
        unsigned int nr_pages;
        bool compound;
+       unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
        VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
@@ -5574,10 +5575,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 
        commit_charge(newpage, memcg, false);
 
-       local_irq_disable();
+       local_irq_save(flags);
        mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
        memcg_check_events(memcg, newpage);
-       local_irq_enable();
+       local_irq_restore(flags);
 }
 
 DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
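
Two small memcg fixes: css_alloc now signals failure with ERR_PTR(-ENOMEM) instead of NULL, so the caller's IS_ERR() handling sees a real error, and mem_cgroup_migrate() switches to local_irq_save()/restore() so it can also be called from contexts that already have interrupts disabled. A toy userspace model of why save/restore is the safe choice here (a bool stands in for the IRQ flag; names are invented):

#include <assert.h>
#include <stdbool.h>

static bool irqs_on = true;                      /* models the CPU IRQ flag */

static void irq_save(bool *flags)   { *flags = irqs_on; irqs_on = false; }
static void irq_restore(bool flags) { irqs_on = flags; }

static void charge_statistics(void)
{
        bool flags;

        irq_save(&flags);
        /* ... update per-cpu counters ... */
        irq_restore(flags);             /* put back whatever the caller had */
}

int main(void)
{
        irqs_on = false;                /* caller already runs with IRQs off */
        charge_statistics();
        assert(!irqs_on);               /* still off: nothing re-enabled     */
        return 0;
}
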
index 15322b7..cd1f29e 100644 (file)
@@ -2877,7 +2877,7 @@ static int __do_fault(struct vm_area_struct *vma, unsigned long address,
  * vm_ops->map_pages.
  */
 void do_set_pte(struct vm_area_struct *vma, unsigned long address,
-               struct page *page, pte_t *pte, bool write, bool anon, bool old)
+               struct page *page, pte_t *pte, bool write, bool anon)
 {
        pte_t entry;
 
@@ -2885,8 +2885,6 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
        entry = mk_pte(page, vma->vm_page_prot);
        if (write)
                entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-       if (old)
-               entry = pte_mkold(entry);
        if (anon) {
                inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
                page_add_new_anon_rmap(page, vma, address, false);
@@ -2900,16 +2898,8 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
        update_mmu_cache(vma, address, pte);
 }
 
-/*
- * If architecture emulates "accessed" or "young" bit without HW support,
- * there is no much gain with fault_around.
- */
 static unsigned long fault_around_bytes __read_mostly =
-#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-       PAGE_SIZE;
-#else
        rounddown_pow_of_two(65536);
-#endif
 
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
@@ -3032,20 +3022,9 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         */
        if (vma->vm_ops->map_pages && fault_around_bytes >> PAGE_SHIFT > 1) {
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
-               if (!pte_same(*pte, orig_pte))
-                       goto unlock_out;
                do_fault_around(vma, address, pte, pgoff, flags);
-               /* Check if the fault is handled by faultaround */
-               if (!pte_same(*pte, orig_pte)) {
-                       /*
-                        * Faultaround produce old pte, but the pte we've
-                        * handler fault for should be young.
-                        */
-                       pte_t entry = pte_mkyoung(*pte);
-                       if (ptep_set_access_flags(vma, address, pte, entry, 0))
-                               update_mmu_cache(vma, address, pte);
+               if (!pte_same(*pte, orig_pte))
                        goto unlock_out;
-               }
                pte_unmap_unlock(pte, ptl);
        }
 
@@ -3060,7 +3039,7 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                put_page(fault_page);
                return ret;
        }
-       do_set_pte(vma, address, fault_page, pte, false, false, false);
+       do_set_pte(vma, address, fault_page, pte, false, false);
        unlock_page(fault_page);
 unlock_out:
        pte_unmap_unlock(pte, ptl);
@@ -3111,7 +3090,7 @@ static int do_cow_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                }
                goto uncharge_out;
        }
-       do_set_pte(vma, address, new_page, pte, true, true, false);
+       do_set_pte(vma, address, new_page, pte, true, true);
        mem_cgroup_commit_charge(new_page, memcg, false, false);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pte_unmap_unlock(pte, ptl);
@@ -3164,7 +3143,7 @@ static int do_shared_fault(struct mm_struct *mm, struct vm_area_struct *vma,
                put_page(fault_page);
                return ret;
        }
-       do_set_pte(vma, address, fault_page, pte, true, false, false);
+       do_set_pte(vma, address, fault_page, pte, true, false);
        pte_unmap_unlock(pte, ptl);
 
        if (set_page_dirty(fault_page))
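
These memory.c hunks back out the "faultaround installs old PTEs" experiment: do_set_pte() loses its old argument, do_read_fault() drops the extra pte_mkyoung() fix-up, and fault_around_bytes is once again a fixed rounddown_pow_of_two(65536) regardless of __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS. A quick userspace check of what that window works out to, assuming 4 KiB pages:

#include <stdio.h>

static unsigned long rounddown_pow_of_two(unsigned long n)
{
        while (n & (n - 1))
                n &= n - 1;             /* clear low bits until one remains */
        return n;
}

int main(void)
{
        unsigned long bytes = rounddown_pow_of_two(65536);

        printf("fault-around window: %lu bytes = %lu pages of 4096\n",
               bytes, bytes / 4096);
        return 0;
}
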
index 9e075f8..8f65464 100644 (file)
@@ -104,20 +104,16 @@ static inline void poison_element(mempool_t *pool, void *element)
 
 static void kasan_poison_element(mempool_t *pool, void *element)
 {
-       if (pool->alloc == mempool_alloc_slab)
-               kasan_poison_slab_free(pool->pool_data, element);
-       if (pool->alloc == mempool_kmalloc)
-               kasan_kfree(element);
+       if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+               kasan_poison_kfree(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_free_pages(element, (unsigned long)pool->pool_data);
 }
 
 static void kasan_unpoison_element(mempool_t *pool, void *element, gfp_t flags)
 {
-       if (pool->alloc == mempool_alloc_slab)
-               kasan_slab_alloc(pool->pool_data, element, flags);
-       if (pool->alloc == mempool_kmalloc)
-               kasan_krealloc(element, (size_t)pool->pool_data, flags);
+       if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
+               kasan_unpoison_slab(element);
        if (pool->alloc == mempool_alloc_pages)
                kasan_alloc_pages(element, (unsigned long)pool->pool_data);
 }
index 9baf41c..bd3fdc2 100644 (file)
@@ -431,6 +431,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 
        return MIGRATEPAGE_SUCCESS;
 }
+EXPORT_SYMBOL(migrate_page_move_mapping);
 
 /*
  * The expected number of remaining references is the same as that
@@ -586,6 +587,7 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 
        mem_cgroup_migrate(page, newpage);
 }
+EXPORT_SYMBOL(migrate_page_copy);
 
 /************************************************************
  *                    Migration functions
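
Exporting migrate_page_move_mapping() and migrate_page_copy() lets a filesystem built as a module supply its own ->migratepage() rather than leaving its pages unmovable or relying on the buffer_head fallback. A hypothetical, not compile-tested sketch of such a callback built on the two exported helpers; example_migrate_page is a placeholder name, not code from this patch:

#include <linux/fs.h>
#include <linux/migrate.h>

static int example_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        /* Move the page-cache slot and index over to newpage. */
        rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;

        /* Copy contents and transfer page state (flags, dirty bit, ...). */
        migrate_page_copy(newpage, page);
        return MIGRATEPAGE_SUCCESS;
}
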
index acbc432..ddf7448 100644 (file)
@@ -474,13 +474,8 @@ static bool __oom_reap_task(struct task_struct *tsk)
        p = find_lock_task_mm(tsk);
        if (!p)
                goto unlock_oom;
-
        mm = p->mm;
-       if (!atomic_inc_not_zero(&mm->mm_users)) {
-               task_unlock(p);
-               goto unlock_oom;
-       }
-
+       atomic_inc(&mm->mm_users);
        task_unlock(p);
 
        if (!down_read_trylock(&mm->mmap_sem)) {
index c6cda3e..fedeba8 100644 (file)
@@ -207,13 +207,15 @@ void __dump_page_owner(struct page *page)
                .nr_entries = page_ext->nr_entries,
                .entries = &page_ext->trace_entries[0],
        };
-       gfp_t gfp_mask = page_ext->gfp_mask;
-       int mt = gfpflags_to_migratetype(gfp_mask);
+       gfp_t gfp_mask;
+       int mt;
 
        if (unlikely(!page_ext)) {
                pr_alert("There is not page extension available.\n");
                return;
        }
+       gfp_mask = page_ext->gfp_mask;
+       mt = gfpflags_to_migratetype(gfp_mask);
 
        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags)) {
                pr_alert("page_owner info is not active (free page?)\n");
index a361449..24463b6 100644 (file)
@@ -2227,7 +2227,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                        /* Remove the !PageUptodate pages we added */
                        shmem_undo_range(inode,
                                (loff_t)start << PAGE_SHIFT,
-                               (loff_t)index << PAGE_SHIFT, true);
+                               ((loff_t)index << PAGE_SHIFT) - 1, true);
                        goto undone;
                }
 
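
shmem_undo_range() takes an inclusive end offset, so rolling back the partially instantiated range has to stop at the last byte before page index, i.e. ((loff_t)index << PAGE_SHIFT) - 1; passing index << PAGE_SHIFT reached one page too far. A quick arithmetic check in userspace, assuming 4 KiB pages:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                   /* 4 KiB pages assumed */

int main(void)
{
        long long start = 3, index = 5;                  /* pages 3 and 4 added */
        long long lstart = start << PAGE_SHIFT;
        long long lend   = (index << PAGE_SHIFT) - 1;    /* inclusive end       */

        assert(lend == 5 * 4096 - 1);   /* last byte of page 4, not page 5 */
        printf("undo bytes [%lld, %lld]\n", lstart, lend);
        return 0;
}
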
index 59f5faf..90530ff 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -242,7 +242,7 @@ void rotate_reclaimable_page(struct page *page)
                get_page(page);
                local_irq_save(flags);
                pvec = this_cpu_ptr(&lru_rotate_pvecs);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
@@ -296,7 +296,7 @@ void activate_page(struct page *page)
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);
 
                get_page(page);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
@@ -391,9 +391,8 @@ static void __lru_cache_add(struct page *page)
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
 
        get_page(page);
-       if (!pagevec_space(pvec))
+       if (!pagevec_add(pvec, page) || PageCompound(page))
                __pagevec_lru_add(pvec);
-       pagevec_add(pvec, page);
        put_cpu_var(lru_add_pvec);
 }
 
@@ -628,7 +627,7 @@ void deactivate_file_page(struct page *page)
        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);
 
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
                put_cpu_var(lru_deactivate_file_pvecs);
        }
@@ -648,7 +647,7 @@ void deactivate_page(struct page *page)
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);
 
                get_page(page);
-               if (!pagevec_add(pvec, page))
+               if (!pagevec_add(pvec, page) || PageCompound(page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
                put_cpu_var(lru_deactivate_pvecs);
        }
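
All the per-CPU pagevec users above now drain either when pagevec_add() reports the vector just became full (it returns the space remaining after the add) or when the page is compound, so huge pages are pushed to the LRU immediately instead of lingering; __lru_cache_add() is also reshuffled to add first and drain afterwards like the others. A userspace model of that add-then-maybe-drain pattern (PVEC_SIZE and the int "pages" are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#define PVEC_SIZE 14                    /* illustrative size */

struct pvec { int nr; int pages[PVEC_SIZE]; };

/* returns space left after adding; 0 means "just became full" */
static int pvec_add(struct pvec *pv, int page)
{
        pv->pages[pv->nr++] = page;
        return PVEC_SIZE - pv->nr;
}

static void pvec_drain(struct pvec *pv)
{
        printf("draining %d pages to the LRU\n", pv->nr);
        pv->nr = 0;
}

static void lru_cache_add(struct pvec *pv, int page, bool compound)
{
        if (!pvec_add(pv, page) || compound)
                pvec_drain(pv);         /* full, or a huge page: flush now */
}

int main(void)
{
        struct pvec pv = { 0 };
        int i;

        for (i = 0; i < 20; i++)
                lru_cache_add(&pv, i, false);
        lru_cache_add(&pv, 100, true);  /* compound page never lingers */
        return 0;
}
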
index 06b4df9..2808d55 100644 (file)
@@ -446,16 +446,27 @@ out_no_rpciod:
        return ERR_PTR(err);
 }
 
-struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
+static struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
                                        struct rpc_xprt *xprt)
 {
        struct rpc_clnt *clnt = NULL;
        struct rpc_xprt_switch *xps;
 
-       xps = xprt_switch_alloc(xprt, GFP_KERNEL);
-       if (xps == NULL)
-               return ERR_PTR(-ENOMEM);
-
+       if (args->bc_xprt && args->bc_xprt->xpt_bc_xps) {
+               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               xps = args->bc_xprt->xpt_bc_xps;
+               xprt_switch_get(xps);
+       } else {
+               xps = xprt_switch_alloc(xprt, GFP_KERNEL);
+               if (xps == NULL) {
+                       xprt_put(xprt);
+                       return ERR_PTR(-ENOMEM);
+               }
+               if (xprt->bc_xprt) {
+                       xprt_switch_get(xps);
+                       xprt->bc_xprt->xpt_bc_xps = xps;
+               }
+       }
        clnt = rpc_new_client(args, xps, xprt, NULL);
        if (IS_ERR(clnt))
                return clnt;
@@ -483,7 +494,6 @@ struct rpc_clnt *rpc_create_xprt(struct rpc_create_args *args,
 
        return clnt;
 }
-EXPORT_SYMBOL_GPL(rpc_create_xprt);
 
 /**
  * rpc_create - create an RPC client and transport with one call
@@ -509,6 +519,15 @@ struct rpc_clnt *rpc_create(struct rpc_create_args *args)
        };
        char servername[48];
 
+       if (args->bc_xprt) {
+               WARN_ON(args->protocol != XPRT_TRANSPORT_BC_TCP);
+               xprt = args->bc_xprt->xpt_bc_xprt;
+               if (xprt) {
+                       xprt_get(xprt);
+                       return rpc_create_xprt(args, xprt);
+               }
+       }
+
        if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
                xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
        if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
index f5572e3..4f01f63 100644 (file)
@@ -136,6 +136,8 @@ static void svc_xprt_free(struct kref *kref)
        /* See comment on corresponding get in xs_setup_bc_tcp(): */
        if (xprt->xpt_bc_xprt)
                xprt_put(xprt->xpt_bc_xprt);
+       if (xprt->xpt_bc_xps)
+               xprt_switch_put(xprt->xpt_bc_xps);
        xprt->xpt_ops->xpo_free(xprt);
        module_put(owner);
 }
index 2d3e0c4..7e2b2fa 100644 (file)
@@ -3057,6 +3057,7 @@ static struct rpc_xprt *xs_setup_bc_tcp(struct xprt_create *args)
                return xprt;
 
        args->bc_xprt->xpt_bc_xprt = NULL;
+       args->bc_xprt->xpt_bc_xps = NULL;
        xprt_put(xprt);
        ret = ERR_PTR(-EINVAL);
 out_err:
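
Taken together, the sunrpc hunks let the RPC back channel (as used for NFSv4.x callbacks) reuse the transport switch cached on the server-side transport (xpt_bc_xps alongside xpt_bc_xprt) instead of allocating a fresh one each time, with the extra references dropped in svc_xprt_free() and cleared again on xs_setup_bc_tcp()'s error path. A userspace model of the reuse-or-allocate refcounting pattern; struct and function names here are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct xps { int refcount; };

static struct xps *cache;               /* models xpt_bc_xps */

static struct xps *xps_get(struct xps *x) { x->refcount++; return x; }

static void xps_put(struct xps *x)
{
        if (--x->refcount == 0)
                free(x);
}

static struct xps *get_switch(void)
{
        struct xps *x;

        if (cache)
                return xps_get(cache);  /* reuse: one reference per user */

        x = malloc(sizeof(*x));
        if (!x)
                abort();
        x->refcount = 1;                /* caller's reference            */
        cache = xps_get(x);             /* the cache holds its own       */
        return x;
}

int main(void)
{
        struct xps *a = get_switch();   /* allocates; refs: caller + cache */
        struct xps *b = get_switch();   /* reuses the cached object        */

        xps_put(a);
        xps_put(b);
        xps_put(cache);                 /* teardown drops the cache's ref  */
        cache = NULL;
        printf("done\n");
        return 0;
}
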
index 80aa6a3..735362c 100644 (file)
@@ -315,7 +315,7 @@ static struct sock *unix_find_socket_byinode(struct inode *i)
                    &unix_socket_table[i->i_ino & (UNIX_HASH_SIZE - 1)]) {
                struct dentry *dentry = unix_sk(s)->path.dentry;
 
-               if (dentry && d_backing_inode(dentry) == i) {
+               if (dentry && d_real_inode(dentry) == i) {
                        sock_hold(s);
                        goto found;
                }
@@ -911,7 +911,7 @@ static struct sock *unix_find_other(struct net *net,
                err = kern_path(sunname->sun_path, LOOKUP_FOLLOW, &path);
                if (err)
                        goto fail;
-               inode = d_backing_inode(path.dentry);
+               inode = d_real_inode(path.dentry);
                err = inode_permission(inode, MAY_WRITE);
                if (err)
                        goto put_fail;
@@ -1048,7 +1048,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                        goto out_up;
                }
                addr->hash = UNIX_HASH_SIZE;
-               hash = d_backing_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
+               hash = d_real_inode(dentry)->i_ino & (UNIX_HASH_SIZE - 1);
                spin_lock(&unix_table_lock);
                u->path = u_path;
                list = &unix_socket_table[hash];
index bd5a272..346fbf2 100644 (file)
@@ -597,7 +597,7 @@ int key_reject_and_link(struct key *key,
 
        mutex_unlock(&key_construction_mutex);
 
-       if (keyring)
+       if (keyring && link_ret == 0)
                __key_link_end(keyring, &key->index_key, edit);
 
        /* wake up anyone waiting for a key to be constructed */
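
__key_link_end() now runs only when the earlier __key_link_begin() actually succeeded (link_ret == 0); on the failure path the edit state was never set up, so unconditionally ending the link operated on uninitialised data. A userspace model of that begin/end pairing rule (all names invented):

#include <stdio.h>

struct edit_state { int token; };

static int link_begin(struct edit_state *edit, int fail)
{
        if (fail)
                return -1;              /* edit left untouched on failure */
        edit->token = 123;
        return 0;
}

static void link_end(struct edit_state *edit)
{
        printf("ending edit %d\n", edit->token);
}

static void build_key(int make_begin_fail)
{
        struct edit_state edit;         /* only valid if begin succeeds */
        int link_ret = link_begin(&edit, make_begin_fail);

        /* ... construct or reject the key ... */

        if (link_ret == 0)              /* the fix: end only a successful begin */
                link_end(&edit);
}

int main(void)
{
        build_key(0);                   /* begin/end properly paired      */
        build_key(1);                   /* no bogus end on the error path */
        return 0;
}
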
index c0f8f61..172dacd 100644 (file)
@@ -420,6 +420,7 @@ static int dummy_hrtimer_stop(struct snd_pcm_substream *substream)
 
 static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
 {
+       hrtimer_cancel(&dpcm->timer);
        tasklet_kill(&dpcm->tasklet);
 }
 
index 87041dd..47a358f 100644 (file)
@@ -444,7 +444,7 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
        err = reg_raw_write(codec, reg, val);
        if (err == -EAGAIN) {
                err = snd_hdac_power_up_pm(codec);
-               if (!err)
+               if (err >= 0)
                        err = reg_raw_write(codec, reg, val);
                snd_hdac_power_down_pm(codec);
        }
@@ -470,7 +470,7 @@ static int __snd_hdac_regmap_read_raw(struct hdac_device *codec,
        err = reg_raw_read(codec, reg, val, uncached);
        if (err == -EAGAIN) {
                err = snd_hdac_power_up_pm(codec);
-               if (!err)
+               if (err >= 0)
                        err = reg_raw_read(codec, reg, val, uncached);
                snd_hdac_power_down_pm(codec);
        }
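
The regmap fix accounts for snd_hdac_power_up_pm() following the usual runtime-PM convention of returning a negative errno on failure but possibly a positive value on success, so the retry has to test err >= 0 rather than !err. A tiny userspace model of that convention (helper names are made up):

#include <stdio.h>

/* returns a negative errno on failure, 0 or a positive count on success */
static int power_up(int already_active)
{
        return already_active ? 1 : 0;
}

static int write_reg(int already_active)
{
        int err = power_up(already_active);

        if (err >= 0)                   /* was "if (!err)": missed err == 1 */
                printf("retrying the register access\n");
        return err < 0 ? err : 0;
}

int main(void)
{
        write_reg(0);
        write_reg(1);                   /* now retried as well */
        return 0;
}
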
index 17fd817..0621920 100644 (file)
@@ -115,20 +115,20 @@ static int substream_free_pages(struct azx *chip,
 /*
  * Register access ops. Tegra HDA register access is DWORD only.
  */
-static void hda_tegra_writel(u32 value, u32 *addr)
+static void hda_tegra_writel(u32 value, u32 __iomem *addr)
 {
        writel(value, addr);
 }
 
-static u32 hda_tegra_readl(u32 *addr)
+static u32 hda_tegra_readl(u32 __iomem *addr)
 {
        return readl(addr);
 }
 
-static void hda_tegra_writew(u16 value, u16 *addr)
+static void hda_tegra_writew(u16 value, u16 __iomem  *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
@@ -137,20 +137,20 @@ static void hda_tegra_writew(u16 value, u16 *addr)
        writel(v, dword_addr);
 }
 
-static u16 hda_tegra_readw(u16 *addr)
+static u16 hda_tegra_readw(u16 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
        return (v >> shift) & 0xffff;
 }
 
-static void hda_tegra_writeb(u8 value, u8 *addr)
+static void hda_tegra_writeb(u8 value, u8 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
@@ -159,10 +159,10 @@ static void hda_tegra_writeb(u8 value, u8 *addr)
        writel(v, dword_addr);
 }
 
-static u8 hda_tegra_readb(u8 *addr)
+static u8 hda_tegra_readb(u8 __iomem *addr)
 {
        unsigned int shift = ((unsigned long)(addr) & 0x3) << 3;
-       void *dword_addr = (void *)((unsigned long)(addr) & ~0x3);
+       void __iomem *dword_addr = (void __iomem *)((unsigned long)(addr) & ~0x3);
        u32 v;
 
        v = readl(dword_addr);
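
Adding __iomem here silences sparse address-space warnings; the helpers themselves are unchanged and still emulate byte and word accesses with an aligned 32-bit readl()/writel() plus shift and mask, since the Tegra HDA block only accepts DWORD accesses. A userspace model of that read-modify-write (a plain uint32_t stands in for the MMIO word):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t reg = 0x11223344;       /* stands in for the aligned MMIO word */

static void write_byte(uintptr_t byte_addr, uint8_t value)
{
        unsigned int shift = (byte_addr & 0x3) << 3;
        uint32_t v = reg;               /* readl() of the aligned dword */

        v &= ~(0xffu << shift);
        v |= (uint32_t)value << shift;
        reg = v;                        /* writel() back                */
}

int main(void)
{
        write_byte(2, 0xab);            /* patch byte 2 inside the word */
        assert(reg == 0x11ab3344);
        printf("reg = %#x\n", (unsigned int)reg);
        return 0;
}
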
index 0fe18ed..900bfbc 100644 (file)
@@ -5650,6 +5650,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
+       SND_PCI_QUIRK(0x17aa, 0x5050, "Thinkpad T560p", ALC292_FIXUP_TPT460),
+       SND_PCI_QUIRK(0x17aa, 0x5053, "Thinkpad T460", ALC292_FIXUP_TPT460),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5831,6 +5833,10 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x12, 0x90a60160},
                {0x14, 0x90170120},
                {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60170},
+               {0x14, 0x90170120},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS),
        SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
index c2b61c4..0bf5085 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 echo "Test histogram with execname modifier"
 
 echo 'hist:keys=common_pid.execname' > events/sched/sched_process_fork/trigger
index b2902d4..a00184c 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 echo "Test histogram basic tigger"
 
 echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
index 03c4a46..3478b00 100644 (file)
@@ -23,15 +23,14 @@ if [ ! -f events/sched/sched_process_fork/trigger ]; then
     exit_unsupported
 fi
 
-reset_tracer
-do_reset
-
-FEATURE=`grep hist events/sched/sched_process_fork/trigger`
-if [ -z "$FEATURE" ]; then
+if [ ! -f events/sched/sched_process_fork/hist ]; then
     echo "hist trigger is not supported"
     exit_unsupported
 fi
 
+reset_tracer
+do_reset
+
 reset_trigger
 
 echo "Test histogram multiple tiggers"
index 932ff57..00c4f65 100644 (file)
@@ -136,7 +136,7 @@ int check_compaction(unsigned long mem_free, unsigned int hugepage_size)
        printf("No of huge pages allocated = %d\n",
               (atoi(nr_hugepages)));
 
-       if (write(fd, initial_nr_hugepages, sizeof(initial_nr_hugepages))
+       if (write(fd, initial_nr_hugepages, strlen(initial_nr_hugepages))
            != strlen(initial_nr_hugepages)) {
                perror("Failed to write to /proc/sys/vm/nr_hugepages\n");
                goto close_fd;
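
The restore step used to write sizeof(initial_nr_hugepages) bytes back to /proc/sys/vm/nr_hugepages, i.e. the whole buffer including whatever followed the digits; strlen() writes only the value that was actually read. A trivial userspace illustration (the buffer size here is arbitrary):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char initial_nr_hugepages[20] = "128";   /* value read back earlier */

        printf("sizeof = %zu, strlen = %zu\n",
               sizeof(initial_nr_hugepages),     /* 20: would write padding */
               strlen(initial_nr_hugepages));    /* 3: just the digits      */
        return 0;
}
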
index 6ba7455..6173ada 100644 (file)
@@ -1,6 +1,6 @@
 all:
 
-all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder
+all: ring virtio_ring_0_9 virtio_ring_poll virtio_ring_inorder noring
 
 CFLAGS += -Wall
 CFLAGS += -pthread -O2 -ggdb
@@ -15,11 +15,13 @@ ring: ring.o main.o
 virtio_ring_0_9: virtio_ring_0_9.o main.o
 virtio_ring_poll: virtio_ring_poll.o main.o
 virtio_ring_inorder: virtio_ring_inorder.o main.o
+noring: noring.o main.o
 clean:
        -rm main.o
        -rm ring.o ring
        -rm virtio_ring_0_9.o virtio_ring_0_9
        -rm virtio_ring_poll.o virtio_ring_poll
        -rm virtio_ring_inorder.o virtio_ring_inorder
+       -rm noring.o noring
 
 .PHONY: all clean
index 34e94c4..d83707a 100644 (file)
@@ -1,2 +1,6 @@
 Partial implementation of various ring layouts, useful to tune virtio design.
 Uses shared memory heavily.
+
+Typical use:
+
+# sh run-on-all.sh perf stat -r 10 --log-fd 1 -- ./ring
diff --git a/tools/virtio/ringtest/noring.c b/tools/virtio/ringtest/noring.c
new file mode 100644 (file)
index 0000000..eda2f48
--- /dev/null
@@ -0,0 +1,69 @@
+#define _GNU_SOURCE
+#include "main.h"
+#include <assert.h>
+
+/* stub implementation: useful for measuring overhead */
+void alloc_ring(void)
+{
+}
+
+/* guest side */
+int add_inbuf(unsigned len, void *buf, void *datap)
+{
+       return 0;
+}
+
+/*
+ * skb_array API provides no way for producer to find out whether a given
+ * buffer was consumed.  Our tests merely require that a successful get_buf
+ * implies that add_inbuf succeed in the past, and that add_inbuf will succeed,
+ * fake it accordingly.
+ */
+void *get_buf(unsigned *lenp, void **bufp)
+{
+       return "Buffer";
+}
+
+void poll_used(void)
+{
+}
+
+void disable_call()
+{
+       assert(0);
+}
+
+bool enable_call()
+{
+       assert(0);
+}
+
+void kick_available(void)
+{
+       assert(0);
+}
+
+/* host side */
+void disable_kick()
+{
+       assert(0);
+}
+
+bool enable_kick()
+{
+       assert(0);
+}
+
+void poll_avail(void)
+{
+}
+
+bool use_buf(unsigned *lenp, void **bufp)
+{
+       return true;
+}
+
+void call_used(void)
+{
+       assert(0);
+}
index 52b0f71..2e69ca8 100755 (executable)
@@ -3,10 +3,10 @@
 #use last CPU for host. Why not the first?
 #many devices tend to use cpu0 by default so
 #it tends to be busier
-HOST_AFFINITY=$(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n|tail -1)
+HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
 
 #run command on all cpus
-for cpu in $(cd /dev/cpu; ls|grep -v '[a-z]'|sort -n);
+for cpu in $(seq 0 $HOST_AFFINITY)
 do
        #Don't run guest and host on same CPU
        #It actually works ok if using signalling
index 1889163..7cf6e17 100644 (file)
@@ -492,7 +492,7 @@ static void slab_stats(struct slabinfo *s)
                        s->deactivate_to_head + s->deactivate_to_tail + s->deactivate_bypass;
 
        if (total) {
-               printf("\nSlab Deactivation             Ocurrences  %%\n");
+               printf("\nSlab Deactivation             Occurrences %%\n");
                printf("-------------------------------------------------\n");
                printf("Slab full                     %7lu  %3lu%%\n",
                        s->deactivate_full, (s->deactivate_full * 100) / total);
index 02e98f3..48bd520 100644 (file)
@@ -2941,7 +2941,7 @@ static long kvm_vm_ioctl(struct file *filp,
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
-               if (routing.nr >= KVM_MAX_IRQ_ROUTES)
+               if (routing.nr > KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
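
Accepting routing.nr == KVM_MAX_IRQ_ROUTES fixes an off-by-one: a routing table with exactly the advertised maximum number of entries is still within bounds and should not be rejected. A minimal userspace model of the corrected bound check (the MAX_ROUTES value is illustrative):

#include <assert.h>

#define MAX_ROUTES 4096         /* illustrative stand-in for KVM_MAX_IRQ_ROUTES */

static int set_routing(unsigned int nr)
{
        if (nr > MAX_ROUTES)    /* was ">=", which rejected nr == MAX_ROUTES */
                return -1;
        return 0;
}

int main(void)
{
        assert(set_routing(MAX_ROUTES) == 0);
        assert(set_routing(MAX_ROUTES + 1) == -1);
        return 0;
}
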