Merge branch 'x86-cpufeature-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 5 Aug 2014 00:12:45 +0000 (17:12 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 5 Aug 2014 00:12:45 +0000 (17:12 -0700)
Pull x86 cpufeature updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Continued cleanups of CPU bugs mis-marked as 'missing features', by
     Borislav Petkov.

   - Detect the xsaves/xrstors feature and related cleanup, by Fenghua
     Yu"

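For context, the xsaves/xrstors detection mentioned above comes down to a
single CPUID bit: CPUID.(EAX=0Dh, ECX=1):EAX bit 3. Below is a minimal
userspace sketch of that same check -- illustrative only, not code from the
merged series -- assuming gcc/clang's <cpuid.h> on an x86 host:

    /* Illustrative only -- not from the merged series. Probes the same
     * CPUID bit the kernel's XSAVES detection relies on:
     * CPUID.(EAX=0Dh, ECX=1):EAX[3]. */
    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* Leaf 0xD enumerates XSAVE state; guard against older CPUs. */
            if (__get_cpuid_max(0, NULL) < 0xd) {
                    puts("CPUID leaf 0xD not supported");
                    return 1;
            }
            __cpuid_count(0xd, 1, eax, ebx, ecx, edx);
            printf("xsaves/xrstors: %s\n", (eax & (1u << 3)) ? "yes" : "no");
            return 0;
    }
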
* 'x86-cpufeature-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, cpu: Kill cpu_has_mp
  x86, amd: Cleanup init_amd
  x86/cpufeature: Add bug flags to /proc/cpuinfo
  x86, cpufeature: Convert more "features" to bugs
  x86/xsaves: Detect xsaves/xrstors feature
  x86/cpufeature.h: Reformat x86 feature macros

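The "Add bug flags to /proc/cpuinfo" commit means x86 /proc/cpuinfo now
carries a "bugs" line alongside the familiar "flags" line, listing known
hardware bugs (the X86_BUG_* bits) instead of mixing them in with features.
A hedged sketch of how userspace might read it -- illustrative; the exact
set of flags varies by CPU and kernel version:

    /* Illustrative sketch -- not part of the series. Prints the "bugs"
     * line that this cycle's cpufeature work adds to /proc/cpuinfo. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[4096];
            FILE *f = fopen("/proc/cpuinfo", "r");

            if (!f) {
                    perror("/proc/cpuinfo");
                    return 1;
            }
            while (fgets(line, sizeof(line), f)) {
                    if (strncmp(line, "bugs", 4) == 0) {
                            fputs(line, stdout);  /* first CPU's entry */
                            break;
                    }
            }
            fclose(f);
            return 0;
    }
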
2182 files changed:
.mailmap
CREDITS
Documentation/Changes
Documentation/DocBook/gadget.tmpl
Documentation/DocBook/genericirq.tmpl
Documentation/DocBook/kernel-locking.tmpl
Documentation/DocBook/libata.tmpl
Documentation/DocBook/media/Makefile
Documentation/DocBook/media_api.tmpl
Documentation/DocBook/mtdnand.tmpl
Documentation/DocBook/regulator.tmpl
Documentation/DocBook/uio-howto.tmpl
Documentation/DocBook/usb.tmpl
Documentation/DocBook/writing-an-alsa-driver.tmpl
Documentation/RCU/RTFP.txt
Documentation/RCU/rcuref.txt
Documentation/accounting/getdelays.c
Documentation/acpi/enumeration.txt
Documentation/arm64/booting.txt
Documentation/arm64/memory.txt
Documentation/cgroups/cgroups.txt
Documentation/cgroups/unified-hierarchy.txt
Documentation/cpu-freq/intel-pstate.txt
Documentation/devicetree/bindings/arm/armada-38x.txt
Documentation/devicetree/bindings/arm/exynos/power_domain.txt
Documentation/devicetree/bindings/arm/l2cc.txt
Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
Documentation/devicetree/bindings/arm/samsung/pmu.txt
Documentation/devicetree/bindings/ata/ahci-platform.txt
Documentation/devicetree/bindings/ata/ahci-st.txt [new file with mode: 0644]
Documentation/devicetree/bindings/ata/imx-sata.txt [new file with mode: 0644]
Documentation/devicetree/bindings/ata/tegra-sata.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/clk-palmas-clk32kg-clocks.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/clock-bindings.txt
Documentation/devicetree/bindings/clock/clps711x-clock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qcom,gcc.txt
Documentation/devicetree/bindings/clock/qcom,mmcc.txt
Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/rockchip.txt
Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt
Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt
Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt
Documentation/devicetree/bindings/clock/st/st,clkgen.txt
Documentation/devicetree/bindings/clock/st/st,flexgen.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/st/st,quadfs.txt
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
Documentation/devicetree/bindings/crypto/amd-ccp.txt [new file with mode: 0644]
Documentation/devicetree/bindings/crypto/qcom-qce.txt [new file with mode: 0644]
Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
Documentation/devicetree/bindings/i2c/i2c-rk3x.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/email-clients.txt
Documentation/filesystems/caching/operations.txt
Documentation/hwmon/ntc_thermistor
Documentation/input/event-codes.txt
Documentation/ioctl/ioctl-number.txt
Documentation/kbuild/makefiles.txt
Documentation/kernel-parameters.txt
Documentation/laptops/00-INDEX
Documentation/laptops/freefall.c [new file with mode: 0644]
Documentation/laptops/hpfall.c [deleted file]
Documentation/memory-barriers.txt
Documentation/memory-hotplug.txt
Documentation/ptp/testptp.c
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/sysctl/kernel.txt
Documentation/sysctl/vm.txt
Documentation/thermal/nouveau_thermal
Documentation/trace/ftrace-design.txt
Documentation/trace/ftrace.txt
Documentation/trace/postprocess/trace-vmscan-postprocess.pl
Documentation/virtual/kvm/api.txt
MAINTAINERS
Makefile
arch/alpha/include/asm/processor.h
arch/arc/include/asm/cache.h
arch/arc/include/asm/processor.h
arch/arc/include/uapi/asm/ptrace.h
arch/arc/kernel/ctx_sw_asm.S
arch/arc/kernel/devtree.c
arch/arc/kernel/head.S
arch/arc/kernel/perf_event.c
arch/arc/kernel/ptrace.c
arch/arc/kernel/smp.c
arch/arc/kernel/vmlinux.lds.S
arch/arc/mm/cache_arc700.c
arch/arm/Kconfig
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am335x-igep0033.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/armada-375-db.dts
arch/arm/boot/dts/armada-380.dtsi
arch/arm/boot/dts/armada-385-db.dts
arch/arm/boot/dts/armada-385-rd.dts
arch/arm/boot/dts/armada-385.dtsi
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/at91sam9261.dtsi
arch/arm/boot/dts/at91sam9261ek.dts
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos4210.dtsi
arch/arm/boot/dts/exynos4x12.dtsi
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/hi3620.dtsi
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
arch/arm/boot/dts/imx53-m53evk.dts
arch/arm/boot/dts/imx6dl-hummingboard.dts
arch/arm/boot/dts/imx6q-gw51xx.dts
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
arch/arm/boot/dts/imx6qdl-microsom.dtsi
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
arch/arm/boot/dts/omap3-beagle-xm.dts
arch/arm/boot/dts/omap3-evm-common.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/ste-nomadik-s8815.dts
arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
arch/arm/boot/dts/stih415.dtsi
arch/arm/boot/dts/stih416-b2020-revE.dts [deleted file]
arch/arm/boot/dts/stih416-b2020e.dts [new file with mode: 0644]
arch/arm/boot/dts/stih416.dtsi
arch/arm/common/scoop.c
arch/arm/configs/bcm_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/mvebu_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/crypto/aesbs-glue.c
arch/arm/include/asm/ftrace.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/mcpm.h
arch/arm/include/asm/processor.h
arch/arm/include/asm/thread_info.h
arch/arm/kernel/devtree.c
arch/arm/kernel/iwmmxt.S
arch/arm/kernel/kgdb.c
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/kprobes-test.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/probes-arm.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/topology.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-berlin/Kconfig
arch/arm/mach-cns3xxx/Kconfig
arch/arm/mach-davinci/Kconfig
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/common.h
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/firmware.c
arch/arm/mach-exynos/hotplug.c
arch/arm/mach-exynos/mcpm-exynos.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-exynos/pm.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-highbank/Kconfig
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-gate2.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/clk-imx6sl.c
arch/arm/mach-integrator/Kconfig
arch/arm/mach-integrator/impd1.c
arch/arm/mach-integrator/integrator_ap.c
arch/arm/mach-integrator/integrator_cp.c
arch/arm/mach-keystone/Kconfig
arch/arm/mach-moxart/Kconfig
arch/arm/mach-mvebu/Kconfig
arch/arm/mach-mvebu/Makefile
arch/arm/mach-mvebu/board-v7.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-mvebu/headsmp-a9.S
arch/arm/mach-mvebu/pmsu.c
arch/arm/mach-mvebu/pmsu_ll.S [new file with mode: 0644]
arch/arm/mach-nomadik/Kconfig
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/clkt_dpll.c
arch/arm/mach-omap2/cm-regbits-34xx.h
arch/arm/mach-omap2/cm33xx.h
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/dsp.c
arch/arm/mach-omap2/gpmc-nand.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_54xx_data.c
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/mach-omap2/prm-regbits-34xx.h
arch/arm/mach-omap2/soc.h
arch/arm/mach-prima2/Kconfig
arch/arm/mach-qcom/Kconfig
arch/arm/mach-rockchip/Kconfig
arch/arm/mach-s3c24xx/Kconfig
arch/arm/mach-s3c64xx/Kconfig
arch/arm/mach-s5p64x0/Kconfig
arch/arm/mach-s5pc100/Kconfig
arch/arm/mach-s5pv210/Kconfig
arch/arm/mach-sa1100/collie.c
arch/arm/mach-shmobile/Kconfig
arch/arm/mach-spear/Kconfig
arch/arm/mach-sti/Kconfig
arch/arm/mach-sunxi/sunxi.c
arch/arm/mach-tegra/Kconfig
arch/arm/mach-u300/Kconfig
arch/arm/mach-ux500/Kconfig
arch/arm/mach-vexpress/Kconfig
arch/arm/mach-vt8500/Kconfig
arch/arm/mach-zynq/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/cache-l2x0.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/idmap.c
arch/arm/mm/mmu.c
arch/arm/mm/nommu.c
arch/arm/mm/proc-arm925.S
arch/arm/plat-samsung/Kconfig
arch/arm/xen/grant-table.c
arch/arm64/Kconfig
arch/arm64/Kconfig.debug
arch/arm64/Makefile
arch/arm64/boot/dts/apm-mustang.dts
arch/arm64/boot/dts/apm-storm.dtsi
arch/arm64/configs/defconfig
arch/arm64/crypto/Makefile
arch/arm64/crypto/aes-glue.c
arch/arm64/crypto/ghash-ce-core.S
arch/arm64/crypto/ghash-ce-glue.c
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cachetype.h
arch/arm64/include/asm/cpu.h [new file with mode: 0644]
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/dma-mapping.h
arch/arm64/include/asm/fpsimdmacros.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/pgalloc.h
arch/arm64/include/asm/pgtable-2level-hwdef.h [deleted file]
arch/arm64/include/asm/pgtable-2level-types.h [deleted file]
arch/arm64/include/asm/pgtable-3level-hwdef.h [deleted file]
arch/arm64/include/asm/pgtable-3level-types.h [deleted file]
arch/arm64/include/asm/pgtable-hwdef.h
arch/arm64/include/asm/pgtable-types.h [new file with mode: 0644]
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/asm/stackprotector.h [new file with mode: 0644]
arch/arm64/include/asm/syscall.h
arch/arm64/include/asm/sysreg.h [new file with mode: 0644]
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/tlb.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/include/uapi/asm/posix_types.h [new file with mode: 0644]
arch/arm64/include/uapi/asm/sigcontext.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/cpuinfo.c [new file with mode: 0644]
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/efi-stub.c
arch/arm64/kernel/entry-fpsimd.S
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/hyp-stub.S
arch/arm64/kernel/image.h [new file with mode: 0644]
arch/arm64/kernel/kuser32.S
arch/arm64/kernel/process.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/signal32.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/suspend.c
arch/arm64/kernel/sys_compat.c
arch/arm64/kernel/topology.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vdso/vdso.lds.S
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/mm/copypage.c
arch/arm64/mm/fault.c
arch/arm64/mm/flush.c
arch/arm64/mm/init.c
arch/arm64/mm/ioremap.c
arch/arm64/mm/mmu.c
arch/avr32/include/asm/processor.h
arch/blackfin/Kconfig
arch/blackfin/configs/BF609-EZKIT_defconfig
arch/blackfin/include/asm/processor.h
arch/blackfin/kernel/ftrace-entry.S
arch/blackfin/kernel/perf_event.c
arch/blackfin/kernel/vmlinux.lds.S
arch/blackfin/mach-bf533/boards/blackstamp.c
arch/blackfin/mach-bf537/boards/cm_bf537e.c
arch/blackfin/mach-bf537/boards/cm_bf537u.c
arch/blackfin/mach-bf537/boards/tcm_bf537.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf561/boards/acvilon.c
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/ezkit.c
arch/blackfin/mach-bf609/boards/ezkit.c
arch/blackfin/mach-bf609/include/mach/pm.h
arch/blackfin/mach-bf609/pm.c
arch/blackfin/mach-common/ints-priority.c
arch/c6x/include/asm/processor.h
arch/cris/include/asm/processor.h
arch/hexagon/include/asm/processor.h
arch/ia64/hp/common/sba_iommu.c
arch/ia64/include/asm/processor.h
arch/ia64/include/uapi/asm/fcntl.h
arch/ia64/pci/fixup.c
arch/ia64/sn/kernel/bte.c
arch/ia64/sn/kernel/setup.c
arch/m32r/include/asm/processor.h
arch/m68k/include/asm/processor.h
arch/m68k/include/asm/sun3_pgalloc.h
arch/m68k/kernel/head.S
arch/m68k/kernel/time.c
arch/metag/Kconfig
arch/metag/include/asm/processor.h
arch/metag/kernel/ftrace_stub.S
arch/metag/kernel/perf/perf_event.c
arch/microblaze/Kconfig
arch/microblaze/include/asm/processor.h
arch/microblaze/kernel/ftrace.c
arch/microblaze/kernel/mcount.S
arch/mips/Kconfig
arch/mips/include/asm/kvm_host.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/sigcontext.h
arch/mips/include/asm/uasm.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/sigcontext.h
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/ftrace.c
arch/mips/kernel/irq-msc01.c
arch/mips/kernel/mcount.S
arch/mips/kernel/pm-cps.c
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/smp-cps.c
arch/mips/kvm/Makefile
arch/mips/kvm/callback.c [new file with mode: 0644]
arch/mips/kvm/commpage.c [new file with mode: 0644]
arch/mips/kvm/commpage.h [new file with mode: 0644]
arch/mips/kvm/dyntrans.c [new file with mode: 0644]
arch/mips/kvm/emulate.c [new file with mode: 0644]
arch/mips/kvm/interrupt.c [new file with mode: 0644]
arch/mips/kvm/interrupt.h [new file with mode: 0644]
arch/mips/kvm/kvm_cb.c [deleted file]
arch/mips/kvm/kvm_locore.S [deleted file]
arch/mips/kvm/kvm_mips.c [deleted file]
arch/mips/kvm/kvm_mips_comm.h [deleted file]
arch/mips/kvm/kvm_mips_commpage.c [deleted file]
arch/mips/kvm/kvm_mips_dyntrans.c [deleted file]
arch/mips/kvm/kvm_mips_emul.c [deleted file]
arch/mips/kvm/kvm_mips_int.c [deleted file]
arch/mips/kvm/kvm_mips_int.h [deleted file]
arch/mips/kvm/kvm_mips_opcode.h [deleted file]
arch/mips/kvm/kvm_mips_stats.c [deleted file]
arch/mips/kvm/kvm_tlb.c [deleted file]
arch/mips/kvm/kvm_trap_emul.c [deleted file]
arch/mips/kvm/locore.S [new file with mode: 0644]
arch/mips/kvm/mips.c [new file with mode: 0644]
arch/mips/kvm/opcode.h [new file with mode: 0644]
arch/mips/kvm/stats.c [new file with mode: 0644]
arch/mips/kvm/tlb.c [new file with mode: 0644]
arch/mips/kvm/trace.h
arch/mips/kvm/trap_emul.c [new file with mode: 0644]
arch/mips/math-emu/ieee754.c
arch/mips/mm/uasm-micromips.c
arch/mips/mm/uasm-mips.c
arch/mips/mm/uasm.c
arch/mips/net/bpf_jit.c
arch/mn10300/include/asm/processor.h
arch/openrisc/include/asm/processor.h
arch/parisc/Kconfig
arch/parisc/include/asm/processor.h
arch/parisc/include/uapi/asm/signal.h
arch/parisc/kernel/ftrace.c
arch/parisc/kernel/hardware.c
arch/parisc/kernel/sys_parisc32.c
arch/parisc/kernel/syscall_table.S
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi
arch/powerpc/include/asm/code-patching.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/perf_event_server.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/swab.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/ftrace.c
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kernel/iomap.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/pci-common.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_interrupts.S
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_interrupts.S
arch/powerpc/kvm/book3s_rmhandlers.S
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/e500_mmu_host.c
arch/powerpc/lib/mem_64.S
arch/powerpc/lib/sstep.c
arch/powerpc/mm/mmu_context_nohash.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/perf/hv-gpci.c
arch/powerpc/perf/power8-pmu.c
arch/powerpc/platforms/cell/cbe_thermal.c
arch/powerpc/platforms/cell/spu_syscalls.c
arch/powerpc/platforms/cell/spufs/Makefile
arch/powerpc/platforms/cell/spufs/syscalls.c
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/powernv/opal-elog.c
arch/powerpc/platforms/powernv/opal-takeover.S [deleted file]
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/reconfig.c
arch/powerpc/sysdev/dart_iommu.c
arch/s390/Kconfig
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/include/asm/kvm_host.h
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/processor.h
arch/s390/include/asm/switch_to.h
arch/s390/include/uapi/asm/Kbuild
arch/s390/include/uapi/asm/kvm_perf.h [new file with mode: 0644]
arch/s390/include/uapi/asm/sie.h
arch/s390/include/uapi/asm/ucontext.h
arch/s390/kernel/compat_linux.h
arch/s390/kernel/head.S
arch/s390/kernel/mcount.S
arch/s390/kernel/mcount64.S
arch/s390/kernel/perf_cpum_cf.c
arch/s390/kernel/ptrace.c
arch/s390/kvm/diag.c
arch/s390/kvm/intercept.c
arch/s390/kvm/interrupt.c
arch/s390/kvm/kvm-s390.c
arch/s390/kvm/kvm-s390.h
arch/s390/kvm/sigp.c
arch/s390/pci/pci.c
arch/score/include/asm/processor.h
arch/sh/Kconfig
arch/sh/Makefile
arch/sh/include/asm/processor.h
arch/sh/kernel/ftrace.c
arch/sh/kernel/perf_event.c
arch/sh/lib/mcount.S
arch/sparc/Kconfig
arch/sparc/crypto/aes_glue.c
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/auxio.h
arch/sparc/include/asm/auxio_32.h
arch/sparc/include/asm/auxio_64.h
arch/sparc/include/asm/bitext.h
arch/sparc/include/asm/bitops_32.h
arch/sparc/include/asm/bitops_64.h
arch/sparc/include/asm/btext.h
arch/sparc/include/asm/bug.h
arch/sparc/include/asm/cacheflush_32.h
arch/sparc/include/asm/cacheflush_64.h
arch/sparc/include/asm/checksum_32.h
arch/sparc/include/asm/checksum_64.h
arch/sparc/include/asm/cmpxchg_32.h
arch/sparc/include/asm/cmpxchg_64.h
arch/sparc/include/asm/cpudata.h
arch/sparc/include/asm/cpudata_64.h
arch/sparc/include/asm/delay_32.h
arch/sparc/include/asm/delay_64.h
arch/sparc/include/asm/device.h
arch/sparc/include/asm/dma-mapping.h
arch/sparc/include/asm/ebus_dma.h
arch/sparc/include/asm/floppy_32.h
arch/sparc/include/asm/floppy_64.h
arch/sparc/include/asm/ftrace.h
arch/sparc/include/asm/highmem.h
arch/sparc/include/asm/hvtramp.h
arch/sparc/include/asm/hypervisor.h
arch/sparc/include/asm/idprom.h
arch/sparc/include/asm/io-unit.h
arch/sparc/include/asm/io_32.h
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/iommu_32.h
arch/sparc/include/asm/iommu_64.h
arch/sparc/include/asm/irq_32.h
arch/sparc/include/asm/irq_64.h
arch/sparc/include/asm/irqflags_32.h
arch/sparc/include/asm/kdebug_64.h
arch/sparc/include/asm/kgdb.h
arch/sparc/include/asm/kprobes.h
arch/sparc/include/asm/ldc.h
arch/sparc/include/asm/leon.h
arch/sparc/include/asm/leon_pci.h
arch/sparc/include/asm/mc146818rtc.h
arch/sparc/include/asm/mdesc.h
arch/sparc/include/asm/mmu_64.h
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/include/asm/nmi.h
arch/sparc/include/asm/oplib_32.h
arch/sparc/include/asm/oplib_64.h
arch/sparc/include/asm/page.h
arch/sparc/include/asm/page_64.h
arch/sparc/include/asm/pci_64.h
arch/sparc/include/asm/pcic.h
arch/sparc/include/asm/pcr.h
arch/sparc/include/asm/pgalloc_32.h
arch/sparc/include/asm/pgalloc_64.h
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/processor_32.h
arch/sparc/include/asm/processor_64.h
arch/sparc/include/asm/prom.h
arch/sparc/include/asm/ptrace.h
arch/sparc/include/asm/setup.h
arch/sparc/include/asm/sfp-machine_32.h
arch/sparc/include/asm/smp_32.h
arch/sparc/include/asm/smp_64.h
arch/sparc/include/asm/spitfire.h
arch/sparc/include/asm/stacktrace.h
arch/sparc/include/asm/starfire.h
arch/sparc/include/asm/string_32.h
arch/sparc/include/asm/string_64.h
arch/sparc/include/asm/switch_to_32.h
arch/sparc/include/asm/switch_to_64.h
arch/sparc/include/asm/syscalls.h
arch/sparc/include/asm/timer_32.h
arch/sparc/include/asm/timer_64.h
arch/sparc/include/asm/tlb_64.h
arch/sparc/include/asm/tlbflush_64.h
arch/sparc/include/asm/topology_64.h
arch/sparc/include/asm/trap_block.h
arch/sparc/include/asm/uaccess.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/sparc/include/asm/vio.h
arch/sparc/include/asm/visasm.h
arch/sparc/include/asm/xor_64.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/audit.c
arch/sparc/kernel/auxio_32.c
arch/sparc/kernel/btext.c
arch/sparc/kernel/compat_audit.c
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.h
arch/sparc/kernel/devices.c
arch/sparc/kernel/entry.h
arch/sparc/kernel/iommu.c
arch/sparc/kernel/iommu_common.h
arch/sparc/kernel/ioport.c
arch/sparc/kernel/irq.h
arch/sparc/kernel/irq_32.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/kprobes.c
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/leon_pci.c
arch/sparc/kernel/leon_pci_grpci1.c
arch/sparc/kernel/leon_pci_grpci2.c
arch/sparc/kernel/leon_pmc.c
arch/sparc/kernel/leon_smp.c
arch/sparc/kernel/of_device_common.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/pci_impl.h
arch/sparc/kernel/pci_sun4v.h
arch/sparc/kernel/pcic.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/prom.h
arch/sparc/kernel/prom_64.c
arch/sparc/kernel/psycho_common.h
arch/sparc/kernel/ptrace_32.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/smp_32.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/sun4d_irq.c
arch/sparc/kernel/sys32.S
arch/sparc/kernel/sys_sparc32.c
arch/sparc/kernel/sys_sparc_32.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/systbls.h
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/tadpole.c [deleted file]
arch/sparc/kernel/time_32.c
arch/sparc/kernel/traps_32.c
arch/sparc/kernel/traps_64.c
arch/sparc/kernel/unaligned_32.c
arch/sparc/kernel/unaligned_64.c
arch/sparc/kernel/windows.c
arch/sparc/lib/Makefile
arch/sparc/lib/mcount.S
arch/sparc/math-emu/sfp-util_32.h
arch/sparc/math-emu/sfp-util_64.h
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/mm/init_32.c
arch/sparc/mm/init_64.c
arch/sparc/mm/init_64.h
arch/sparc/mm/io-unit.c
arch/sparc/mm/iommu.c
arch/sparc/mm/leon_mm.c
arch/sparc/mm/mm_32.h [new file with mode: 0644]
arch/sparc/mm/srmmu.c
arch/sparc/mm/srmmu.h [deleted file]
arch/sparc/mm/tsb.c
arch/sparc/prom/misc_64.c
arch/tile/Kconfig
arch/tile/include/asm/processor.h
arch/tile/kernel/mcount_64.S
arch/um/kernel/tlb.c
arch/um/kernel/trap.c
arch/um/os-Linux/skas/process.c
arch/unicore32/Kconfig
arch/unicore32/include/asm/io.h
arch/unicore32/include/asm/pgtable.h
arch/unicore32/include/asm/processor.h
arch/unicore32/include/asm/ptrace.h
arch/unicore32/kernel/clock.c
arch/unicore32/kernel/ksyms.c
arch/unicore32/kernel/ksyms.h
arch/unicore32/kernel/module.c
arch/unicore32/kernel/process.c
arch/unicore32/kernel/setup.c
arch/unicore32/mm/alignment.c
arch/unicore32/mm/proc-syms.c
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/code16gcc.h
arch/x86/boot/compressed/aslr.c
arch/x86/boot/header.S
arch/x86/boot/tools/build.c
arch/x86/crypto/Makefile
arch/x86/crypto/aes_ctrby8_avx-x86_64.S [new file with mode: 0644]
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/crc32c-pcl-intel-asm_64.S
arch/x86/crypto/des3_ede-asm_64.S [new file with mode: 0644]
arch/x86/crypto/des3_ede_glue.c [new file with mode: 0644]
arch/x86/crypto/sha512_ssse3_glue.c
arch/x86/include/asm/barrier.h
arch/x86/include/asm/cmpxchg.h
arch/x86/include/asm/cmpxchg_32.h
arch/x86/include/asm/cmpxchg_64.h
arch/x86/include/asm/ftrace.h
arch/x86/include/asm/irq.h
arch/x86/include/asm/irqflags.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mc146818rtc.h
arch/x86/include/asm/mutex_32.h
arch/x86/include/asm/percpu.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/qrwlock.h
arch/x86/include/asm/vga.h
arch/x86/include/asm/vmx.h
arch/x86/include/uapi/asm/Kbuild
arch/x86/include/uapi/asm/kvm.h
arch/x86/include/uapi/asm/kvm_perf.h [new file with mode: 0644]
arch/x86/include/uapi/asm/msr-index.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/hw_nmi.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_amd_uncore.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/espfix_64.c
arch/x86/kernel/ftrace.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/mcount_64.S
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/resource.c
arch/x86/kernel/signal.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/emulate.c
arch/x86/kvm/lapic.c
arch/x86/kvm/mmutrace.h
arch/x86/kvm/pmu.c
arch/x86/kvm/svm.c
arch/x86/kvm/trace.h
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h
arch/x86/pci/fixup.c
arch/x86/pci/i386.c
arch/x86/power/cpu.c
arch/x86/um/asm/processor.h
arch/x86/vdso/Makefile
arch/x86/vdso/vclock_gettime.c
arch/x86/vdso/vdso-fakesections.c
arch/x86/vdso/vdso-layout.lds.S
arch/x86/vdso/vdso.lds.S
arch/x86/vdso/vdso2c.c
arch/x86/vdso/vdso2c.h
arch/x86/vdso/vdso32/vdso-fakesections.c [new file with mode: 0644]
arch/x86/vdso/vdsox32.lds.S
arch/x86/vdso/vma.c
arch/x86/xen/enlighten.c
arch/x86/xen/grant-table.c
arch/x86/xen/setup.c
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/processor.h
arch/xtensa/kernel/vectors.S
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/mm/init.c
block/bio.c
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-core.c
block/blk-flush.c
block/blk-merge.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-tag.c
block/blk-throttle.c
block/blk.h
block/compat_ioctl.c
block/elevator.c
crypto/Kconfig
crypto/Makefile
crypto/af_alg.c
crypto/algapi.c
crypto/cryptd.c
crypto/des_generic.c
crypto/drbg.c [new file with mode: 0644]
crypto/eseqiv.c
crypto/gcm.c
crypto/lzo.c
crypto/seqiv.c
crypto/tcrypt.c
crypto/testmgr.c
crypto/testmgr.h
drivers/acpi/ac.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_pnp.c
drivers/acpi/battery.c
drivers/acpi/ec.c
drivers/acpi/osl.c
drivers/acpi/resource.c
drivers/acpi/tables.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/acard-ahci.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_da850.c
drivers/ata/ahci_imx.c
drivers/ata/ahci_mvebu.c
drivers/ata/ahci_platform.c
drivers/ata/ahci_st.c
drivers/ata/ahci_sunxi.c
drivers/ata/ahci_tegra.c [new file with mode: 0644]
drivers/ata/ahci_xgene.c
drivers/ata/libahci.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/pata_ep93xx.c
drivers/ata/pata_samsung_cf.c
drivers/ata/sata_fsl.c
drivers/ata/sata_highbank.c
drivers/ata/sata_sil24.c
drivers/base/dma-contiguous.c
drivers/base/platform.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_receiver.c
drivers/block/floppy.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_h5.c
drivers/bus/Kconfig
drivers/char/hw_random/core.c
drivers/char/hw_random/virtio-rng.c
drivers/char/i8k.c
drivers/char/random.c
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/at91/clk-main.c
drivers/clk/clk-clps711x.c [new file with mode: 0644]
drivers/clk/clk-composite.c
drivers/clk/clk-conf.c [new file with mode: 0644]
drivers/clk/clk-palmas.c [new file with mode: 0644]
drivers/clk/clk-ppc-corenet.c
drivers/clk/clk-s2mps11.c
drivers/clk/clk.c
drivers/clk/clkdev.c
drivers/clk/qcom/Kconfig
drivers/clk/qcom/Makefile
drivers/clk/qcom/clk-pll.c
drivers/clk/qcom/clk-pll.h
drivers/clk/qcom/clk-rcg.c
drivers/clk/qcom/clk-rcg.h
drivers/clk/qcom/common.c
drivers/clk/qcom/common.h
drivers/clk/qcom/gcc-apq8084.c [new file with mode: 0644]
drivers/clk/qcom/gcc-ipq806x.c [new file with mode: 0644]
drivers/clk/qcom/gcc-msm8960.c
drivers/clk/qcom/mmcc-apq8084.c [new file with mode: 0644]
drivers/clk/qcom/mmcc-msm8960.c
drivers/clk/qcom/mmcc-msm8974.c
drivers/clk/rockchip/Makefile
drivers/clk/rockchip/clk-pll.c [new file with mode: 0644]
drivers/clk/rockchip/clk-rk3188.c [new file with mode: 0644]
drivers/clk/rockchip/clk-rk3288.c [new file with mode: 0644]
drivers/clk/rockchip/clk.c [new file with mode: 0644]
drivers/clk/rockchip/clk.h [new file with mode: 0644]
drivers/clk/rockchip/softrst.c [new file with mode: 0644]
drivers/clk/samsung/Makefile
drivers/clk/samsung/clk-exynos-clkout.c [new file with mode: 0644]
drivers/clk/samsung/clk-exynos3250.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos5250.c
drivers/clk/samsung/clk-exynos5260.c
drivers/clk/samsung/clk-exynos5410.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/samsung/clk-exynos5440.c
drivers/clk/samsung/clk-s3c2410.c
drivers/clk/samsung/clk-s3c2412.c
drivers/clk/samsung/clk-s3c2443.c
drivers/clk/samsung/clk-s3c64xx.c
drivers/clk/samsung/clk.c
drivers/clk/samsung/clk.h
drivers/clk/spear/spear1310_clock.c
drivers/clk/spear/spear1340_clock.c
drivers/clk/spear/spear3xx_clock.c
drivers/clk/st/Makefile
drivers/clk/st/clk-flexgen.c [new file with mode: 0644]
drivers/clk/st/clkgen-fsyn.c
drivers/clk/st/clkgen-mux.c
drivers/clk/st/clkgen-pll.c
drivers/clk/sunxi/Makefile
drivers/clk/sunxi/clk-a20-gmac.c
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-factors.h
drivers/clk/sunxi/clk-sun6i-apb0-gates.c
drivers/clk/sunxi/clk-sun6i-apb0.c
drivers/clk/sunxi/clk-sun6i-ar100.c
drivers/clk/sunxi/clk-sun8i-apb0.c [new file with mode: 0644]
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/tegra/clk-pll.c
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra114.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/tegra/clk.c
drivers/clk/ti/apll.c
drivers/clk/ti/clk-7xx.c
drivers/clk/ti/dpll.c
drivers/clk/ti/mux.c
drivers/clocksource/exynos_mct.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/sa1110-cpufreq.c
drivers/cpuidle/cpuidle-armada-370-xp.c
drivers/crypto/Kconfig
drivers/crypto/Makefile
drivers/crypto/amcc/crypto4xx_core.c
drivers/crypto/atmel-sha.c
drivers/crypto/atmel-tdes.c
drivers/crypto/caam/caamalg.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caamrng.c
drivers/crypto/caam/ctrl.c
drivers/crypto/caam/desc.h
drivers/crypto/caam/intern.h
drivers/crypto/caam/jr.c
drivers/crypto/caam/regs.h
drivers/crypto/ccp/Makefile
drivers/crypto/ccp/ccp-dev.c
drivers/crypto/ccp/ccp-dev.h
drivers/crypto/ccp/ccp-ops.c
drivers/crypto/ccp/ccp-pci.c
drivers/crypto/ccp/ccp-platform.c [new file with mode: 0644]
drivers/crypto/nx/nx-842.c
drivers/crypto/qat/Kconfig [new file with mode: 0644]
drivers/crypto/qat/Makefile [new file with mode: 0644]
drivers/crypto/qat/qat_common/Makefile [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_accel_devices.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_accel_engine.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_aer.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg_common.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg_strings.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_cfg_user.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_common_drv.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_ctl_drv.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_dev_mgr.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_init.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport_access_macros.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport_debug.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/adf_transport_internal.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_fw.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_fw_la.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_hal.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_hw.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/icp_qat_uclo.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_algs.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_crypto.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_crypto.h [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_hal.c [new file with mode: 0644]
drivers/crypto/qat/qat_common/qat_uclo.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/Makefile [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_admin.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_drv.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_drv.h [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/adf_isr.c [new file with mode: 0644]
drivers/crypto/qat/qat_dh895xcc/qat_admin.c [new file with mode: 0644]
drivers/crypto/qce/Makefile [new file with mode: 0644]
drivers/crypto/qce/ablkcipher.c [new file with mode: 0644]
drivers/crypto/qce/cipher.h [new file with mode: 0644]
drivers/crypto/qce/common.c [new file with mode: 0644]
drivers/crypto/qce/common.h [new file with mode: 0644]
drivers/crypto/qce/core.c [new file with mode: 0644]
drivers/crypto/qce/core.h [new file with mode: 0644]
drivers/crypto/qce/dma.c [new file with mode: 0644]
drivers/crypto/qce/dma.h [new file with mode: 0644]
drivers/crypto/qce/regs-v5.h [new file with mode: 0644]
drivers/crypto/qce/sha.c [new file with mode: 0644]
drivers/crypto/qce/sha.h [new file with mode: 0644]
drivers/crypto/ux500/cryp/cryp_core.c
drivers/dma/cppi41.c
drivers/dma/imx-sdma.c
drivers/edac/Kconfig
drivers/edac/Makefile
drivers/edac/edac_module.c
drivers/edac/ie31200_edac.c [new file with mode: 0644]
drivers/edac/mce_amd.c
drivers/edac/x38_edac.c
drivers/firewire/Kconfig
drivers/firewire/ohci.c
drivers/firmware/efi/efi-pstore.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/fdt.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-rcar.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_modeset_lock.c
drivers/gpu/drm/exynos/exynos_drm_dpi.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-mixer.h
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi_cmd.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_mmu.h
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
drivers/gpu/drm/nouveau/core/engine/disp/base.c
drivers/gpu/drm/nouveau/core/engine/disp/dport.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/qxl/qxl_irq.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/cypress_dpm.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/radeon/r500_reg.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770_dpm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/hid/Kconfig
drivers/hid/hid-ids.h
drivers/hid/hid-rmi.c
drivers/hid/hid-sensor-hub.c
drivers/hid/usbhid/hid-quirks.c
drivers/hv/connection.c
drivers/hv/hv_fcopy.c
drivers/hv/hv_kvp.c
drivers/hv/hv_util.c
drivers/hwmon/Kconfig
drivers/hwmon/adc128d818.c
drivers/hwmon/adm1021.c
drivers/hwmon/adm1029.c
drivers/hwmon/adm1031.c
drivers/hwmon/adt7470.c
drivers/hwmon/amc6821.c
drivers/hwmon/da9052-hwmon.c
drivers/hwmon/da9055-hwmon.c
drivers/hwmon/emc2103.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/smsc47m192.c
drivers/hwmon/w83l786ng.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-rk3x.c [new file with mode: 0644]
drivers/i2c/busses/i2c-sun6i-p2wi.c [new file with mode: 0644]
drivers/i2c/i2c-core.c
drivers/i2c/muxes/Kconfig
drivers/ide/Kconfig
drivers/ide/ide-probe.c
drivers/iio/accel/bma180.c
drivers/iio/accel/hid-sensor-accel-3d.c
drivers/iio/accel/mma8452.c
drivers/iio/adc/ad799x.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/men_z188_adc.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/adc/twl4030-madc.c
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
drivers/iio/gyro/hid-sensor-gyro-3d.c
drivers/iio/industrialio-buffer.c
drivers/iio/industrialio-event.c
drivers/iio/inkern.c
drivers/iio/light/hid-sensor-als.c
drivers/iio/light/hid-sensor-prox.c
drivers/iio/light/tcs3472.c
drivers/iio/magnetometer/ak8975.c
drivers/iio/magnetometer/hid-sensor-magn-3d.c
drivers/iio/pressure/hid-sensor-press.c
drivers/iio/pressure/mpl3115.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/mlx5/qp.c
drivers/input/input.c
drivers/input/keyboard/st-keyscan.c
drivers/input/misc/sirfsoc-onkey.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/ti_am335x_tsc.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/fsl_pamu.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/intel-iommu.c
drivers/irqchip/Kconfig
drivers/irqchip/Makefile
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/irqchip/irq-gic-common.c [new file with mode: 0644]
drivers/irqchip/irq-gic-common.h [new file with mode: 0644]
drivers/irqchip/irq-gic-v3.c [new file with mode: 0644]
drivers/irqchip/irq-gic.c
drivers/irqchip/spear-shirq.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/isdn/hisax/Kconfig
drivers/isdn/hisax/l3ni1.c
drivers/isdn/i4l/isdn_ppp.c
drivers/macintosh/smu.c
drivers/md/dm-bufio.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-cache-target.c
drivers/md/dm-crypt.c
drivers/md/dm-io.c
drivers/md/dm-mpath.c
drivers/md/dm-snap.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-zero.c
drivers/md/dm.c
drivers/md/md.c
drivers/media/dvb-frontends/si2168.c
drivers/media/dvb-frontends/si2168_priv.h
drivers/media/dvb-frontends/tda10071.c
drivers/media/dvb-frontends/tda10071_priv.h
drivers/media/pci/saa7134/saa7134-empress.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/tuners/si2157.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
drivers/media/usb/gspca/pac7302.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/memstick/host/rtsx_pci_ms.c
drivers/mfd/Kconfig
drivers/mfd/ab8500-core.c
drivers/misc/Kconfig
drivers/misc/vexpress-syscfg.c
drivers/misc/vmw_balloon.c
drivers/mtd/chips/cfi_cmdset_0001.c
drivers/mtd/devices/elm.c
drivers/mtd/nand/nand_base.c
drivers/mtd/ubi/fastmap.c
drivers/net/bonding/bond_main.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/slcan.c
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/dec/tulip/timer.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/tile/tilegx.c
drivers/net/fddi/defxx.c
drivers/net/hyperv/netvsc.c
drivers/net/ieee802154/at86rf230.c
drivers/net/phy/at803x.c
drivers/net/phy/dp83640.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pppoe.c
drivers/net/slip/slip.c
drivers/net/slip/slip.h
drivers/net/usb/cdc_ether.c
drivers/net/usb/hso.c
drivers/net/usb/huawei_cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc95xx.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wan/farsync.c
drivers/net/wan/x25_asy.c
drivers/net/wireless/ath/ath10k/core.c
drivers/net/wireless/ath/ath10k/htt_rx.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/b43/main.c
drivers/net/wireless/b43/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/usb.c
drivers/net/wireless/iwlwifi/dvm/rxon.c
drivers/net/wireless/iwlwifi/iwl-fw.h
drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/drv.c
drivers/net/wireless/mwifiex/11n_aggr.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/main.c
drivers/net/wireless/mwifiex/pcie.c
drivers/net/wireless/mwifiex/sta_tx.c
drivers/net/wireless/mwifiex/tdls.c
drivers/net/wireless/mwifiex/txrx.c
drivers/net/wireless/mwifiex/uap_txrx.c
drivers/net/wireless/mwifiex/util.h
drivers/net/wireless/rt2x00/rt2500pci.c
drivers/net/wireless/rt2x00/rt2800usb.c
drivers/net/wireless/rt2x00/rt2x00.h
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/net/wireless/rt2x00/rt2x00mac.c
drivers/net/wireless/rt2x00/rt2x00usb.h
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/of/base.c
drivers/of/fdt.c
drivers/of/of_mdio.c
drivers/of/platform.c
drivers/parport/Kconfig
drivers/pci/host/pci-host-generic.c
drivers/pci/host/pci-mvebu.c
drivers/pci/host/pci-tegra.c
drivers/pci/host/pcie-rcar.c
drivers/pci/hotplug/cpqphp_sysfs.c
drivers/pci/hotplug/pciehp.h
drivers/pci/hotplug/pciehp_core.c
drivers/pci/hotplug/pciehp_hpc.c
drivers/pci/msi.c
drivers/pci/pci-label.c
drivers/pci/pci.c
drivers/pci/pcie/portdrv_pci.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/pci/setup-res.c
drivers/phy/Kconfig
drivers/phy/phy-core.c
drivers/phy/phy-omap-usb2.c
drivers/phy/phy-samsung-usb2.c
drivers/pinctrl/berlin/berlin.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/pnp/pnpacpi/core.c
drivers/ptp/Kconfig
drivers/rapidio/devices/tsi721_dma.c
drivers/regulator/as3722-regulator.c
drivers/regulator/bcm590xx-regulator.c
drivers/regulator/ltc3589.c
drivers/regulator/palmas-regulator.c
drivers/regulator/tps65218-regulator.c
drivers/remoteproc/Kconfig
drivers/rtc/rtc-puv3.c
drivers/s390/block/dcssblk.c
drivers/s390/char/Makefile
drivers/s390/char/raw3270.c
drivers/s390/char/sclp_vt220.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/vmwatchdog.c [deleted file]
drivers/s390/cio/airq.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/cio.c
drivers/s390/cio/device.c
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_debug.h
drivers/s390/cio/qdio_main.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/zcrypt_api.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/mvsas/mv_94xx.c
drivers/scsi/mvsas/mv_94xx.h
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_trace.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sd.c
drivers/scsi/virtio_scsi.c
drivers/spi/spi-pxa2xx-dma.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-qup.c
drivers/spi/spi-sh-sci.c
drivers/spi/spi.c
drivers/staging/android/timed_output.c
drivers/staging/comedi/Kconfig
drivers/staging/iio/Kconfig
drivers/staging/iio/adc/ad7291.c
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/light/tsl2x7x_core.c
drivers/staging/imx-drm/parallel-display.c
drivers/staging/media/omap4iss/Kconfig
drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
drivers/staging/rtl8723au/os_dep/os_intfs.c
drivers/staging/rtl8723au/os_dep/usb_intf.c
drivers/staging/tidspbridge/core/tiomap3430.c
drivers/staging/vt6655/bssdb.c
drivers/staging/vt6655/device_main.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_device.c
drivers/target/target_core_tpg.c
drivers/tc/tc.c
drivers/thermal/imx_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/thermal_hwmon.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/altera_uart.c
drivers/tty/serial/amba-pl010.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/arc_uart.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/bfin_uart.c
drivers/tty/serial/dz.c
drivers/tty/serial/earlycon.c
drivers/tty/serial/efm32-uart.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/tty/serial/ip22zilog.c
drivers/tty/serial/m32r_sio.c
drivers/tty/serial/max310x.c
drivers/tty/serial/mcf.c
drivers/tty/serial/mfd.c
drivers/tty/serial/mpsc.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/netx-serial.c
drivers/tty/serial/pmac_zilog.c
drivers/tty/serial/pnx8xxx_uart.c
drivers/tty/serial/pxa.c
drivers/tty/serial/samsung.c
drivers/tty/serial/sb1250-duart.c
drivers/tty/serial/sccnxp.c
drivers/tty/serial/serial_ks8695.c
drivers/tty/serial/serial_txx9.c
drivers/tty/serial/sirfsoc_uart.c
drivers/tty/serial/st-asc.c
drivers/tty/serial/sunsab.c
drivers/tty/serial/sunsu.c
drivers/tty/serial/sunzilog.c
drivers/tty/serial/ucc_uart.c
drivers/tty/serial/vr41xx_siu.c
drivers/tty/serial/zs.c
drivers/tty/vt/vt.c
drivers/uio/uio.c
drivers/usb/chipidea/udc.c
drivers/usb/core/hub.c
drivers/usb/core/hub.h
drivers/usb/core/port.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/configfs.h
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/gr_udc.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/u_ether.c
drivers/usb/host/Kconfig
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/misc/usbtest.c
drivers/usb/musb/musb_am335x.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/ux500.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/unusual_devs.h
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/video/console/dummycon.c
drivers/video/console/vgacon.c
drivers/video/fbdev/atmel_lcdfb.c
drivers/video/fbdev/bfin_adv7393fb.c
drivers/video/fbdev/efifb.c
drivers/video/fbdev/offb.c
drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
drivers/video/fbdev/vt8500lcdfb.c
drivers/w1/masters/mxc_w1.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/diag288_wdt.c [new file with mode: 0644]
drivers/xen/balloon.c
drivers/xen/grant-table.c
drivers/xen/manage.c
drivers/zorro/names.c
firmware/Makefile
fs/afs/main.c
fs/aio.c
fs/autofs4/inode.c
fs/btrfs/compression.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/extent_map.c
fs/btrfs/extent_map.h
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/locking.c
fs/btrfs/ordered-data.c
fs/btrfs/print-tree.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/sysfs.h
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/zlib.c
fs/buffer.c
fs/cifs/cifs_unicode.c
fs/cifs/cifsfs.c
fs/cifs/connect.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/misc.c
fs/coredump.c
fs/direct-io.c
fs/eventpoll.c
fs/ext4/balloc.c
fs/ext4/extents_status.c
fs/ext4/ialloc.c
fs/ext4/indirect.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/f2fs/data.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/fs-writeback.c
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/main.c
fs/fscache/page.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/lock_dlm.c
fs/gfs2/ops_fstype.c
fs/gfs2/recovery.c
fs/gfs2/rgrp.c
fs/gfs2/super.c
fs/inode.c
fs/jbd2/transaction.c
fs/kernfs/file.c
fs/kernfs/mount.c
fs/locks.c
fs/mbcache.c
fs/namei.c
fs/nfs/direct.c
fs/nfs/file.c
fs/nfs/filelayout/filelayoutdev.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs3acl.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4namespace.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4state.c
fs/nfs/pagelist.c
fs/nfs/pnfs.c
fs/nfs/write.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/dlmthread.c
fs/ocfs2/dlm/dlmunlock.c
fs/ocfs2/namei.c
fs/ocfs2/ocfs2_trace.h
fs/ocfs2/refcounttree.c
fs/ocfs2/super.c
fs/open.c
fs/proc/stat.c
fs/quota/dquot.c
fs/seq_file.c
fs/xattr.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_btree.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_sb.c
include/acpi/processor.h
include/acpi/video.h
include/asm-generic/io-64-nonatomic-hi-lo.h
include/asm-generic/io-64-nonatomic-lo-hi.h
include/asm-generic/percpu.h
include/asm-generic/vmlinux.lds.h
include/crypto/aead.h
include/crypto/algapi.h
include/crypto/des.h
include/crypto/drbg.h [new file with mode: 0644]
include/crypto/hash.h
include/crypto/internal/skcipher.h
include/crypto/scatterwalk.h
include/crypto/skcipher.h
include/drm/i915_pciids.h
include/drm/i915_powerwell.h
include/dt-bindings/clock/clps711x-clock.h [new file with mode: 0644]
include/dt-bindings/clock/exynos4.h
include/dt-bindings/clock/exynos5250.h
include/dt-bindings/clock/exynos5420.h
include/dt-bindings/clock/exynos5440.h
include/dt-bindings/clock/imx6sl-clock.h
include/dt-bindings/clock/qcom,gcc-apq8084.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,gcc-ipq806x.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,gcc-msm8960.h
include/dt-bindings/clock/qcom,mmcc-apq8084.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,mmcc-msm8960.h
include/dt-bindings/clock/rk3066a-cru.h [new file with mode: 0644]
include/dt-bindings/clock/rk3188-cru-common.h [new file with mode: 0644]
include/dt-bindings/clock/rk3188-cru.h [new file with mode: 0644]
include/dt-bindings/clock/rk3288-cru.h [new file with mode: 0644]
include/dt-bindings/clock/stih415-clks.h
include/dt-bindings/clock/stih416-clks.h
include/dt-bindings/mfd/palmas.h [new file with mode: 0644]
include/dt-bindings/pinctrl/dra.h
include/dt-bindings/reset/qcom,gcc-apq8084.h [new file with mode: 0644]
include/dt-bindings/reset/qcom,gcc-ipq806x.h [new file with mode: 0644]
include/dt-bindings/reset/qcom,gcc-msm8960.h
include/dt-bindings/reset/qcom,mmcc-apq8084.h [new file with mode: 0644]
include/dt-bindings/reset/qcom,mmcc-msm8960.h
include/linux/ahci_platform.h
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/cgroup.h
include/linux/clk-provider.h
include/linux/clk/clk-conf.h [new file with mode: 0644]
include/linux/cpufreq.h
include/linux/crypto.h
include/linux/elevator.h
include/linux/fs.h
include/linux/ftrace.h
include/linux/ftrace_event.h
include/linux/hugetlb.h
include/linux/init_task.h
include/linux/irq_work.h
include/linux/irqchip/arm-gic-v3.h [new file with mode: 0644]
include/linux/kernfs.h
include/linux/kthread.h
include/linux/libata.h
include/linux/mlx4/device.h
include/linux/msi.h
include/linux/mutex.h
include/linux/nmi.h
include/linux/of_fdt.h
include/linux/of_mdio.h
include/linux/osq_lock.h [new file with mode: 0644]
include/linux/page-flags.h
include/linux/pagemap.h
include/linux/pci.h
include/linux/pci_ids.h
include/linux/percpu-defs.h
include/linux/percpu-refcount.h
include/linux/percpu.h
include/linux/phy.h
include/linux/platform_data/ata-samsung_cf.h
include/linux/profile.h
include/linux/ptrace.h
include/linux/rcupdate.h
include/linux/regulator/consumer.h
include/linux/rtmutex.h
include/linux/rwsem-spinlock.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/seqlock.h
include/linux/socket.h
include/linux/sunrpc/sched.h
include/linux/suspend.h
include/linux/tick.h
include/linux/trace_seq.h
include/linux/uio.h
include/linux/usb_usual.h
include/linux/wait.h
include/linux/writeback.h
include/net/ip.h
include/net/neighbour.h
include/net/netfilter/nf_tables.h
include/net/netns/ieee802154_6lowpan.h
include/net/netns/nftables.h
include/net/sock.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/sound/core.h
include/trace/ftrace.h
include/trace/syscall.h
include/uapi/linux/audit.h
include/uapi/linux/btrfs.h
include/uapi/linux/fuse.h
include/uapi/linux/kvm.h
include/uapi/linux/perf_event.h
include/uapi/linux/usb/functionfs.h
include/uapi/sound/compress_offload.h
include/uapi/sound/compress_params.h
include/xen/grant_table.h
init/Kconfig
kernel/Kconfig.locks
kernel/cgroup.c
kernel/cgroup_freezer.c
kernel/context_tracking.c
kernel/cpu.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/fork.c
kernel/futex.c
kernel/irq/irqdesc.c
kernel/irq_work.c
kernel/kexec.c
kernel/kprobes.c
kernel/kthread.c
kernel/locking/lockdep.c
kernel/locking/mcs_spinlock.c
kernel/locking/mcs_spinlock.h
kernel/locking/mutex.c
kernel/locking/qrwlock.c
kernel/locking/rtmutex-debug.c
kernel/locking/rtmutex-debug.h
kernel/locking/rtmutex.c
kernel/locking/rtmutex.h
kernel/locking/rtmutex_common.h
kernel/locking/rwsem-spinlock.c
kernel/locking/rwsem-xadd.c
kernel/locking/rwsem.c
kernel/module.c
kernel/power/hibernate.c
kernel/power/main.c
kernel/power/process.c
kernel/power/suspend.c
kernel/power/user.c
kernel/printk/printk.c
kernel/ptrace.c
kernel/rcu/rcu.h
kernel/rcu/rcutorture.c
kernel/rcu/srcu.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/sched/core.c
kernel/sched/cpuacct.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/idle.c
kernel/sched/idle_task.c
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/wait.c
kernel/signal.c
kernel/smp.c
kernel/sysctl.c
kernel/time/alarmtimer.c
kernel/time/clockevents.c
kernel/time/sched_clock.c
kernel/time/tick-sched.c
kernel/torture.c
kernel/trace/Kconfig
kernel/trace/Makefile
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_clock.c
kernel/trace/trace_event_perf.c
kernel/trace/trace_events.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_output.c
kernel/trace/trace_output.h
kernel/trace/trace_seq.c [new file with mode: 0644]
kernel/trace/trace_uprobe.c
kernel/tracepoint.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig.debug
lib/cpumask.c
lib/iovec.c
lib/lockref.c
lib/lz4/lz4_decompress.c
lib/lzo/lzo1x_decompress_safe.c
lib/percpu-refcount.c
lib/swiotlb.c
mm/filemap.c
mm/huge_memory.c
mm/hugetlb.c
mm/hugetlb_cgroup.c
mm/ksm.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/msync.c
mm/nommu.c
mm/page-writeback.c
mm/page_alloc.c
mm/percpu.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/slab_common.c
mm/slub.c
mm/truncate.c
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/appletalk/ddp.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/soft-interface.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/hci_conn.c
net/bluetooth/hci_core.c
net/bluetooth/hci_event.c
net/bluetooth/l2cap_core.c
net/bluetooth/l2cap_sock.c
net/bluetooth/mgmt.c
net/bluetooth/smp.c
net/compat.c
net/core/dev.c
net/core/dst.c
net/core/filter.c
net/core/iovec.c
net/core/neighbour.c
net/core/netclassid_cgroup.c
net/core/netprio_cgroup.c
net/core/skbuff.c
net/dns_resolver/dns_query.c
net/ipv4/af_inet.c
net/ipv4/gre_demux.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/ip_options.c
net/ipv4/ip_tunnel.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_memcontrol.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv6/ip6_output.c
net/ipv6/mcast.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp.c
net/l2tp/l2tp_ppp.c
net/mac80211/cfg.c
net/mac80211/tx.c
net/mac80211/util.c
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_nat_core.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nft_compat.c
net/netfilter/nft_nat.c
net/netlink/af_netlink.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_table.c
net/openvswitch/flow_table.h
net/openvswitch/vport-gre.c
net/sched/cls_u32.c
net/sctp/associola.c
net/sctp/sysctl.c
net/sctp/ulpevent.c
net/sunrpc/auth.c
net/sunrpc/sched.c
net/tipc/bcast.c
net/tipc/msg.c
net/wireless/core.h
net/wireless/nl80211.c
net/wireless/reg.c
net/wireless/trace.h
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/trace_events/trace-events-sample.h
scripts/checkpatch.pl
scripts/get_maintainer.pl
scripts/kernel-doc
scripts/package/builddeb
scripts/package/buildtar
scripts/recordmcount.h
security/device_cgroup.c
security/keys/gc.c
security/keys/request_key.c
sound/core/control.c
sound/core/init.c
sound/firewire/bebob/bebob_maudio.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_i915.c
sound/pci/hda/hda_i915.h
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/hda_priv.h
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/Kconfig
sound/soc/codecs/Makefile
sound/soc/codecs/sigmadsp-i2c.c [new file with mode: 0644]
sound/soc/codecs/sigmadsp-regmap.c [new file with mode: 0644]
sound/soc/codecs/sigmadsp.c
sound/soc/codecs/sigmadsp.h
sound/soc/fsl/fsl_dma.c
sound/soc/fsl/fsl_spdif.c
sound/soc/fsl/imx-pcm-dma.c
sound/soc/pxa/Kconfig
sound/soc/sh/rcar/core.c
sound/soc/soc-dapm.c
sound/usb/card.c
sound/usb/endpoint.c
sound/usb/endpoint.h
tools/lib/lockdep/include/liblockdep/mutex.h
tools/lib/lockdep/include/liblockdep/rwlock.h
tools/lib/lockdep/preload.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/event-plugin.c
tools/lib/traceevent/plugin_cfg80211.c
tools/lib/traceevent/plugin_function.c
tools/lib/traceevent/plugin_jbd2.c
tools/lib/traceevent/plugin_kvm.c
tools/perf/Documentation/perf-bench.txt
tools/perf/Documentation/perf-inject.txt
tools/perf/Documentation/perf-kvm.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-timechart.txt
tools/perf/Documentation/perf-trace.txt
tools/perf/Documentation/perf.txt
tools/perf/MANIFEST
tools/perf/Makefile.perf
tools/perf/arch/powerpc/Makefile
tools/perf/arch/powerpc/util/header.c
tools/perf/arch/powerpc/util/skip-callchain-idx.c [new file with mode: 0644]
tools/perf/arch/s390/Makefile
tools/perf/arch/s390/util/header.c [new file with mode: 0644]
tools/perf/arch/s390/util/kvm-stat.c [new file with mode: 0644]
tools/perf/arch/x86/Makefile
tools/perf/arch/x86/tests/dwarf-unwind.c
tools/perf/arch/x86/util/kvm-stat.c [new file with mode: 0644]
tools/perf/arch/x86/util/tsc.c
tools/perf/arch/x86/util/tsc.h
tools/perf/arch/x86/util/unwind-libunwind.c
tools/perf/bench/bench.h
tools/perf/bench/futex-requeue.c
tools/perf/bench/futex-wake.c
tools/perf/bench/mem-memcpy.c
tools/perf/bench/mem-memset.c
tools/perf/bench/sched-messaging.c
tools/perf/builtin-bench.c
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-evlist.c
tools/perf/builtin-help.c
tools/perf/builtin-inject.c
tools/perf/builtin-kvm.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-timechart.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/config/feature-checks/Makefile
tools/perf/config/feature-checks/test-all.c
tools/perf/config/feature-checks/test-sync-compare-and-swap.c [new file with mode: 0644]
tools/perf/perf-sys.h
tools/perf/perf.c
tools/perf/scripts/perl/bin/failed-syscalls-record
tools/perf/scripts/perl/failed-syscalls.pl
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
tools/perf/scripts/python/bin/sctop-record
tools/perf/scripts/python/bin/syscall-counts-by-pid-record
tools/perf/scripts/python/bin/syscall-counts-record
tools/perf/scripts/python/check-perf-trace.py
tools/perf/scripts/python/failed-syscalls-by-pid.py
tools/perf/scripts/python/futex-contention.py
tools/perf/scripts/python/net_dropmonitor.py
tools/perf/scripts/python/netdev-times.py
tools/perf/scripts/python/sched-migration.py
tools/perf/scripts/python/sctop.py
tools/perf/scripts/python/syscall-counts-by-pid.py
tools/perf/scripts/python/syscall-counts.py
tools/perf/tests/attr/base-record
tools/perf/tests/attr/base-stat
tools/perf/tests/bp_signal.c
tools/perf/tests/bp_signal_overflow.c
tools/perf/tests/builtin-test.c
tools/perf/tests/dso-data.c
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/evsel-roundtrip-name.c
tools/perf/tests/evsel-tp-sched.c
tools/perf/tests/make
tools/perf/tests/open-syscall-tp-fields.c
tools/perf/tests/parse-events.c
tools/perf/tests/parse-no-sample-id-all.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/tests/rdpmc.c
tools/perf/tests/sample-parsing.c
tools/perf/tests/tests.h
tools/perf/tests/thread-mg-share.c
tools/perf/ui/browser.c
tools/perf/ui/browser.h
tools/perf/ui/browsers/hists.c
tools/perf/ui/stdio/hist.c
tools/perf/util/callchain.c
tools/perf/util/callchain.h
tools/perf/util/cloexec.c [new file with mode: 0644]
tools/perf/util/cloexec.h [new file with mode: 0644]
tools/perf/util/config.c
tools/perf/util/data.c
tools/perf/util/debug.c
tools/perf/util/debug.h
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/kernel.h
tools/perf/util/kvm-stat.h [new file with mode: 0644]
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/parse-options.h
tools/perf/util/perf_regs.c
tools/perf/util/perf_regs.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/pstack.c
tools/perf/util/python.c
tools/perf/util/record.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/svghelper.c
tools/perf/util/svghelper.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol-minimal.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/trace-event-info.c
tools/perf/util/trace-event-read.c
tools/perf/util/tsc.c [new file with mode: 0644]
tools/perf/util/tsc.h [new file with mode: 0644]
tools/perf/util/unwind-libdw.c
tools/perf/util/unwind-libunwind.c
tools/perf/util/util.c
tools/perf/util/util.h
tools/perf/util/vdso.c
tools/perf/util/vdso.h
tools/testing/ktest/ktest.pl
tools/testing/ktest/sample.conf
tools/testing/selftests/cpu-hotplug/Makefile
tools/testing/selftests/ipc/msgque.c
tools/testing/selftests/memory-hotplug/Makefile
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm-test-1-run.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/configs/rcu/TREE01
tools/testing/selftests/rcutorture/configs/rcu/TREE02
tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
tools/testing/selftests/rcutorture/configs/rcu/TREE03
tools/testing/selftests/rcutorture/configs/rcu/TREE04
tools/testing/selftests/rcutorture/configs/rcu/TREE05
tools/testing/selftests/rcutorture/configs/rcu/TREE06
tools/testing/selftests/rcutorture/configs/rcu/TREE07
tools/testing/selftests/rcutorture/configs/rcu/TREE08
tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
tools/testing/selftests/rcutorture/configs/rcu/TREE09
tools/testing/selftests/rcutorture/configs/rcu/v0.0/P5-U-T-NH-sd-SMP-hp
tools/testing/selftests/rcutorture/configs/rcu/v3.12/P5-U-T-NH-sd-SMP-hp
tools/testing/selftests/rcutorture/configs/rcu/v3.3/P5-U-T-NH-sd-SMP-hp
tools/testing/selftests/rcutorture/configs/rcu/v3.5/P5-U-T-NH-sd-SMP-hp
tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
tools/thermal/tmon/Makefile
tools/thermal/tmon/tmon.c
tools/usb/ffs-test.c
virt/kvm/arm/vgic.c
virt/kvm/ioapic.c
virt/kvm/irq_comm.c

index df1baba..1ad6873 100644 (file)
--- a/.mailmap
+++ b/.mailmap
@@ -62,6 +62,11 @@ Jeff Garzik <jgarzik@pretzel.yyz.us>
 Jens Axboe <axboe@suse.de>
 Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
 John Stultz <johnstul@us.ibm.com>
+<josh@joshtriplett.org> <josh@freedesktop.org>
+<josh@joshtriplett.org> <josh@kernel.org>
+<josh@joshtriplett.org> <josht@linux.vnet.ibm.com>
+<josh@joshtriplett.org> <josht@us.ibm.com>
+<josh@joshtriplett.org> <josht@vnet.ibm.com>
 Juha Yrjola <at solidboot.com>
 Juha Yrjola <juha.yrjola@nokia.com>
 Juha Yrjola <juha.yrjola@solidboot.com>
diff --git a/CREDITS b/CREDITS
index c322dcf..a80b667 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -9,6 +9,10 @@
                        Linus
 ----------
 
+N: Matt Mackall
+E: mpm@selenic.com
+D: SLOB slab allocator
+
 N: Matti Aarnio
 E: mea@nic.funet.fi
 D: Alpha systems hacking, IPv6 and other network related stuff
@@ -3507,10 +3511,11 @@ S: MacGregor A.C.T 2615
 S: Australia
 
 N: Josh Triplett
-E: josh@freedesktop.org
-P: 1024D/D0FE7AFB B24A 65C9 1D71 2AC2 DE87  CA26 189B 9946 D0FE 7AFB
-D: rcutorture maintainer
+E: josh@joshtriplett.org
+P: 4096R/8AFF873D 758E 5042 E397 4BA3 3A9C  1E67 0ED9 A3DF 8AFF 873D
+D: RCU and rcutorture
 D: lock annotations, finding and fixing lock bugs
+D: kernel tinification
 
 N: Winfried Trümper
 E: winni@xpilot.org
index 2254db0..227bec8 100644 (file)
@@ -280,12 +280,9 @@ that is possible.
 mcelog
 ------
 
-In Linux 2.6.31+ the i386 kernel needs to run the mcelog utility
-as a regular cronjob similar to the x86-64 kernel to process and log
-machine check events when CONFIG_X86_NEW_MCE is enabled. Machine check
-events are errors reported by the CPU. Processing them is strongly encouraged.
-All x86-64 kernels since 2.6.4 require the mcelog utility to
-process machine checks.
+On x86 kernels the mcelog utility is needed to process and log machine check
+events when CONFIG_X86_MCE is enabled. Machine check events are errors reported
+by the CPU. Processing them is strongly encouraged.
 
 Getting updated software
 ========================
index 4017f14..2c425d7 100644 (file)
@@ -708,7 +708,7 @@ hardware level details could be very different.
 
 <para>Systems need specialized hardware support to implement OTG,
 notably including a special <emphasis>Mini-AB</emphasis> jack
-and associated transciever to support <emphasis>Dual-Role</emphasis>
+and associated transceiver to support <emphasis>Dual-Role</emphasis>
 operation:
 they can act either as a host, using the standard
 Linux-USB host side driver stack,
index 46347f6..59fb5c0 100644 (file)
        <para>
        Each interrupt is described by an interrupt descriptor structure
        irq_desc. The interrupt is referenced by an 'unsigned int' numeric
-       value which selects the corresponding interrupt decription structure
+       value which selects the corresponding interrupt description structure
        in the descriptor structures array.
        The descriptor structure contains status information and pointers
        to the interrupt flow method and the interrupt chip structure
@@ -470,7 +470,7 @@ if (desc->irq_data.chip->irq_eoi)
      <para>
        To avoid copies of identical implementations of IRQ chips the
        core provides a configurable generic interrupt chip
-       implementation. Developers should check carefuly whether the
+       implementation. Developers should check carefully whether the
        generic chip fits their needs before implementing the same
        functionality slightly differently themselves.
      </para>
index 19f2a5a..e584ee1 100644 (file)
@@ -1760,7 +1760,7 @@ as it would be on UP.
 </para>
 
 <para>
-There is a furthur optimization possible here: remember our original
+There is a further optimization possible here: remember our original
 cache code, where there were no reference counts and the caller simply
 held the lock whenever using the object?  This is still possible: if
 you hold the lock, no one can delete the object, so you don't need to
index deb71ba..d7fcdc5 100644 (file)
@@ -677,7 +677,7 @@ and other resources, etc.
 
        <listitem>
        <para>
-       ATA_QCFLAG_ACTIVE is clared from qc->flags.
+       ATA_QCFLAG_ACTIVE is cleared from qc->flags.
        </para>
        </listitem>
 
@@ -708,7 +708,7 @@ and other resources, etc.
 
           <listitem>
           <para>
-          qc->waiting is claread &amp; completed (in that order).
+          qc->waiting is cleared &amp; completed (in that order).
           </para>
           </listitem>
 
@@ -1163,7 +1163,7 @@ and other resources, etc.
 
        <para>
        Once sense data is acquired, this type of errors can be
-       handled similary to other SCSI errors.  Note that sense data
+       handled similarly to other SCSI errors.  Note that sense data
        may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
        &amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR).  In such
        cases, the error should be considered as an ATA bus error and
index 1d27f0a..639e748 100644 (file)
@@ -202,8 +202,8 @@ $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
 
 $(MEDIA_OBJ_DIR)/v4l2.xml: $(OBJIMGFILES)
        @$($(quiet)gen_xml)
-       @(ln -sf $(MEDIA_SRC_DIR)/v4l/*xml $(MEDIA_OBJ_DIR)/)
-       @(ln -sf $(MEDIA_SRC_DIR)/dvb/*xml $(MEDIA_OBJ_DIR)/)
+       @(ln -sf `cd $(MEDIA_SRC_DIR) && /bin/pwd`/v4l/*xml $(MEDIA_OBJ_DIR)/)
+       @(ln -sf `cd $(MEDIA_SRC_DIR) && /bin/pwd`/dvb/*xml $(MEDIA_OBJ_DIR)/)
 
 $(MEDIA_OBJ_DIR)/videodev2.h.xml: $(srctree)/include/uapi/linux/videodev2.h $(MEDIA_OBJ_DIR)/v4l2.xml
        @$($(quiet)gen_xml)
index 4decb46..03f9a1f 100644 (file)
@@ -68,7 +68,7 @@
                several digital tv standards. While it is called as DVB API,
                in fact it covers several different video standards including
                DVB-T, DVB-S, DVB-C and ATSC. The API is currently being updated
-               to documment support also for DVB-S2, ISDB-T and ISDB-S.</para>
+               to document support also for DVB-S2, ISDB-T and ISDB-S.</para>
        <para>The third part covers the Remote Controller API.</para>
        <para>The fourth part covers the Media Controller API.</para>
        <para>For additional information and for the latest development code,
index cd11926..7da8f04 100644 (file)
@@ -91,7 +91,7 @@
                <listitem><para>
                [MTD Interface]</para><para>
                These functions provide the interface to the MTD kernel API. 
-               They are not replacable and provide functionality
+               They are not replaceable and provide functionality
                which is completely hardware independent.
                </para></listitem>
                <listitem><para>
                </para></listitem>
                <listitem><para>
                [GENERIC]</para><para>
-               Generic functions are not replacable and provide functionality
+               Generic functions are not replaceable and provide functionality
                which is completely hardware independent.
                </para></listitem>
                <listitem><para>
                [DEFAULT]</para><para>
                Default functions provide hardware related functionality which is suitable
                for most of the implementations. These functions can be replaced by the
-               board driver if neccecary. Those functions are called via pointers in the
+               board driver if necessary. Those functions are called via pointers in the
                NAND chip description structure. The board driver can set the functions which
                should be replaced by board dependent functions before calling nand_scan().
                If the function pointer is NULL on entry to nand_scan() then the pointer
@@ -264,7 +264,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd)
                        is set up nand_scan() is called. This function tries to
                        detect and identify the chip. If a chip is found all the
                        internal data fields are initialized accordingly.
-                       The structure(s) have to be zeroed out first and then filled with the neccecary 
+                       The structure(s) have to be zeroed out first and then filled with the necessary
                        information about the device.
                </para>
                <programlisting>
@@ -327,7 +327,7 @@ module_init(board_init);
        <sect1 id="Exit_function">
                <title>Exit function</title>
                <para>
-                       The exit function is only neccecary if the driver is
+                       The exit function is only necessary if the driver is
                        compiled as a module. It releases all resources which
                        are held by the chip driver and unregisters the partitions
                        in the MTD layer.
@@ -494,7 +494,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                                in this case. See rts_from4.c and diskonchip.c for 
                                implementation reference. In those cases we must also
                                use bad block tables on FLASH, because the ECC layout is
-                               interferring with the bad block marker positions.
+                               interfering with the bad block marker positions.
                                See bad block table support for details.
                        </para>
                </sect2>
@@ -542,7 +542,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                <para>  
                        nand_scan() calls the function nand_default_bbt(). 
                        nand_default_bbt() selects appropriate default
-                       bad block table desriptors depending on the chip information
+                       bad block table descriptors depending on the chip information
                        which was retrieved by nand_scan().
                </para>
                <para>
@@ -554,7 +554,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                <sect2 id="Flash_based_tables">
                        <title>Flash based tables</title>
                        <para>
-                               It may be desired or neccecary to keep a bad block table in FLASH. 
+                               It may be desired or necessary to keep a bad block table in FLASH.
                                For AG-AND chips this is mandatory, as they have no factory marked
                                bad blocks. They have factory marked good blocks. The marker pattern
                                is erased when the block is erased to be reused. So in case of
@@ -565,10 +565,10 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                                of the blocks.
                        </para>
                        <para>
-                               The blocks in which the tables are stored are procteted against
+                               The blocks in which the tables are stored are protected against
                                accidental access by marking them bad in the memory bad block
                                table. The bad block table management functions are allowed
-                               to circumvernt this protection.
+                               to circumvent this protection.
                        </para>
                        <para>
                                The simplest way to activate the FLASH based bad block table support 
@@ -592,7 +592,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                                User defined tables are created by filling out a 
                                nand_bbt_descr structure and storing the pointer in the
                                nand_chip structure member bbt_td before calling nand_scan(). 
-                               If a mirror table is neccecary a second structure must be
+                               If a mirror table is necessary a second structure must be
                                created and a pointer to this structure must be stored
                                in bbt_md inside the nand_chip structure. If the bbt_md 
                                member is set to NULL then only the main table is used
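
As a sketch of the descriptor pair described above (the patterns, offsets,
option flags and the this_nand pointer are illustrative, not part of this
patch):

static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' };

static struct nand_bbt_descr bbt_main_descr = {
        .options   = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
                     NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs      = 8,         /* byte offset of the ident pattern */
        .len       = 4,         /* length of the ident pattern */
        .veroffs   = 12,        /* byte offset of the version counter */
        .maxblocks = 4,         /* blocks reserved for the table */
        .pattern   = bbt_pattern,
};

static struct nand_bbt_descr bbt_mirror_descr = {
        .options   = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE |
                     NAND_BBT_2BIT | NAND_BBT_VERSION,
        .offs      = 8,
        .len       = 4,
        .veroffs   = 12,
        .maxblocks = 4,
        .pattern   = mirror_pattern,
};

/* in the board driver, before calling nand_scan(); bbt_md may be NULL */
this_nand->bbt_td = &bbt_main_descr;
this_nand->bbt_md = &bbt_mirror_descr;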
@@ -666,7 +666,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                                <para>
                                For automatic placement some blocks must be reserved for
                                bad block table storage. The number of reserved blocks is defined 
-                               in the maxblocks member of the babd block table description structure.
+                               in the maxblocks member of the bad block table description structure.
                                Reserving 4 blocks for mirrored tables should be a reasonable number. 
                                This also limits the number of blocks which are scanned for the bad
                                block table ident pattern.
@@ -1068,11 +1068,11 @@ in this page</entry>
   <chapter id="filesystems">
        <title>Filesystem support</title>
        <para>
-               The NAND driver provides all neccecary functions for a
+               The NAND driver provides all necessary functions for a
                filesystem via the MTD interface.
        </para>
        <para>
-               Filesystems must be aware of the NAND pecularities and
+               Filesystems must be aware of the NAND peculiarities and
                restrictions. One major restriction of NAND Flash is that you cannot 
                write as often as you want to a page. The consecutive writes to a page, 
                before erasing it again, are restricted to 1-3 writes, depending on the 
@@ -1222,7 +1222,7 @@ in this page</entry>
 #define NAND_BBT_VERSION       0x00000100
 /* Create a bbt if none exists */
 #define NAND_BBT_CREATE                0x00000200
-/* Write bbt if neccecary */
+/* Write bbt if necessary */
 #define NAND_BBT_WRITE         0x00001000
 /* Read and write back block contents when writing bbt */
 #define NAND_BBT_SAVECONTENT   0x00002000
index 346e552..3b08a08 100644 (file)
        release regulators.  Functions are
        provided to <link linkend='API-regulator-enable'>enable</link>
        and <link linkend='API-regulator-disable'>disable</link> the
-       reguator and to get and set the runtime parameters of the
+       regulator and to get and set the runtime parameters of the
        regulator.
      </para>
      <para>
index 9561815..bbe9c1f 100644 (file)
@@ -766,10 +766,10 @@ framework to set up sysfs files for this region. Simply leave it alone.
        <para>
        The dynamic memory regions will be allocated when the UIO device file,
        <varname>/dev/uioX</varname> is opened.
-       Simiar to static memory resources, the memory region information for
+       Similar to static memory resources, the memory region information for
        dynamic regions is then visible via sysfs at
        <varname>/sys/class/uio/uioX/maps/mapY/*</varname>.
-       The dynmaic memory regions will be freed when the UIO device file is
+       The dynamic memory regions will be freed when the UIO device file is
        closed. When no processes are holding the device file open, the address
        returned to userspace is ~0.
        </para>
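
A userspace sketch of mapping such a region (the device node, map index and
size below are illustrative):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        size_t len = 4096;      /* read the real size from maps/map0/size */
        void *p;
        int fd;

        fd = open("/dev/uio0", O_RDWR); /* allocates the dynamic regions */
        if (fd < 0)
                return 1;
        /* map N is selected by passing N * getpagesize() as the offset */
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                 fd, 0 * getpagesize());
        if (p == MAP_FAILED)
                return 1;
        /* ... use the region ... */
        munmap(p, len);
        close(fd);              /* the dynamic regions are freed again */
        return 0;
}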
index 8d57c18..85fc0e2 100644 (file)
 
        <listitem><para>The Linux USB API supports synchronous calls for
        control and bulk messages.
-       It also supports asynchnous calls for all kinds of data transfer,
+       It also supports asynchronous calls for all kinds of data transfer,
        using request structures called "URBs" (USB Request Blocks).
        </para></listitem>
 
index d0056a4..6f639d9 100644 (file)
@@ -5696,7 +5696,7 @@ struct _snd_pcm_runtime {
        suspending the PCM operations via
        <function>snd_pcm_suspend_all()</function> or
        <function>snd_pcm_suspend()</function>.  It means that the PCM
-       streams are already stoppped when the register snapshot is
+       streams are already stopped when the register snapshot is
        taken.  But, remember that you don't have to restart the PCM
        stream in the resume callback. It'll be restarted via 
        trigger call with <constant>SNDRV_PCM_TRIGGER_RESUME</constant>
index 2f0fcb2..f29bcbc 100644 (file)
@@ -2451,8 +2451,8 @@ lot of {Linux} into your technology!!!"
 ,month="February"
 ,year="2010"
 ,note="Available:
-\url{http://kerneltrap.com/mailarchive/linux-netdev/2010/2/26/6270589}
-[Viewed March 20, 2011]"
+\url{http://thread.gmane.org/gmane.linux.network/153338}
+[Viewed June 9, 2014]"
 ,annotation={
        Use a pair of list_head structures to support RCU-protected
        resizable hash tables.
index 141d531..613033f 100644 (file)
@@ -1,5 +1,14 @@
 Reference-count design for elements of lists/arrays protected by RCU.
 
+
+Please note that the percpu-ref feature is likely your first
+stop if you need to combine reference counts and RCU.  Please see
+include/linux/percpu-refcount.h for more information.  However, in
+those unusual cases where percpu-ref would consume too much memory,
+please read on.
+
+------------------------------------------------------------------------
+
 Reference counting on elements of lists which are protected by traditional
 reader/writer spinlocks or semaphores is straightforward:
 
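A minimal sketch of that traditional pattern (the structure and the
matches() helper are illustrative):

struct el {
        struct list_head list;
        atomic_t refcnt;                /* per-element reference count */
};

static struct el *search_and_reference(struct list_head *head,
                                       spinlock_t *list_lock)
{
        struct el *p;

        spin_lock(list_lock);
        list_for_each_entry(p, head, list) {
                if (matches(p)) {       /* matches() is illustrative */
                        atomic_inc(&p->refcnt);
                        spin_unlock(list_lock);
                        return p;       /* caller drops refcnt when done */
                }
        }
        spin_unlock(list_lock);
        return NULL;
}
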
index c6a06b7..f405780 100644 (file)
@@ -314,6 +314,7 @@ int main(int argc, char *argv[])
                        break;
                case 'm':
                        strncpy(cpumask, optarg, sizeof(cpumask));
+                       cpumask[sizeof(cpumask) - 1] = '\0';
                        maskset = 1;
                        printf("cpumask %s maskset %d\n", cpumask, maskset);
                        break;
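
The added line is the usual guard for strncpy(), which leaves the destination
unterminated whenever the source fills the buffer; the generic idiom, as a
sketch (the helper name is illustrative):

#include <string.h>

/* bounded copy that is always NUL-terminated */
static void copy_arg(char *dst, const char *src, size_t size)
{
        strncpy(dst, src, size);
        dst[size - 1] = '\0';   /* strncpy() may not terminate */
}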
index fd786ea..e182be5 100644 (file)
@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
 configuring GPIOs it can get its ACPI handle and extract this information
 from ACPI tables.
 
-Currently the kernel is not able to automatically determine from which ACPI
-device it should make the corresponding platform device so we need to add
-the ACPI device explicitly to acpi_platform_device_ids list defined in
-drivers/acpi/acpi_platform.c. This limitation is only for the platform
-devices, SPI and I2C devices are created automatically as described below.
-
 DMA support
 ~~~~~~~~~~~
 DMA controllers enumerated via ACPI should be registered in the system to
index 37fc4f6..85af34d 100644 (file)
@@ -72,27 +72,54 @@ The decompressed kernel image contains a 64-byte header as follows:
 
   u32 code0;                   /* Executable code */
   u32 code1;                   /* Executable code */
-  u64 text_offset;             /* Image load offset */
-  u64 res0     = 0;            /* reserved */
-  u64 res1     = 0;            /* reserved */
+  u64 text_offset;             /* Image load offset, little endian */
+  u64 image_size;              /* Effective Image size, little endian */
+  u64 flags;                   /* kernel flags, little endian */
   u64 res2     = 0;            /* reserved */
   u64 res3     = 0;            /* reserved */
   u64 res4     = 0;            /* reserved */
   u32 magic    = 0x644d5241;   /* Magic number, little endian, "ARM\x64" */
-  u32 res5 = 0;                /* reserved */
+  u32 res5;                    /* reserved (used for PE COFF offset) */
 
 
 Header notes:
 
+- As of v3.17, all fields are little endian unless stated otherwise.
+
 - code0/code1 are responsible for branching to stext.
+
 - when booting through EFI, code0/code1 are initially skipped.
   res5 is an offset to the PE header and the PE header has the EFI
-  entry point (efi_stub_entry). When the stub has done its work, it
+  entry point (efi_stub_entry).  When the stub has done its work, it
   jumps to code0 to resume the normal boot process.
 
-The image must be placed at the specified offset (currently 0x80000)
-from the start of the system RAM and called there. The start of the
-system RAM must be aligned to 2MB.
+- Prior to v3.17, the endianness of text_offset was not specified.  In
+  these cases image_size is zero and text_offset is 0x80000 in the
+  endianness of the kernel.  Where image_size is non-zero image_size is
+  little-endian and must be respected.  Where image_size is zero,
+  text_offset can be assumed to be 0x80000.
+
+- The flags field (introduced in v3.17) is a little-endian 64-bit field
+  composed as follows:
+  Bit 0:       Kernel endianness.  1 if BE, 0 if LE.
+  Bits 1-63:   Reserved.
+
+- When image_size is zero, a bootloader should attempt to keep as much
+  memory as possible free for use by the kernel immediately after the
+  end of the kernel image. The amount of space required will vary
+  depending on selected features, and is effectively unbounded.
+
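
For reference, a C view of the 64-byte header and flags field described
above (a sketch; the struct and function names are illustrative):

#include <stdint.h>

struct arm64_image_header {
        uint32_t code0;         /* executable code */
        uint32_t code1;         /* executable code */
        uint64_t text_offset;   /* image load offset, little endian */
        uint64_t image_size;    /* effective image size, little endian */
        uint64_t flags;         /* bit 0: 0 = LE kernel, 1 = BE kernel */
        uint64_t res2;
        uint64_t res3;
        uint64_t res4;
        uint32_t magic;         /* 0x644d5241, "ARM\x64", little endian */
        uint32_t res5;          /* reserved (PE COFF offset) */
};

/* assumes a little-endian host; byte-swap the fields first otherwise */
static int image_is_big_endian(const struct arm64_image_header *h)
{
        return h->magic == 0x644d5241 && (h->flags & 1);
}
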
+The Image must be placed text_offset bytes from a 2MB aligned base
+address near the start of usable system RAM and called there. Memory
+below that base address is currently unusable by Linux, and therefore it
+is strongly recommended that this location is the start of system RAM.
+At least image_size bytes from the start of the image must be free for
+use by the kernel.
+
+Any memory described to the kernel (even that below the 2MB aligned base
+address) which is not marked as reserved from the kernel e.g. with a
+memreserve region in the device tree) will be considered as available to
+the kernel.
 
 Before jumping into the kernel, the following conditions must be met:
 
index d50fa61..344e85c 100644 (file)
@@ -2,18 +2,18 @@
                     ==============================
 
 Author: Catalin Marinas <catalin.marinas@arm.com>
-Date  : 20 February 2012
 
 This document describes the virtual memory layout used by the AArch64
 Linux kernel. The architecture allows up to 4 levels of translation
 tables with a 4KB page size and up to 3 levels with a 64KB page size.
 
-AArch64 Linux uses 3 levels of translation tables with the 4KB page
-configuration, allowing 39-bit (512GB) virtual addresses for both user
-and kernel. With 64KB pages, only 2 levels of translation tables are
-used but the memory layout is the same.
+AArch64 Linux uses either 3 levels or 4 levels of translation tables
+with the 4KB page configuration, allowing 39-bit (512GB) or 48-bit
+(256TB) virtual addresses, respectively, for both user and kernel. With
+64KB pages, only 2 levels of translation tables, allowing 42-bit (4TB)
+virtual addresses, are used, but the memory layout is the same.
 
-User addresses have bits 63:39 set to 0 while the kernel addresses have
+User addresses have bits 63:48 set to 0 while the kernel addresses have
 the same bits set to 1. TTBRx selection is given by bit 63 of the
 virtual address. The swapper_pg_dir contains only kernel (global)
 mappings while the user pgd contains only user (non-global) mappings.
@@ -21,58 +21,40 @@ The swapper_pgd_dir address is written to TTBR1 and never written to
 TTBR0.
 
 
-AArch64 Linux memory layout with 4KB pages:
+AArch64 Linux memory layout with 4KB pages + 3 levels:
 
 Start                  End                     Size            Use
 -----------------------------------------------------------------------
 0000000000000000       0000007fffffffff         512GB          user
+ffffff8000000000       ffffffffffffffff         512GB          kernel
 
-ffffff8000000000       ffffffbbfffeffff        ~240GB          vmalloc
 
-ffffffbbffff0000       ffffffbbffffffff          64KB          [guard page]
+AArch64 Linux memory layout with 4KB pages + 4 levels:
 
-ffffffbc00000000       ffffffbdffffffff           8GB          vmemmap
-
-ffffffbe00000000       ffffffbffbbfffff          ~8GB          [guard, future vmmemap]
-
-ffffffbffa000000       ffffffbffaffffff          16MB          PCI I/O space
-
-ffffffbffb000000       ffffffbffbbfffff          12MB          [guard]
-
-ffffffbffbc00000       ffffffbffbdfffff           2MB          fixed mappings
-
-ffffffbffbe00000       ffffffbffbffffff           2MB          [guard]
-
-ffffffbffc000000       ffffffbfffffffff          64MB          modules
-
-ffffffc000000000       ffffffffffffffff         256GB          kernel logical memory map
+Start                  End                     Size            Use
+-----------------------------------------------------------------------
+0000000000000000       0000ffffffffffff         256TB          user
+ffff000000000000       ffffffffffffffff         256TB          kernel
 
 
-AArch64 Linux memory layout with 64KB pages:
+AArch64 Linux memory layout with 64KB pages + 2 levels:
 
 Start                  End                     Size            Use
 -----------------------------------------------------------------------
 0000000000000000       000003ffffffffff           4TB          user
+fffffc0000000000       ffffffffffffffff           4TB          kernel
 
-fffffc0000000000       fffffdfbfffeffff          ~2TB          vmalloc
 
-fffffdfbffff0000       fffffdfbffffffff          64KB          [guard page]
+AArch64 Linux memory layout with 64KB pages + 3 levels:
 
-fffffdfc00000000       fffffdfdffffffff           8GB          vmemmap
-
-fffffdfe00000000       fffffdfffbbfffff          ~8GB          [guard, future vmmemap]
-
-fffffdfffa000000       fffffdfffaffffff          16MB          PCI I/O space
-
-fffffdfffb000000       fffffdfffbbfffff          12MB          [guard]
-
-fffffdfffbc00000       fffffdfffbdfffff           2MB          fixed mappings
-
-fffffdfffbe00000       fffffdfffbffffff           2MB          [guard]
+Start                  End                     Size            Use
+-----------------------------------------------------------------------
+0000000000000000       0000ffffffffffff         256TB          user
+ffff000000000000       ffffffffffffffff         256TB          kernel
 
-fffffdfffc000000       fffffdffffffffff          64MB          modules
 
-fffffe0000000000       ffffffffffffffff           2TB          kernel logical memory map
+For details of the virtual kernel memory layout please see the kernel
+booting log.
 
 
 Translation table lookup with 4KB pages:
@@ -86,7 +68,7 @@ Translation table lookup with 4KB pages:
  |                 |         |         |         +-> [20:12] L3 index
  |                 |         |         +-----------> [29:21] L2 index
  |                 |         +---------------------> [38:30] L1 index
- |                 +-------------------------------> [47:39] L0 index (not used)
+ |                 +-------------------------------> [47:39] L0 index
  +-------------------------------------------------> [63] TTBR0/1
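
Extracting those indices from a 48-bit virtual address, as a sketch:

#include <stdint.h>

#define VA_IDX(va, shift)       (((va) >> (shift)) & 0x1ff) /* 9 bits/level */

static void split_va_4k(uint64_t va)
{
        uint64_t l0  = VA_IDX(va, 39);  /* [47:39] */
        uint64_t l1  = VA_IDX(va, 30);  /* [38:30] */
        uint64_t l2  = VA_IDX(va, 21);  /* [29:21] */
        uint64_t l3  = VA_IDX(va, 12);  /* [20:12] */
        uint64_t off = va & 0xfff;      /* [11:0] in-page offset */

        (void)l0; (void)l1; (void)l2; (void)l3; (void)off;
}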
 
 
@@ -99,10 +81,11 @@ Translation table lookup with 64KB pages:
  |                 |    |               |              v
  |                 |    |               |            [15:0]  in-page offset
  |                 |    |               +----------> [28:16] L3 index
- |                 |    +--------------------------> [41:29] L2 index (only 38:29 used)
- |                 +-------------------------------> [47:42] L1 index (not used)
+ |                 |    +--------------------------> [41:29] L2 index
+ |                 +-------------------------------> [47:42] L1 index
  +-------------------------------------------------> [63] TTBR0/1
 
+
 When using KVM, the hypervisor maps kernel pages in EL2, at a fixed
 offset from the kernel VA (top 24bits of the kernel VA set to zero):
 
index 821de56..10c949b 100644 (file)
@@ -599,6 +599,20 @@ fork. If this method returns 0 (success) then this should remain valid
 while the caller holds cgroup_mutex and it is ensured that either
 attach() or cancel_attach() will be called in future.
 
+void css_reset(struct cgroup_subsys_state *css)
+(cgroup_mutex held by caller)
+
+An optional operation which should restore @css's configuration to the
+initial state.  This is currently only used on the unified hierarchy
+when a subsystem is disabled on a cgroup through
+"cgroup.subtree_control" but should remain enabled because other
+subsystems depend on it.  cgroup core makes such a css invisible by
+removing the associated interface files and invokes this callback so
+that the hidden subsystem can return to the initial neutral state.
+This prevents unexpected resource control from a hidden css and
+ensures that the configuration is in the initial state when it is made
+visible again later.
+
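A sketch of hooking the callback up in a controller (all names below are
illustrative):

static void my_css_reset(struct cgroup_subsys_state *css)
{
        /* restore every knob of @css to its initial, neutral value */
}

struct cgroup_subsys my_cgrp_subsys = {
        /* ... the controller's other callbacks ... */
        .css_reset      = my_css_reset,
};
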
 void cancel_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
index 324b182..4f45632 100644 (file)
@@ -94,12 +94,35 @@ change soon.
 
  mount -t cgroup -o __DEVEL__sane_behavior cgroup $MOUNT_POINT
 
-All controllers which are not bound to other hierarchies are
-automatically bound to unified hierarchy and show up at the root of
-it.  Controllers which are enabled only in the root of unified
-hierarchy can be bound to other hierarchies at any time.  This allows
-mixing unified hierarchy with the traditional multiple hierarchies in
-a fully backward compatible way.
+All controllers which support the unified hierarchy and are not bound
+to other hierarchies are automatically bound to unified hierarchy and
+show up at the root of it.  Controllers which are enabled only in the
+root of unified hierarchy can be bound to other hierarchies.  This
+allows mixing unified hierarchy with the traditional multiple
+hierarchies in a fully backward compatible way.
+
+For development purposes, the following boot parameter makes all
+controllers appear on the unified hierarchy whether supported or
+not.
+
+ cgroup__DEVEL__legacy_files_on_dfl
+
+A controller can be moved across hierarchies only after the controller
+is no longer referenced in its current hierarchy.  Because per-cgroup
+controller states are destroyed asynchronously and controllers may
+have lingering references, a controller may not show up immediately on
+the unified hierarchy after the final umount of the previous
+hierarchy.  Similarly, a controller should be fully disabled to be
+moved out of the unified hierarchy and it may take some time for the
+disabled controller to become available for other hierarchies;
+furthermore, due to dependencies among controllers, other controllers
+may need to be disabled too.
+
+While useful for development and manual configurations, dynamically
+moving controllers between the unified and other hierarchies is
+strongly discouraged for production use.  It is recommended to decide
+the hierarchies and controller associations before starting to use the
+controllers.
 
 
 2-2. cgroup.subtree_control
index e742d21..a69ffe1 100644 (file)
@@ -15,10 +15,13 @@ New sysfs files for controlling P state selection have been added to
 /sys/devices/system/cpu/intel_pstate/
 
       max_perf_pct: limits the maximum P state that will be requested by
-      the driver stated as a percentage of the available performance.
+      the driver stated as a percentage of the available performance. The
+      available (P states) performance may be reduced by the no_turbo
+      setting described below.
 
       min_perf_pct: limits the minimum P state that will be  requested by
-      the driver stated as a percentage of the available performance.
+      the driver stated as a percentage of the max (non-turbo)
+      performance level.
 
       no_turbo: limits the driver to selecting P states below the turbo
       frequency range.
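
A sketch of driving these files from userspace (the value and error handling
are illustrative):

#include <stdio.h>

/* cap requested P states at the given percentage of available performance */
static int set_max_perf_pct(int pct)
{
        FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/max_perf_pct", "w");

        if (!f)
                return -1;
        fprintf(f, "%d\n", pct);
        return fclose(f);
}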
index 11f2330..ad9f8ed 100644 (file)
@@ -6,5 +6,15 @@ following property:
 
 Required root node property:
 
- - compatible: must contain either "marvell,armada380" or
-   "marvell,armada385" depending on the variant of the SoC being used.
+ - compatible: must contain "marvell,armada380"
+
+In addition, boards using the Marvell Armada 385 SoC shall have the
+following property before the previous one:
+
+Required root node property:
+
+compatible: must contain "marvell,armada385"
+
+Example:
+
+compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
index 5216b41..8b4f7b7 100644 (file)
@@ -9,6 +9,18 @@ Required Properties:
 - reg: physical base address of the controller and length of memory mapped
     region.
 
+Optional Properties:
+- clocks: List of clock handles. The parent clocks of the input clocks to the
+       devices in this power domain are set to oscclk before power gating
+       and restored back after powering on a domain. This is required for
+       all domains which are powered on and off and not required for unused
+       domains.
+- clock-names: The following clocks can be specified:
+       - oscclk: Oscillator clock.
+       - pclkN, clkN: Pairs of an input clock's parent and the input clock to
+               the devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
+               are supported currently.
+
 Node of a device using power domains must have a samsung,power-domain property
 defined with a phandle to respective power domain.
 
@@ -19,6 +31,14 @@ Example:
                reg = <0x10023C00 0x10>;
        };
 
+       mfc_pd: power-domain@10044060 {
+               compatible = "samsung,exynos4210-pd";
+               reg = <0x10044060 0x20>;
+               clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
+                       <&clock CLK_MOUT_USER_ACLK333>;
+               clock-names = "oscclk", "pclk0", "clk0";
+       };
+
 Example of the node using power domain:
 
        node {
index b513cb8..af527ee 100644 (file)
@@ -40,6 +40,9 @@ Optional properties:
 - arm,filter-ranges : <start length> Starting address and length of window to
   filter. Addresses in the filter window are directed to the M1 port. Other
   addresses will go to the M0 port.
+- arm,io-coherent : indicates that the system is operating in a hardware
+  I/O coherent mode. Valid only when the arm,pl310-cache compatible
+  string is used.
 - interrupts : 1 combined interrupt.
 - cache-id-part: cache id part number to be used if it is not present
   on hardware
index 5d49f2b..832fe8c 100644 (file)
@@ -48,7 +48,7 @@ adc@12D10000 {
 
        /* NTC thermistor is a hwmon device */
        ncp15wb473@0 {
-               compatible = "ntc,ncp15wb473";
+               compatible = "murata,ncp15wb473";
                pullup-uv = <1800000>;
                pullup-ohm = <47000>;
                pulldown-ohm = <0>;
index 2a4ab04..f9865e7 100644 (file)
@@ -12,8 +12,38 @@ Properties:
 
  - reg : offset and length of the register set.
 
+ - #clock-cells : must be <1>, since PMU requires one cell as clock specifier.
+               The single specifier cell is used as index to list of clocks
+               provided by PMU, which is currently:
+                       0 : SoC clock output (CLKOUT pin)
+
+ - clock-names : list of clock names for particular CLKOUT mux inputs in
+               following format:
+                       "clkoutN", where N is a decimal number corresponding to
+                       CLKOUT mux control bits value for given input, e.g.
+                               "clkout0", "clkout7", "clkout15".
+
+ - clocks : list of phandles and specifiers to all input clocks listed in
+               clock-names property.
+
 Example :
 pmu_system_controller: system-controller@10040000 {
        compatible = "samsung,exynos5250-pmu", "syscon";
        reg = <0x10040000 0x5000>;
+       #clock-cells = <1>;
+       clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
+                       "clkout4", "clkout8", "clkout9";
+       clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
+               <&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
+               <&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
+               <&clock CLK_XUSBXTI>;
+};
+
+Example of clock consumer :
+
+usb3503: usb3503@08 {
+       /* ... */
+       clock-names = "refclk";
+       clocks = <&pmu_system_controller 0>;
+       /* ... */
 };
index c96d8dc..4ab09f2 100644 (file)
@@ -3,28 +3,43 @@
 SATA nodes are defined to describe on-chip Serial ATA controllers.
 Each SATA controller should have its own node.
 
+It is possible, but not required, to represent each port as a sub-node.
+This allows each port to be enabled independently when dealing with
+multiple PHYs.
+
 Required properties:
 - compatible        : compatible string, one of:
   - "allwinner,sun4i-a10-ahci"
-  - "fsl,imx53-ahci"
-  - "fsl,imx6q-ahci"
   - "hisilicon,hisi-ahci"
   - "ibm,476gtr-ahci"
   - "marvell,armada-380-ahci"
   - "snps,dwc-ahci"
   - "snps,exynos5440-ahci"
   - "snps,spear-ahci"
+  - "generic-ahci"
 - interrupts        : <interrupt mapping for SATA IRQ>
 - reg               : <registers mapping>
 
+Please note that when using "generic-ahci" you must also specify a SoC specific
+compatible:
+       compatible = "manufacturer,soc-model-ahci", "generic-ahci";
+
 Optional properties:
 - dma-coherent      : Present if dma operations are coherent
 - clocks            : a list of phandle + clock specifier pairs
 - target-supply     : regulator for SATA target power
+- phys              : reference to the SATA PHY node
+- phy-names         : must be "sata-phy"
+
+Required properties when using sub-nodes:
+- #address-cells    : number of cells to encode an address
+- #size-cells       : number of cells representing the size of an address
+
+
+Sub-nodes required properties:
+- reg               : the port number
+- phys              : reference to the SATA PHY node
 
-"fsl,imx53-ahci", "fsl,imx6q-ahci" required properties:
-- clocks            : must contain the sata, sata_ref and ahb clocks
-- clock-names       : must contain "ahb" for the ahb clock
 
 Examples:
         sata@ffe08000 {
@@ -40,3 +55,23 @@ Examples:
                clocks = <&pll6 0>, <&ahb_gates 25>;
                target-supply = <&reg_ahci_5v>;
        };
+
+With sub-nodes:
+       sata@f7e90000 {
+               compatible = "marvell,berlin2q-ahci", "generic-ahci";
+               reg = <0xe90000 0x1000>;
+               interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&chip CLKID_SATA>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               sata0: sata-port@0 {
+                       reg = <0>;
+                       phys = <&sata_phy 0>;
+               };
+
+               sata1: sata-port@1 {
+                       reg = <1>;
+                       phys = <&sata_phy 1>;
+               };
+       };
diff --git a/Documentation/devicetree/bindings/ata/ahci-st.txt b/Documentation/devicetree/bindings/ata/ahci-st.txt
new file mode 100644 (file)
index 0000000..0574a77
--- /dev/null
@@ -0,0 +1,31 @@
+STMicroelectronics STi SATA controller
+
+This binding describes a SATA device.
+
+Required properties:
+ - compatible     : Must be "st,sti-ahci"
+ - reg            : Physical base addresses and length of register sets
+ - interrupts     : Interrupt associated with the SATA device
+ - interrupt-names :   Associated name must be: "hostc"
+ - resets         : The power-down and soft-reset lines of SATA IP
+ - reset-names    :   Associated names must be: "pwr-dwn" and "sw-rst"
+ - clocks         : The phandle for the clock
+ - clock-names    :   Associated name must be: "ahci_clk"
+ - phys                   : The phandle for the PHY device
+ - phy-names      :   Associated name must be: "ahci_phy"
+
+Example:
+
+       sata0: sata@fe380000 {
+               compatible      = "st,sti-ahci";
+               reg             = <0xfe380000 0x1000>;
+               interrupts      = <GIC_SPI 157 IRQ_TYPE_NONE>;
+               interrupt-names = "hostc";
+               phys            = <&miphy365x_phy MIPHY_PORT_0 MIPHY_TYPE_SATA>;
+               phy-names       = "ahci_phy";
+               resets          = <&powerdown STIH416_SATA0_POWERDOWN>,
+                                 <&softreset STIH416_SATA0_SOFTRESET>;
+               reset-names     = "pwr-dwn", "sw-rst";
+               clocks          = <&clk_s_a0_ls CLK_ICN_REG>;
+               clock-names     = "ahci_clk";
+       };
diff --git a/Documentation/devicetree/bindings/ata/imx-sata.txt b/Documentation/devicetree/bindings/ata/imx-sata.txt
new file mode 100644 (file)
index 0000000..fa511db
--- /dev/null
@@ -0,0 +1,36 @@
+* Freescale i.MX AHCI SATA Controller
+
+The Freescale i.MX SATA controller mostly conforms to the AHCI interface
+with some special extensions at integration level.
+
+Required properties:
+- compatible : should be one of the following:
+   - "fsl,imx53-ahci" for i.MX53 SATA controller
+   - "fsl,imx6q-ahci" for i.MX6Q SATA controller
+- interrupts : interrupt mapping for SATA IRQ
+- reg : registers mapping
+- clocks : list of clock specifiers, must contain an entry for each
+  required entry in clock-names
+- clock-names : should include "sata", "sata_ref" and "ahb" entries
+
+Optional properties:
+- fsl,transmit-level-mV : transmit voltage level, in millivolts.
+- fsl,transmit-boost-mdB : transmit boost level, in milli-decibels
+- fsl,transmit-atten-16ths : transmit attenuation, in 16ths
+- fsl,receive-eq-mdB : receive equalisation, in milli-decibels
+  Please refer to the technical documentation or the driver source code
+  for the list of legal values for these options.
+- fsl,no-spread-spectrum : disable spread-spectrum clocking on the SATA
+  link.
+
+Examples:
+
+sata@02200000 {
+       compatible = "fsl,imx6q-ahci";
+       reg = <0x02200000 0x4000>;
+       interrupts = <0 39 IRQ_TYPE_LEVEL_HIGH>;
+       clocks = <&clks IMX6QDL_CLK_SATA>,
+                <&clks IMX6QDL_CLK_SATA_REF_100M>,
+                <&clks IMX6QDL_CLK_AHB>;
+       clock-names = "sata", "sata_ref", "ahb";
+};
diff --git a/Documentation/devicetree/bindings/ata/tegra-sata.txt b/Documentation/devicetree/bindings/ata/tegra-sata.txt
new file mode 100644 (file)
index 0000000..946f207
--- /dev/null
@@ -0,0 +1,30 @@
+Tegra124 SoC SATA AHCI controller
+
+Required properties :
+- compatible : "nvidia,tegra124-ahci".
+- reg : Should contain 2 entries:
+  - AHCI register set (SATA BAR5)
+  - SATA register set
+- interrupts : Defines the interrupt used by SATA
+- clocks : Must contain an entry for each entry in clock-names.
+  See ../clocks/clock-bindings.txt for details.
+- clock-names : Must include the following entries:
+  - sata
+  - sata-oob
+  - cml1
+  - pll_e
+- resets : Must contain an entry for each entry in reset-names.
+  See ../reset/reset.txt for details.
+- reset-names : Must include the following entries:
+  - sata
+  - sata-oob
+  - sata-cold
+- phys : Must contain an entry for each entry in phy-names.
+  See ../phy/phy-bindings.txt for details.
+- phy-names : Must include the following entries:
+  - sata-phy : XUSB PADCTL SATA PHY
+- hvdd-supply : Defines the SATA HVDD regulator
+- vddio-supply : Defines the SATA VDDIO regulator
+- avdd-supply : Defines the SATA AVDD regulator
+- target-5v-supply : Defines the SATA 5V power regulator
+- target-12v-supply : Defines the SATA 12V power regulator
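+
+Example (an illustrative sketch only; the unit address, interrupt number,
+clock/reset indices and regulator phandles below are placeholders, not
+taken from a real board file):
+
+       sata@70027000 {
+               compatible = "nvidia,tegra124-ahci";
+               reg = <0x0 0x70027000 0x0 0x2000>, /* AHCI */
+                     <0x0 0x70020000 0x0 0x7000>; /* SATA */
+               interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>;
+               clocks = <&tegra_car TEGRA124_CLK_SATA>,
+                        <&tegra_car TEGRA124_CLK_SATA_OOB>,
+                        <&tegra_car TEGRA124_CLK_CML1>,
+                        <&tegra_car TEGRA124_CLK_PLL_E>;
+               clock-names = "sata", "sata-oob", "cml1", "pll_e";
+               resets = <&tegra_car 124>, <&tegra_car 123>, <&tegra_car 129>;
+               reset-names = "sata", "sata-oob", "sata-cold";
+               phys = <&padctl TEGRA_XUSB_PADCTL_SATA>;
+               phy-names = "sata-phy";
+               hvdd-supply = <&vdd_3v3_lp0>;
+               vddio-supply = <&vdd_1v05_run>;
+               avdd-supply = <&vdd_1v05_run>;
+               target-5v-supply = <&vdd_5v0_sata>;
+               target-12v-supply = <&vdd_12v0_sata>;
+       };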
diff --git a/Documentation/devicetree/bindings/clock/clk-palmas-clk32kg-clocks.txt b/Documentation/devicetree/bindings/clock/clk-palmas-clk32kg-clocks.txt
new file mode 100644 (file)
index 0000000..4208886
--- /dev/null
@@ -0,0 +1,35 @@
+* Palmas 32KHz clocks *
+
+The Palmas device has two 32KHz clock output pins: KG and KG_AUDIO.
+
+This binding uses the common clock binding ./clock-bindings.txt.
+
+Required properties:
+- compatible : "ti,palmas-clk32kg" for clk32kg clock
+               "ti,palmas-clk32kgaudio" for clk32kgaudio clock
+- #clock-cells : shall be set to 0.
+
+Optional property:
+- ti,external-sleep-control: The external enable input pins that control the
+	enabling/disabling of the clock.  The external enable input pins are
+	ENABLE1, ENABLE2 and NSLEEP. The valid values for the external pins are:
+		PALMAS_EXT_CONTROL_PIN_ENABLE1 for the ENABLE1 pin
+		PALMAS_EXT_CONTROL_PIN_ENABLE2 for the ENABLE2 pin
+		PALMAS_EXT_CONTROL_PIN_NSLEEP for the NSLEEP pin
+	A value of 0, or omitting this property, means the clock is
+	enabled/disabled via register access and these pins do not have any
+	control.
+	The macros for the external control pins used in DTS are defined in
+	dt-bindings/mfd/palmas.h.
+
+Example:
+       #include <dt-bindings/mfd/palmas.h>
+       ...
+       palmas: tps65913@58 {
+               ...
+               clk32kg: palmas_clk32k@0 {
+                       compatible = "ti,palmas-clk32kg";
+                       #clock-cells = <0>;
+                       ti,external-sleep-control = <PALMAS_EXT_CONTROL_PIN_NSLEEP>;
+               };
+               ...
+       };
index f157878..06fc6d5 100644 (file)
@@ -131,3 +131,39 @@ clock signal, and a UART.
   ("pll" and "pll-switched").
 * The UART has its baud clock connected to the external oscillator and its
   register clock connected to the PLL clock (the "pll-switched" signal)
+
+==Assigned clock parents and rates==
+
+Some platforms may require initial configuration of default parent clocks
+and clock frequencies. Such a configuration can be specified in a device tree
+node through assigned-clocks, assigned-clock-parents and assigned-clock-rates
+properties. The assigned-clock-parents property should contain a list of parent
+clocks in the form of phandle and clock specifier pairs, and the
+assigned-clock-rates property the list of assigned clock frequency values,
+corresponding to the clocks listed in the assigned-clocks property.
+
+To skip setting the parent or rate of a clock, its corresponding entry should
+be set to 0, or it can be omitted if it is not followed by any non-zero entry.
+
+    uart@a000 {
+        compatible = "fsl,imx-uart";
+        reg = <0xa000 0x1000>;
+        ...
+        clocks = <&osc 0>, <&pll 1>;
+        clock-names = "baud", "register";
+
+        assigned-clocks = <&clkcon 0>, <&pll 2>;
+        assigned-clock-parents = <&pll 2>;
+        assigned-clock-rates = <0>, <460800>;
+    };
+
+In this example, the <&pll 2> clock is set as the parent of the <&clkcon 0>
+clock, and the <&pll 2> clock itself is assigned a frequency of 460800 Hz.
+
+Configuring a clock's parent and rate through the device node that consumes
+the clock can be done only for clocks that have a single user. Specifying
+conflicting parent or rate configuration in multiple consumer nodes for
+a shared clock is forbidden.
+
+Configuration of common clocks, which affect multiple consumer devices, can
+be similarly specified in the clock provider node.
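+
+For instance, a hypothetical provider-node configuration might look as
+follows (the compatible string and the clock indices are placeholders,
+not taken from a real binding):
+
+    clkcon: clock-controller@a0000 {
+        compatible = "vendor,soc-clock-controller";
+        reg = <0xa0000 0x1000>;
+        #clock-cells = <1>;
+
+        assigned-clocks = <&clkcon 16>, <&clkcon 17>;
+        assigned-clock-parents = <&clkcon 1>;
+        assigned-clock-rates = <0>, <24000000>;
+    };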
diff --git a/Documentation/devicetree/bindings/clock/clps711x-clock.txt b/Documentation/devicetree/bindings/clock/clps711x-clock.txt
new file mode 100644 (file)
index 0000000..ce5a747
--- /dev/null
@@ -0,0 +1,19 @@
+* Clock bindings for the Cirrus Logic CLPS711X CPUs
+
+Required properties:
+- compatible       : Shall contain "cirrus,clps711x-clk".
+- reg              : Address of the internal register set.
+- startup-frequency: Factory-set CPU startup frequency in Hz.
+- #clock-cells     : Should be <1>.
+
+The clock consumer should specify the desired clock by having the clock
+ID in its "clocks" phandle cell. See include/dt-bindings/clock/clps711x-clock.h
+for the full list of CLPS711X clock IDs.
+
+Example:
+       clks: clks@80000000 {
+               #clock-cells = <1>;
+               compatible = "cirrus,ep7312-clk", "cirrus,clps711x-clk";
+               reg = <0x80000000 0xc000>;
+               startup-frequency = <73728000>;
+       };
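+
+A consumer node might then look as follows (an illustrative sketch only;
+the UART node and the CLPS711X_CLK_UART macro serve purely as an example
+consumer):
+
+       uart1: serial@80000480 {
+               compatible = "cirrus,clps711x-uart";
+               reg = <0x80000480 0x80>;
+               clocks = <&clks CLPS711X_CLK_UART>;
+       };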
index 9cfcb4f..aba3d25 100644 (file)
@@ -5,6 +5,8 @@ Required properties :
 - compatible : shall contain only one of the following:
 
                        "qcom,gcc-apq8064"
+                       "qcom,gcc-apq8084"
+                       "qcom,gcc-ipq8064"
                        "qcom,gcc-msm8660"
                        "qcom,gcc-msm8960"
                        "qcom,gcc-msm8974"
index d572e99..29ebf84 100644 (file)
@@ -4,6 +4,8 @@ Qualcomm Multimedia Clock & Reset Controller Binding
 Required properties :
 - compatible : shall contain only one of the following:
 
+                       "qcom,mmcc-apq8064"
+                       "qcom,mmcc-apq8084"
                        "qcom,mmcc-msm8660"
                        "qcom,mmcc-msm8960"
                        "qcom,mmcc-msm8974"
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3188-cru.txt
new file mode 100644 (file)
index 0000000..0c2bf5e
--- /dev/null
@@ -0,0 +1,61 @@
+* Rockchip RK3188/RK3066 Clock and Reset Unit
+
+The RK3188/RK3066 clock controller generates and supplies clock to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: should be "rockchip,rk3188-cru", "rockchip,rk3188a-cru" or
+                       "rockchip,rk3066a-cru"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+  If missing, pll rates are not changeable, due to the missing pll lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/rk3188-cru.h and
+dt-bindings/clock/rk3066-cru.h headers and can be used in device tree sources.
+Similar macros exist for the reset sources in these files.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with the following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "xin27m" - 27mhz crystal input on rk3066 - optional,
+ - "ext_hsadc" - external HSADC clock - optional,
+ - "ext_cif0" - external camera clock - optional,
+ - "ext_rmii" - external RMII clock - optional,
+ - "ext_jtag" - externalJTAG clock - optional
+
+Example: Clock controller node:
+
+       cru: cru@20000000 {
+               compatible = "rockchip,rk3188-cru";
+               reg = <0x20000000 0x1000>;
+               rockchip,grf = <&grf>;
+
+               #clock-cells = <1>;
+               #reset-cells = <1>;
+       };
+
+Example: UART controller node that consumes the clock generated by the clock
+  controller:
+
+       uart0: serial@10124000 {
+               compatible = "snps,dw-apb-uart";
+               reg = <0x10124000 0x400>;
+               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+               reg-shift = <2>;
+               reg-io-width = <1>;
+               clocks = <&cru SCLK_UART0>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3288-cru.txt
new file mode 100644 (file)
index 0000000..c9fbb76
--- /dev/null
@@ -0,0 +1,61 @@
+* Rockchip RK3288 Clock and Reset Unit
+
+The RK3288 clock controller generates and supplies clock to various
+controllers within the SoC and also implements a reset controller for SoC
+peripherals.
+
+Required Properties:
+
+- compatible: should be "rockchip,rk3288-cru"
+- reg: physical base address of the controller and length of memory mapped
+  region.
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Optional Properties:
+
+- rockchip,grf: phandle to the syscon managing the "general register files"
+  If missing, pll rates are not changeable, due to the missing pll lock status.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume. All available clocks are defined as
+preprocessor macros in the dt-bindings/clock/rk3288-cru.h header and can be
+used in device tree sources. Similar macros exist for the reset sources in
+this file.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It is expected
+that they are defined using standard clock bindings with the following
+clock-output-names:
+ - "xin24m" - crystal input - required,
+ - "xin32k" - rtc clock - optional,
+ - "ext_i2s" - external I2S clock - optional,
+ - "ext_hsadc" - external HSADC clock - optional,
+ - "ext_edp_24m" - external display port clock - optional,
+ - "ext_vip" - external VIP clock - optional,
+ - "ext_isp" - external ISP clock - optional,
+ - "ext_jtag" - external JTAG clock - optional
+
+Example: Clock controller node:
+
+       cru: cru@20000000 {
+               compatible = "rockchip,rk3188-cru";
+               reg = <0x20000000 0x1000>;
+               rockchip,grf = <&grf>;
+
+               #clock-cells = <1>;
+               #reset-cells = <1>;
+       };
+
+Example: UART controller node that consumes the clock generated by the clock
+  controller:
+
+       uart0: serial@10124000 {
+               compatible = "snps,dw-apb-uart";
+               reg = <0x10124000 0x400>;
+               interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
+               reg-shift = <2>;
+               reg-io-width = <1>;
+               clocks = <&cru SCLK_UART0>;
+       };
index a891c82..22f6769 100644 (file)
@@ -6,6 +6,9 @@ This binding uses the common clock binding[1].
 
 == Gate clocks ==
 
+These bindings are deprecated!
+Please use the SoC-specific CRU bindings instead.
+
 The gate registers form a continuous block which makes the dt node
 structure a matter of taste, as either all gates can be put into
 one gate clock spanning all registers or they can be divided into
index ae56315..6247652 100644 (file)
@@ -24,26 +24,26 @@ Required properties:
 
 Example:
 
-       clockgenA@fd345000 {
+       clockgen-a@fd345000 {
                reg = <0xfd345000 0xb50>;
 
-               CLK_M_A1_DIV1: CLK_M_A1_DIV1 {
+               clk_m_a1_div1: clk-m-a1-div1 {
                        #clock-cells = <1>;
                        compatible = "st,clkgena-divmux-c32-odf1",
                                     "st,clkgena-divmux";
 
-                       clocks = <&CLK_M_A1_OSC_PREDIV>,
-                                <&CLK_M_A1_PLL0 1>, /* PLL0 PHI1 */
-                                <&CLK_M_A1_PLL1 1>; /* PLL1 PHI1 */
-
-                       clock-output-names = "CLK_M_RX_ICN_TS",
-                                            "CLK_M_RX_ICN_VDP_0",
-                                            "", /* Unused */
-                                            "CLK_M_PRV_T1_BUS",
-                                            "CLK_M_ICN_REG_12",
-                                            "CLK_M_ICN_REG_10",
-                                            "", /* Unused */
-                                            "CLK_M_ICN_ST231";
+                       clocks = <&clk_m_a1_osc_prediv>,
+                                <&clk_m_a1_pll0 1>, /* PLL0 PHI1 */
+                                <&clk_m_a1_pll1 1>; /* PLL1 PHI1 */
+
+                       clock-output-names = "clk-m-rx-icn-ts",
+                                            "clk-m-rx-icn-vdp-0",
+                                            "", /* unused */
+                                            "clk-m-prv-t1-bus",
+                                            "clk-m-icn-reg-12",
+                                            "clk-m-icn-reg-10",
+                                            "", /* unused */
+                                            "clk-m-icn-st231";
                };
        };
 
index 943e080..f1fa91c 100644 (file)
@@ -17,7 +17,7 @@ Required properties:
        "st,stih416-clkgenf-vcc-sd",    "st,clkgen-mux"
        "st,stih415-clkgen-a9-mux",     "st,clkgen-mux"
        "st,stih416-clkgen-a9-mux",     "st,clkgen-mux"
-
+       "st,stih407-clkgen-a9-mux",     "st,clkgen-mux"
 
 - #clock-cells : from common clock binding; shall be set to 0.
 
@@ -27,10 +27,10 @@ Required properties:
 
 Example:
 
-       CLK_M_HVA: CLK_M_HVA {
+       clk_m_hva: clk-m-hva@fd690868 {
                #clock-cells = <0>;
                compatible = "st,stih416-clkgenf-vcc-hva", "st,clkgen-mux";
                reg = <0xfd690868 4>;
 
-               clocks = <&CLOCKGEN_F 1>, <&CLK_M_A1_DIV0 3>;
+               clocks = <&clockgen_f 1>, <&clk_m_a1_div0 3>;
        };
index 81eb385..efb51cf 100644 (file)
@@ -19,11 +19,14 @@ Required properties:
        "st,stih415-plls-c32-ddr",      "st,clkgen-plls-c32"
        "st,stih416-plls-c32-a9",       "st,clkgen-plls-c32"
        "st,stih416-plls-c32-ddr",      "st,clkgen-plls-c32"
+       "st,stih407-plls-c32-a0",       "st,clkgen-plls-c32"
+       "st,stih407-plls-c32-a9",       "st,clkgen-plls-c32"
+       "st,stih407-plls-c32-c0_0",     "st,clkgen-plls-c32"
+       "st,stih407-plls-c32-c0_1",     "st,clkgen-plls-c32"
 
        "st,stih415-gpu-pll-c32",       "st,clkgengpu-pll-c32"
        "st,stih416-gpu-pll-c32",       "st,clkgengpu-pll-c32"
 
-
 - #clock-cells : From common clock binding; shall be set to 1.
 
 - clocks : From common clock binding
@@ -32,17 +35,17 @@ Required properties:
 
 Example:
 
-       clockgenA@fee62000 {
+       clockgen-a@fee62000 {
                reg = <0xfee62000 0xb48>;
 
-               CLK_S_A0_PLL: CLK_S_A0_PLL {
+               clk_s_a0_pll: clk-s-a0-pll {
                        #clock-cells = <1>;
                        compatible = "st,clkgena-plls-c65";
 
-                       clocks = <&CLK_SYSIN>;
+                       clocks = <&clk_sysin>;
 
-                       clock-output-names = "CLK_S_A0_PLL0_HS",
-                                            "CLK_S_A0_PLL0_LS",
-                                            "CLK_S_A0_PLL1";
+                       clock-output-names = "clk-s-a0-pll0-hs",
+                                            "clk-s-a0-pll0-ls",
+                                            "clk-s-a0-pll1";
                };
        };
index 566c9d7..604766c 100644 (file)
@@ -20,17 +20,17 @@ Required properties:
 
 Example:
 
-       clockgenA@fd345000 {
+       clockgen-a@fd345000 {
                reg = <0xfd345000 0xb50>;
 
-               CLK_M_A2_OSC_PREDIV: CLK_M_A2_OSC_PREDIV {
+               clk_m_a2_osc_prediv: clk-m-a2-osc-prediv {
                        #clock-cells = <0>;
                        compatible = "st,clkgena-prediv-c32",
                                     "st,clkgena-prediv";
 
-                       clocks = <&CLK_SYSIN>;
+                       clocks = <&clk_sysin>;
 
-                       clock-output-names = "CLK_M_A2_OSC_PREDIV";
+                       clock-output-names = "clk-m-a2-osc-prediv";
                };
        };
 
index 4e3ff28..109b3ed 100644 (file)
@@ -32,22 +32,30 @@ Required properties:
 
 Example:
 
-       CLOCKGEN_C_VCC: CLOCKGEN_C_VCC {
+       clockgen_c_vcc: clockgen-c-vcc@fe8308ac {
                #clock-cells = <1>;
                compatible = "st,stih416-clkgenc", "st,clkgen-vcc";
                reg = <0xfe8308ac 12>;
 
-               clocks = <&CLK_S_VCC_HD>, <&CLOCKGEN_C 1>,
-                       <&CLK_S_TMDS_FROMPHY>, <&CLOCKGEN_C 2>;
-
-               clock-output-names  =
-                       "CLK_S_PIX_HDMI",  "CLK_S_PIX_DVO",
-                       "CLK_S_OUT_DVO",   "CLK_S_PIX_HD",
-                       "CLK_S_HDDAC",     "CLK_S_DENC",
-                       "CLK_S_SDDAC",     "CLK_S_PIX_MAIN",
-                       "CLK_S_PIX_AUX",   "CLK_S_STFE_FRC_0",
-                       "CLK_S_REF_MCRU",  "CLK_S_SLAVE_MCRU",
-                       "CLK_S_TMDS_HDMI", "CLK_S_HDMI_REJECT_PLL",
-                       "CLK_S_THSENS";
+               clocks = <&clk_s_vcc_hd>,
+                        <&clockgen_c 1>,
+                        <&clk_s_tmds_fromphy>,
+                        <&clockgen_c 2>;
+
+               clock-output-names  = "clk-s-pix-hdmi",
+                                     "clk-s-pix-dvo",
+                                     "clk-s-out-dvo",
+                                     "clk-s-pix-hd",
+                                     "clk-s-hddac",
+                                     "clk-s-denc",
+                                     "clk-s-sddac",
+                                     "clk-s-pix-main",
+                                     "clk-s-pix-aux",
+                                     "clk-s-stfe-frc-0",
+                                     "clk-s-ref-mcru",
+                                     "clk-s-slave-mcru",
+                                     "clk-s-tmds-hdmi",
+                                     "clk-s-hdmi-reject-pll",
+                                     "clk-s-thsens";
        };
 
index 49ec5ae..78978f1 100644 (file)
@@ -24,60 +24,77 @@ address is common of all subnode.
                quadfs_node {
                        ...
                };
+
+               mux_node {
+                       ...
+               };
+
+               vcc_node {
+                       ...
+               };
+
+               flexgen_node {
+                       ...
+               };
                ...
        };
 
 This binding uses the common clock binding[1].
-Each subnode should use the binding discribe in [2]..[4]
+Each subnode should use the binding described in [2]..[8]
 
 [1] Documentation/devicetree/bindings/clock/clock-bindings.txt
-[2] Documentation/devicetree/bindings/clock/st,quadfs.txt
-[3] Documentation/devicetree/bindings/clock/st,quadfs.txt
-[4] Documentation/devicetree/bindings/clock/st,quadfs.txt
+[2] Documentation/devicetree/bindings/clock/st/st,clkgen-divmux.txt
+[3] Documentation/devicetree/bindings/clock/st/st,clkgen-mux.txt
+[4] Documentation/devicetree/bindings/clock/st/st,clkgen-pll.txt
+[5] Documentation/devicetree/bindings/clock/st/st,clkgen-prediv.txt
+[6] Documentation/devicetree/bindings/clock/st/st,clkgen-vcc.txt
+[7] Documentation/devicetree/bindings/clock/st/st,quadfs.txt
+[8] Documentation/devicetree/bindings/clock/st/st,flexgen.txt
 
 Required properties:
 - reg : A Base address and length of the register set.
 
 Example:
 
-       clockgenA@fee62000 {
+       clockgen-a@fee62000 {
 
                reg = <0xfee62000 0xb48>;
 
-               CLK_S_A0_PLL: CLK_S_A0_PLL {
+               clk_s_a0_pll: clk-s-a0-pll {
                        #clock-cells = <1>;
                        compatible = "st,clkgena-plls-c65";
 
-                       clocks = <&CLK_SYSIN>;
+                       clocks = <&clk_sysin>;
 
-                       clock-output-names = "CLK_S_A0_PLL0_HS",
-                                            "CLK_S_A0_PLL0_LS",
-                                            "CLK_S_A0_PLL1";
+                       clock-output-names = "clk-s-a0-pll0-hs",
+                                            "clk-s-a0-pll0-ls",
+                                            "clk-s-a0-pll1";
                };
 
-               CLK_S_A0_OSC_PREDIV: CLK_S_A0_OSC_PREDIV {
+               clk_s_a0_osc_prediv: clk-s-a0-osc-prediv {
                        #clock-cells = <0>;
                        compatible = "st,clkgena-prediv-c65",
                                     "st,clkgena-prediv";
 
-                       clocks = <&CLK_SYSIN>;
+                       clocks = <&clk_sysin>;
 
-                       clock-output-names = "CLK_S_A0_OSC_PREDIV";
+                       clock-output-names = "clk-s-a0-osc-prediv";
                };
 
-               CLK_S_A0_HS: CLK_S_A0_HS {
+               clk_s_a0_hs: clk-s-a0-hs {
                        #clock-cells = <1>;
                        compatible = "st,clkgena-divmux-c65-hs",
                                     "st,clkgena-divmux";
 
-                       clocks = <&CLK_S_A0_OSC_PREDIV>,
-                                <&CLK_S_A0_PLL 0>, /* PLL0 HS */
-                                <&CLK_S_A0_PLL 2>; /* PLL1 */
+                       clocks = <&clk_s_a0_osc_prediv>,
+                                <&clk_s_a0_pll 0>, /* pll0 hs */
+                                <&clk_s_a0_pll 2>; /* pll1 */
 
-                       clock-output-names = "CLK_S_FDMA_0",
-                                            "CLK_S_FDMA_1",
-                                            ""; /* CLK_S_JIT_SENSE */
-                                            /* Fourth output unused */
+                       clock-output-names = "clk-s-fdma-0",
+                                            "clk-s-fdma-1",
+                                            ""; /* clk-s-jit-sense */
+                                            /* fourth output unused */
                };
        };
 
diff --git a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
new file mode 100644 (file)
index 0000000..1d3ace0
--- /dev/null
@@ -0,0 +1,119 @@
+Binding for a type of flexgen structure found on certain
+STMicroelectronics consumer electronics SoC devices
+
+This structure includes:
+- a clock cross bar (represented by a mux element)
+- pre and final dividers (each represented by divider and gate elements)
+
+The flexgen structure is part of the Clockgen[1].
+
+Please find an example below:
+
+    Clockgen block diagram
+    -------------------------------------------------------------------
+   |                     Flexgen structure                             |
+   |                  ---------------------------------------------    |
+   |                 |    -------       --------       --------    |   |
+clk_sysin            |   |       |     |        |     |        |   |   |
+---|-----------------|-->|       |     |        |     |        |   |   |
+   | |               |   |       |     |        |     |        |   |   |
+   | |   -------     |   |       |     |Pre     |     |Final   |   |   |
+   | |  |PLL0   |    |   |       |     |Dividers|     |Dividers|   |   |
+   | |->|       |    |   |       |     |  x32   |     |  x32   |   |   |
+   | |  |  odf_0|----|-->|       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |   -------     |   |       |     |        |     |        |   |   |
+   | |               |   |       |     |        |     |        |   |   |
+   | |   -------     |   | Clock |     |        |     |        |   |   |
+   | |  |PLL1   |    |   |       |     |        |     |        |   |   |
+   | |->|       |    |   | Cross |     |        |     |        |   |   |
+   | |  |  odf_0|----|-->|       |     |        |     |        | CLK_DIV[31:0]
+   | |  |       |    |   | Bar   |====>|        |====>|        |===|=========>
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |  |       |    |   |       |     |        |     |        |   |   |
+   | |   -------     |   |       |     |        |     |        |   |   |
+   | |               |   |       |     |        |     |        |   |   |
+   | |   -------     |   |       |     |        |     |        |   |   |
+   | |  |QUADFS |    |   |       |     |        |     |        |   |   |
+   | |->|    ch0|----|-->|       |     |        |     |        |   |   |
+   |    |       |    |   |       |     |        |     |        |   |   |
+   |    |    ch1|----|-->|       |     |        |     |        |   |   |
+   |    |       |    |   |       |     |        |     |        |   |   |
+   |    |    ch2|----|-->|       |     | DIV    |     | DIV    |   |   |
+   |    |       |    |   |       |     |  1 to  |     |  1 to  |   |   |
+   |    |    ch3|----|-->|       |     |   1024 |     |     64 |   |   |
+   |     -------     |   |       |     |        |     |        |   |   |
+   |                 |    -------       --------       --------    |   |
+   |                   --------------------------------------------    |
+   |                                                                   |
+    -------------------------------------------------------------------
+
+This binding uses the common clock binding[2].
+
+[1] Documentation/devicetree/bindings/clock/st/st,clkgen.txt
+[2] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be:
+  "st,flexgen"
+
+- #clock-cells : from common clock binding; shall be set to 1 (multiple clock
+  outputs).
+
+- clocks : must be set to the parents' phandles. These could be the output
+  clocks of a quadfs and/or a pll and/or clk_sysin (up to 7 clocks).
+
+- clock-output-names : List of strings used to name the clock outputs.
+
+Example:
+
+       clk_s_c0_flexgen: clk-s-c0-flexgen {
+
+               #clock-cells = <1>;
+               compatible = "st,flexgen";
+
+               clocks = <&clk_s_c0_pll0 0>,
+                        <&clk_s_c0_pll1 0>,
+                        <&clk_s_c0_quadfs 0>,
+                        <&clk_s_c0_quadfs 1>,
+                        <&clk_s_c0_quadfs 2>,
+                        <&clk_s_c0_quadfs 3>,
+                        <&clk_sysin>;
+
+               clock-output-names = "clk-icn-gpu",
+                                    "clk-fdma",
+                                    "clk-nand",
+                                    "clk-hva",
+                                    "clk-proc-stfe",
+                                    "clk-proc-tp",
+                                    "clk-rx-icn-dmu",
+                                    "clk-rx-icn-hva",
+                                    "clk-icn-cpu",
+                                    "clk-tx-icn-dmu",
+                                    "clk-mmc-0",
+                                    "clk-mmc-1",
+                                    "clk-jpegdec",
+                                    "clk-ext2fa9",
+                                    "clk-ic-bdisp-0",
+                                    "clk-ic-bdisp-1",
+                                    "clk-pp-dmu",
+                                    "clk-vid-dmu",
+                                    "clk-dss-lpc",
+                                    "clk-st231-aud-0",
+                                    "clk-st231-gp-1",
+                                    "clk-st231-dmu",
+                                    "clk-icn-lmi",
+                                    "clk-tx-icn-disp-1",
+                                    "clk-icn-sbc",
+                                    "clk-stfe-frc2",
+                                    "clk-eth-phy",
+                                    "clk-eth-ref-phyclk",
+                                    "clk-flash-promip",
+                                    "clk-main-disp",
+                                    "clk-aux-disp",
+                                    "clk-compo-dvp";
+       };
index ec86d62..cedeb9c 100644 (file)
@@ -15,6 +15,9 @@ Required properties:
   "st,stih416-quadfs432",      "st,quadfs"
   "st,stih416-quadfs660-E",    "st,quadfs"
   "st,stih416-quadfs660-F",    "st,quadfs"
+  "st,stih407-quadfs660-C",    "st,quadfs"
+  "st,stih407-quadfs660-D",    "st,quadfs"
 
 - #clock-cells : from common clock binding; shall be set to 1.
 
@@ -32,14 +35,14 @@ Required properties:
 
 Example:
 
-       CLOCKGEN_E: CLOCKGEN_E {
+       clockgen_e: clockgen-e@fd3208bc {
                 #clock-cells = <1>;
                 compatible = "st,stih416-quadfs660-E", "st,quadfs";
                 reg = <0xfd3208bc 0xB0>;
 
-                clocks = <&CLK_SYSIN>;
-                clock-output-names = "CLK_M_PIX_MDTP_0",
-                                        "CLK_M_PIX_MDTP_1",
-                                        "CLK_M_PIX_MDTP_2",
-                                        "CLK_M_MPELPC";
+                clocks = <&clk_sysin>;
+                clock-output-names = "clk-m-pix-mdtp-0",
+                                    "clk-m-pix-mdtp-1",
+                                    "clk-m-pix-mdtp-2",
+                                    "clk-m-mpelpc";
         };
index b9ec668..d3a5c3c 100644 (file)
@@ -9,11 +9,13 @@ Required properties:
        "allwinner,sun4i-a10-osc-clk" - for a gatable oscillator
        "allwinner,sun4i-a10-pll1-clk" - for the main PLL clock and PLL4
        "allwinner,sun6i-a31-pll1-clk" - for the main PLL clock on A31
+       "allwinner,sun8i-a23-pll1-clk" - for the main PLL clock on A23
        "allwinner,sun4i-a10-pll5-clk" - for the PLL5 clock
        "allwinner,sun4i-a10-pll6-clk" - for the PLL6 clock
        "allwinner,sun6i-a31-pll6-clk" - for the PLL6 clock on A31
        "allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock
        "allwinner,sun4i-a10-axi-clk" - for the AXI clock
+       "allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23
        "allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates
        "allwinner,sun4i-a10-ahb-clk" - for the AHB clock
        "allwinner,sun4i-a10-ahb-gates-clk" - for the AHB gates on A10
@@ -23,13 +25,16 @@ Required properties:
        "allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
        "allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
        "allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
+       "allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
        "allwinner,sun4i-a10-apb0-clk" - for the APB0 clock
        "allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31
+       "allwinner,sun8i-a23-apb0-clk" - for the APB0 clock on A23
        "allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10
        "allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
        "allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
        "allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31
        "allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
+       "allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23
        "allwinner,sun4i-a10-apb1-clk" - for the APB1 clock
        "allwinner,sun4i-a10-apb1-mux-clk" - for the APB1 clock muxing
        "allwinner,sun4i-a10-apb1-gates-clk" - for the APB1 gates on A10
@@ -37,8 +42,10 @@ Required properties:
        "allwinner,sun5i-a10s-apb1-gates-clk" - for the APB1 gates on A10s
        "allwinner,sun6i-a31-apb1-gates-clk" - for the APB1 gates on A31
        "allwinner,sun7i-a20-apb1-gates-clk" - for the APB1 gates on A20
+       "allwinner,sun8i-a23-apb1-gates-clk" - for the APB1 gates on A23
        "allwinner,sun6i-a31-apb2-div-clk" - for the APB2 gates on A31
        "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
+       "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
        "allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
        "allwinner,sun7i-a20-out-clk" - for the external output clocks
        "allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
index f055515..366690c 100644 (file)
@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
 under node /cpus/cpu@0.
 
 Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
-  for details
+- None
 
 Optional properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
+  details. OPPs *must* be supplied either via DT, i.e. this property, or
+  populated at runtime (see the sketch following this list).
 - clock-latency: Specify the possible maximum transition latency for clock,
   in unit of nanoseconds.
 - voltage-tolerance: Specify the CPU voltage tolerance in percentage.
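+
+A sketch of OPPs supplied via DT under /cpus/cpu@0 (the compatible string
+and the frequency/voltage pairs are placeholders for illustration):
+
+	cpus {
+		cpu@0 {
+			compatible = "arm,cortex-a9";
+			operating-points = <
+				/* kHz    uV */
+				792000  1100000
+				396000   950000
+				198000   850000
+			>;
+			clock-latency = <61036>;
+			voltage-tolerance = <2>;
+		};
+	};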
diff --git a/Documentation/devicetree/bindings/crypto/amd-ccp.txt b/Documentation/devicetree/bindings/crypto/amd-ccp.txt
new file mode 100644 (file)
index 0000000..8c61183
--- /dev/null
@@ -0,0 +1,19 @@
+* AMD Cryptographic Coprocessor driver (ccp)
+
+Required properties:
+- compatible: Should be "amd,ccp-seattle-v1a"
+- reg: Address and length of the register set for the device
+- interrupt-parent: Should be the phandle for the interrupt controller
+  that services interrupts for this device
+- interrupts: Should contain the CCP interrupt
+
+Optional properties:
+- dma-coherent: Present if dma operations are coherent
+
+Example:
+       ccp@e0100000 {
+               compatible = "amd,ccp-seattle-v1a";
+               reg = <0 0xe0100000 0 0x10000>;
+               interrupt-parent = <&gic>;
+               interrupts = <0 3 4>;
+       };
diff --git a/Documentation/devicetree/bindings/crypto/qcom-qce.txt b/Documentation/devicetree/bindings/crypto/qcom-qce.txt
new file mode 100644 (file)
index 0000000..fdd53b1
--- /dev/null
@@ -0,0 +1,25 @@
+Qualcomm crypto engine driver
+
+Required properties:
+
+- compatible  : should be "qcom,crypto-v5.1"
+- reg         : specifies base physical address and size of the registers map
+- clocks      : phandle to clock-controller plus clock-specifier pair
+- clock-names : "iface" clocks register interface
+                "bus" clocks data transfer interface
+                "core" clocks rest of the crypto block
+- dmas        : DMA specifiers for tx and rx dma channels. For more see
+                Documentation/devicetree/bindings/dma/dma.txt
+- dma-names   : DMA request names should be "rx" and "tx"
+
+Example:
+       crypto@fd45a000 {
+               compatible = "qcom,crypto-v5.1";
+               reg = <0xfd45a000 0x6000>;
+               clocks = <&gcc GCC_CE2_AHB_CLK>,
+                        <&gcc GCC_CE2_AXI_CLK>,
+                        <&gcc GCC_CE2_CLK>;
+               clock-names = "iface", "bus", "core";
+               dmas = <&cryptobam 2>, <&cryptobam 3>;
+               dma-names = "rx", "tx";
+       };
index c6f6667..b117b2e 100644 (file)
@@ -3,11 +3,19 @@ NTC Thermistor hwmon sensors
 
 Requires node properties:
 - "compatible" value : one of
-       "ntc,ncp15wb473"
-       "ntc,ncp18wb473"
-       "ntc,ncp21wb473"
-       "ntc,ncp03wb473"
-       "ntc,ncp15wl333"
+       "murata,ncp15wb473"
+       "murata,ncp18wb473"
+       "murata,ncp21wb473"
+       "murata,ncp03wb473"
+       "murata,ncp15wl333"
+
+/* Usage of vendor name "ntc" is deprecated */
+<DEPRECATED>   "ntc,ncp15wb473"
+<DEPRECATED>   "ntc,ncp18wb473"
+<DEPRECATED>   "ntc,ncp21wb473"
+<DEPRECATED>   "ntc,ncp03wb473"
+<DEPRECATED>   "ntc,ncp15wl333"
+
 - "pullup-uv"  Pull up voltage in micro volts
 - "pullup-ohm" Pull up resistor value in ohms
 - "pulldown-ohm" Pull down resistor value in ohms
@@ -21,7 +29,7 @@ Read more about iio bindings at
 
 Example:
        ncp15wb473@0 {
-               compatible = "ntc,ncp15wb473";
+               compatible = "murata,ncp15wb473";
                pullup-uv = <1800000>;
                pullup-ohm = <47000>;
                pulldown-ohm = <0>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
new file mode 100644 (file)
index 0000000..dde6c22
--- /dev/null
@@ -0,0 +1,42 @@
+* Rockchip RK3xxx I2C controller
+
+This driver interfaces with the native I2C controller present in Rockchip
+RK3xxx SoCs.
+
+Required properties :
+
+ - reg : Offset and length of the register set for the device
+ - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
+               "rockchip,rk3288-i2c".
+ - interrupts : interrupt number
+ - clocks : parent clock
+
+Required on RK3066, RK3188 :
+
+ - rockchip,grf : the phandle of the syscon node for the general register
+                 file (GRF)
+ - on those SoCs an alias with the correct I2C bus ID (bit offset in the GRF)
+   is also required.
+
+Optional properties :
+
+ - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
+
+Example:
+
+aliases {
+       i2c0 = &i2c0;
+}
+
+i2c0: i2c@2002d000 {
+       compatible = "rockchip,rk3188-i2c";
+       reg = <0x2002d000 0x1000>;
+       interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       rockchip,grf = <&grf>;
+
+       clock-names = "i2c";
+       clocks = <&cru PCLK_I2C0>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
new file mode 100644 (file)
index 0000000..6b76548
--- /dev/null
@@ -0,0 +1,41 @@
+* Allwinner P2WI (Push/Pull 2 Wire Interface) controller
+
+Required properties :
+
+ - reg             : Offset and length of the register set for the device.
+ - compatible      : Should be one of the following:
+                     - "allwinner,sun6i-a31-p2wi"
+ - interrupts      : The interrupt line connected to the P2WI peripheral.
+ - clocks          : The gate clk connected to the P2WI peripheral.
+ - resets          : The reset line connected to the P2WI peripheral.
+
+Optional properties :
+
+ - clock-frequency : Desired P2WI bus clock frequency in Hz. If not set, the
+                     default frequency is 100kHz.
+
+A P2WI may contain one child node encoding a P2WI slave device.
+
+Slave device properties:
+  Required properties:
+   - reg           : the I2C slave address used during the initialization
+                     process to switch from I2C to P2WI mode
+
+Example:
+
+       p2wi@01f03400 {
+               compatible = "allwinner,sun6i-a31-p2wi";
+               reg = <0x01f03400 0x400>;
+               interrupts = <0 39 4>;
+               clocks = <&apb0_gates 3>;
+               clock-frequency = <6000000>;
+               resets = <&apb0_rst 3>;
+
+               axp221: pmic@68 {
+                       compatible = "x-powers,axp221";
+                       reg = <0x68>;
+
+                       /* ... */
+               };
+       };
index 64fd7de..b355660 100644 (file)
@@ -4,6 +4,13 @@ Required properties:
 
   - compatible: Must contain one of the following:
 
+    - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
+    - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
+    - "renesas,scifa-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFA compatible UART.
+    - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
+    - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
+    - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
+    - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
     - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
     - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
     - "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART.
index b82a268..bee6ff2 100644 (file)
@@ -23,6 +23,12 @@ Optional properties:
 - spi-max-frequency: Specifies maximum SPI clock frequency,
                      Units - Hz. Definition as per
                      Documentation/devicetree/bindings/spi/spi-bus.txt
+- num-cs:      total number of chipselects
+- cs-gpios:    should specify GPIOs used for chipselects.
+               The gpios will be referred to as reg = <index> in the SPI child
+               nodes.  If unspecified, a single SPI device without a chip
+               select can be used.
 
 SPI slave nodes must be children of the SPI master node and can contain
 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
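+
+For example (an illustrative sketch; the controller compatible, the GPIO
+specifier and the slave device below are placeholders):
+
+	spi@20200000 {
+		compatible = "ti,dm6446-spi";
+		num-cs = <4>;
+		cs-gpios = <0>, <0>, <0>, <&gpio1 30 0>;
+
+		flash@3 {
+			compatible = "st,m25p32";
+			reg = <3>;	/* uses cs-gpios index 3: &gpio1 30 */
+			spi-max-frequency = <25000000>;
+		};
+	};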
index 4d7f375..46a311e 100644 (file)
@@ -83,6 +83,7 @@ mosaixtech    Mosaix Technologies, Inc.
 moxa   Moxa
 mpl    MPL AG
 mundoreader    Mundo Reader S.L.
+murata Murata Manufacturing Co., Ltd.
 mxicy  Macronix International Co., Ltd.
 national       National Semiconductor
 neonode                Neonode Inc.
index 4e30eba..9af538b 100644 (file)
@@ -1,6 +1,17 @@
 Email clients info for Linux
 ======================================================================
 
+Git
+----------------------------------------------------------------------
+These days most developers use `git send-email` instead of regular
+email clients.  The man page for this is quite good.  On the receiving
+end, maintainers use `git am` to apply the patches.
+
+If you are new to git, then send your first patch to yourself.  Save it
+as raw text including all the headers.  Run `git am raw_email.txt` and
+then review the changelog with `git log`.  When that works, send the
+patch to the appropriate mailing list(s).
+
 General Preferences
 ----------------------------------------------------------------------
 Patches for the Linux kernel are submitted via email, preferably as
index bee2a5f..a1c052c 100644 (file)
@@ -90,7 +90,7 @@ operations:
      to be cleared before proceeding:
 
                wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
-                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+                           TASK_UNINTERRUPTIBLE);
 
 
  (2) The operation may be fast asynchronous (FSCACHE_OP_FAST), in which case it
index 3bfda94..057b770 100644 (file)
@@ -1,7 +1,7 @@
 Kernel driver ntc_thermistor
 =================
 
-Supported thermistors:
+Supported thermistors from Murata:
 * Murata NTC Thermistors NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333
   Prefixes: 'ncp15wb473', 'ncp18wb473', 'ncp21wb473', 'ncp03wb473', 'ncp15wl333'
   Datasheet: Publicly available at Murata
@@ -15,9 +15,9 @@ Authors:
 Description
 -----------
 
-The NTC thermistor is a simple thermistor that requires users to provide the
-resistance and lookup the corresponding compensation table to get the
-temperature input.
+The NTC (Negative Temperature Coefficient) thermistor is a simple thermistor
+that requires users to provide the resistance and look up the corresponding
+compensation table to get the temperature input.
 
 The NTC driver provides lookup tables with a linear approximation function
 and four circuit models with an option not to use any of the four models.
index f1ea2c6..c587a96 100644 (file)
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
 If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
 device.
 
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series, provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
index d7e43fa..7e240a7 100644 (file)
@@ -197,6 +197,7 @@ Code  Seq#(hex)     Include File            Comments
                                        <mailto:gregkh@linuxfoundation.org>
 'a'    all     linux/atm*.h, linux/sonet.h     ATM on linux
                                        <http://lrcwww.epfl.ch/>
+'a'    00-0F   drivers/crypto/qat/qat_common/adf_cfg_common.h  conflict! qat driver
 'b'    00-FF                           conflict! bit3 vme host bridge
                                        <mailto:natalia@nikhefk.nikhef.nl>
 'c'    all     linux/cm4000_cs.h       conflict!
index d567a7c..c600e2f 100644 (file)
@@ -1171,7 +1171,7 @@ When kbuild executes, the following steps are followed (roughly):
              obvious reason.
 
     dtc
-       Create flattend device tree blob object suitable for linking
+       Create flattened device tree blob object suitable for linking
        into vmlinux. Device tree blobs linked into vmlinux are placed
        in an init section in the image. Platform code *must* copy the
        blob to non-init memory prior to calling unflatten_device_tree().
index 6eaa9cd..90f6139 100644 (file)
@@ -1097,6 +1097,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        that can be changed at run time by the
                        set_graph_function file in the debugfs tracing directory.
 
+       ftrace_graph_notrace=[function-list]
+                       [FTRACE] Do not trace from the functions specified in
+                       function-list.  This list is a comma separated list of
+                       functions that can be changed at run time by the
+                       set_graph_notrace file in the debugfs tracing directory.
+
        gamecon.map[2|3]=
                        [HW,JOY] Multisystem joystick and NES/SNES/PSX pad
                        support via parallel port (up to 5 devices per port)
@@ -1474,6 +1480,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        js=             [HW,JOY] Analog joystick
                        See Documentation/input/joystick.txt.
 
+       kaslr/nokaslr   [X86]
+                       Enable/disable kernel and module base offset ASLR
+                       (Address Space Layout Randomization) if built into
+                       the kernel. When CONFIG_HIBERNATION is selected,
+                       kASLR is disabled by default. When kASLR is enabled,
+                       hibernation will be disabled.
+
        keepinitrd      [HW,ARM]
 
        kernelcore=nn[KMG]      [KNL,X86,IA-64,PPC] This parameter
@@ -2110,10 +2123,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        noapic          [SMP,APIC] Tells the kernel to not make use of any
                        IOAPICs that may be present in the system.
 
-       nokaslr         [X86]
-                       Disable kernel and module base offset ASLR (Address
-                       Space Layout Randomization) if built into the kernel.
-
        noautogroup     Disable scheduler automatic task group creation.
 
        nobats          [PPC] Do not use BATs for mapping kernel lowmem
@@ -2184,6 +2193,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        in certain environments such as networked servers or
                        real-time systems.
 
+       nohibernate     [HIBERNATION] Disable hibernation and resume.
+
        nohz=           [KNL] Boottime enable/disable dynamic ticks
                        Valid arguments: on, off
                        Default: on
@@ -2785,6 +2796,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        leaf rcu_node structure.  Useful for very large
                        systems.
 
+       rcutree.jiffies_till_sched_qs= [KNL]
+                       Set required age in jiffies for a
+                       given grace period before RCU starts
+                       soliciting quiescent-state help from
+                       rcu_note_context_switch().
+
        rcutree.jiffies_till_first_fqs= [KNL]
                        Set delay from grace-period initialization to
                        first attempt to force quiescent states.
@@ -2796,6 +2813,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        quiescent states.  Units are jiffies, minimum
                        value is one, and maximum value is HZ.
 
+       rcutree.rcu_nocb_leader_stride= [KNL]
+                       Set the number of NOCB kthread groups, which
+                       defaults to the square root of the number of
+                       CPUs.  Larger numbers reduce the wakeup overhead
+                       on the per-CPU grace-period kthreads, but increase
+                       that same overhead on each group's leader.
+
        rcutree.qhimark= [KNL]
                        Set threshold of queued RCU callbacks beyond which
                        batch limiting is disabled.
@@ -2980,6 +3004,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                noresume        Don't check if there's a hibernation image
                                present during boot.
                nocompress      Don't compress/decompress hibernation images.
+               no              Disable hibernation and resume.
 
        retain_initrd   [RAM] Keep initrd memory after extraction
 
@@ -3124,6 +3149,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        [KNL] Should the soft-lockup detector generate panics.
                        Format: <integer>
 
+       softlockup_all_cpu_backtrace=
+                       [KNL] Should the soft-lockup detector generate
+                       backtraces on all CPUs.
+                       Format: <integer>
+
        sonypi.*=       [HW] Sony Programmable I/O Control Device driver
                        See Documentation/laptops/sonypi.txt
 
@@ -3515,7 +3545,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        the allocated input device; If set to 0, video driver
                        will only send out the event without touching backlight
                        brightness level.
-                       default: 0
+                       default: 1
 
        virtio_mmio.device=
                        [VMMIO] Memory mapped virtio (platform) device.
index d13b9a9..d399ae1 100644 (file)
@@ -8,8 +8,8 @@ disk-shock-protection.txt
        - information on hard disk shock protection.
 dslm.c
        - Simple Disk Sleep Monitor program
-hpfall.c
-       - (HP) laptop accelerometer program for disk protection.
+freefall.c
+       - (HP/DELL) laptop accelerometer program for disk protection.
 laptop-mode.txt
        - how to conserve battery power using laptop-mode.
 sony-laptop.txt
diff --git a/Documentation/laptops/freefall.c b/Documentation/laptops/freefall.c
new file mode 100644 (file)
index 0000000..aab2ff0
--- /dev/null
@@ -0,0 +1,177 @@
+/* Disk protection for HP/DELL machines.
+ *
+ * Copyright 2008 Eric Piel
+ * Copyright 2009 Pavel Machek <pavel@ucw.cz>
+ * Copyright 2012 Sonal Santan
+ * Copyright 2014 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * GPLv2.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sched.h>
+#include <syslog.h>
+
+static int noled;
+static char unload_heads_path[64];
+static char device_path[32];
+static const char app_name[] = "FREE FALL";
+
+static int set_unload_heads_path(char *device)
+{
+       char devname[64];
+
+       if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
+               return -EINVAL;
+       strncpy(devname, device + 5, sizeof(devname) - 1);
+       strncpy(device_path, device, sizeof(device_path) - 1);
+
+       snprintf(unload_heads_path, sizeof(unload_heads_path) - 1,
+                               "/sys/block/%s/device/unload_heads", devname);
+       return 0;
+}
+
+static int valid_disk(void)
+{
+       int fd = open(unload_heads_path, O_RDONLY);
+
+       if (fd < 0) {
+               perror(unload_heads_path);
+               return 0;
+       }
+
+       close(fd);
+       return 1;
+}
+
+static void write_int(char *path, int i)
+{
+       char buf[1024];
+       int fd = open(path, O_RDWR);
+
+       if (fd < 0) {
+               perror("open");
+               exit(1);
+       }
+
+       sprintf(buf, "%d", i);
+
+       if (write(fd, buf, strlen(buf)) != strlen(buf)) {
+               perror("write");
+               exit(1);
+       }
+
+       close(fd);
+}
+
+static void set_led(int on)
+{
+       if (noled)
+               return;
+       write_int("/sys/class/leds/hp::hddprotect/brightness", on);
+}
+
+static void protect(int seconds)
+{
+       const char *str = (seconds == 0) ? "Unparked" : "Parked";
+
+       write_int(unload_heads_path, seconds*1000);
+       syslog(LOG_INFO, "%s %s disk head\n", str, device_path);
+}
+
+static int on_ac(void)
+{
+       /* /sys/class/power_supply/AC0/online */
+       return 1;
+}
+
+static int lid_open(void)
+{
+       /* /proc/acpi/button/lid/LID/state */
+       return 1;
+}
+
+static void ignore_me(int signum)
+{
+       protect(0);
+       set_led(0);
+}
+
+int main(int argc, char **argv)
+{
+       int fd, ret;
+       struct stat st;
+       struct sched_param param;
+
+       if (argc == 1)
+               ret = set_unload_heads_path("/dev/sda");
+       else if (argc == 2)
+               ret = set_unload_heads_path(argv[1]);
+       else
+               ret = -EINVAL;
+
+       if (ret || !valid_disk()) {
+               fprintf(stderr, "usage: %s <device> (default: /dev/sda)\n",
+                               argv[0]);
+               exit(1);
+       }
+
+       fd = open("/dev/freefall", O_RDONLY);
+       if (fd < 0) {
+               perror("/dev/freefall");
+               return EXIT_FAILURE;
+       }
+
+       if (stat("/sys/class/leds/hp::hddprotect/brightness", &st))
+               noled = 1;
+
+       if (daemon(0, 0) != 0) {
+               perror("daemon");
+               return EXIT_FAILURE;
+       }
+
+       openlog(app_name, LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);
+
+       param.sched_priority = sched_get_priority_max(SCHED_FIFO);
+       sched_setscheduler(0, SCHED_FIFO, &param);
+       mlockall(MCL_CURRENT|MCL_FUTURE);
+
+       signal(SIGALRM, ignore_me);
+
+       for (;;) {
+               unsigned char count;
+
+               ret = read(fd, &count, sizeof(count));
+               alarm(0);
+               if ((ret == -1) && (errno == EINTR)) {
+                       /* Alarm expired, time to unpark the heads */
+                       continue;
+               }
+
+               if (ret != sizeof(count)) {
+                       perror("read");
+                       break;
+               }
+
+               protect(21);
+               set_led(1);
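+               /*
+                * on_ac() and lid_open() are stubs that always return 1,
+                * and the constant 1 below short-circuits the test anyway,
+                * so the short 2 second re-arm interval is always taken.
+                */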
+               if (1 || on_ac() || lid_open())
+                       alarm(2);
+               else
+                       alarm(20);
+       }
+
+       closelog();
+       close(fd);
+       return EXIT_SUCCESS;
+}
diff --git a/Documentation/laptops/hpfall.c b/Documentation/laptops/hpfall.c
deleted file mode 100644 (file)
index b85dbba..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-/* Disk protection for HP machines.
- *
- * Copyright 2008 Eric Piel
- * Copyright 2009 Pavel Machek <pavel@ucw.cz>
- *
- * GPLv2.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <string.h>
-#include <stdint.h>
-#include <errno.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sched.h>
-
-char unload_heads_path[64];
-
-int set_unload_heads_path(char *device)
-{
-       char devname[64];
-
-       if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
-               return -EINVAL;
-       strncpy(devname, device + 5, sizeof(devname));
-
-       snprintf(unload_heads_path, sizeof(unload_heads_path) - 1,
-                               "/sys/block/%s/device/unload_heads", devname);
-       return 0;
-}
-int valid_disk(void)
-{
-       int fd = open(unload_heads_path, O_RDONLY);
-       if (fd < 0) {
-               perror(unload_heads_path);
-               return 0;
-       }
-
-       close(fd);
-       return 1;
-}
-
-void write_int(char *path, int i)
-{
-       char buf[1024];
-       int fd = open(path, O_RDWR);
-       if (fd < 0) {
-               perror("open");
-               exit(1);
-       }
-       sprintf(buf, "%d", i);
-       if (write(fd, buf, strlen(buf)) != strlen(buf)) {
-               perror("write");
-               exit(1);
-       }
-       close(fd);
-}
-
-void set_led(int on)
-{
-       write_int("/sys/class/leds/hp::hddprotect/brightness", on);
-}
-
-void protect(int seconds)
-{
-       write_int(unload_heads_path, seconds*1000);
-}
-
-int on_ac(void)
-{
-//     /sys/class/power_supply/AC0/online
-}
-
-int lid_open(void)
-{
-//     /proc/acpi/button/lid/LID/state
-}
-
-void ignore_me(void)
-{
-       protect(0);
-       set_led(0);
-}
-
-int main(int argc, char **argv)
-{
-       int fd, ret;
-       struct sched_param param;
-
-       if (argc == 1)
-               ret = set_unload_heads_path("/dev/sda");
-       else if (argc == 2)
-               ret = set_unload_heads_path(argv[1]);
-       else
-               ret = -EINVAL;
-
-       if (ret || !valid_disk()) {
-               fprintf(stderr, "usage: %s <device> (default: /dev/sda)\n",
-                               argv[0]);
-               exit(1);
-       }
-
-       fd = open("/dev/freefall", O_RDONLY);
-       if (fd < 0) {
-               perror("/dev/freefall");
-               return EXIT_FAILURE;
-       }
-
-       daemon(0, 0);
-       param.sched_priority = sched_get_priority_max(SCHED_FIFO);
-       sched_setscheduler(0, SCHED_FIFO, &param);
-       mlockall(MCL_CURRENT|MCL_FUTURE);
-
-       signal(SIGALRM, ignore_me);
-
-       for (;;) {
-               unsigned char count;
-
-               ret = read(fd, &count, sizeof(count));
-               alarm(0);
-               if ((ret == -1) && (errno == EINTR)) {
-                       /* Alarm expired, time to unpark the heads */
-                       continue;
-               }
-
-               if (ret != sizeof(count)) {
-                       perror("read");
-                       break;
-               }
-
-               protect(21);
-               set_led(1);
-               if (1 || on_ac() || lid_open())
-                       alarm(2);
-               else
-                       alarm(20);
-       }
-
-       close(fd);
-       return EXIT_SUCCESS;
-}
index f1dc4a2..a4de88f 100644 (file)
@@ -757,10 +757,14 @@ SMP BARRIER PAIRING
 When dealing with CPU-CPU interactions, certain types of memory barrier should
 always be paired.  A lack of appropriate pairing is almost certainly an error.
 
-A write barrier should always be paired with a data dependency barrier or read
-barrier, though a general barrier would also be viable.  Similarly a read
-barrier or a data dependency barrier should always be paired with at least an
-write barrier, though, again, a general barrier is viable:
+General barriers pair with each other, though they also pair with
+most other types of barriers, albeit without transitivity.  An acquire
+barrier pairs with a release barrier, but both may also pair with other
+barriers, including of course general barriers.  A write barrier pairs
+with a data dependency barrier, an acquire barrier, a release barrier,
+a read barrier, or a general barrier.  Similarly a read barrier or a
+data dependency barrier pairs with a write barrier, an acquire barrier,
+a release barrier, or a general barrier:
 
        CPU 1                 CPU 2
        ===============       ===============
@@ -1893,6 +1897,21 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
            <general barrier>             STORE current->state
        LOAD event_indicated
 
+To repeat, this write memory barrier is present if and only if something
+is actually awakened.  To see this, consider the following sequence of
+events, where X and Y are both initially zero:
+
+       CPU 1                           CPU 2
+       =============================== ===============================
+       X = 1;                          STORE event_indicated
+       smp_mb();                       wake_up();
+       Y = 1;                          wait_event(wq, Y == 1);
+       wake_up();                        load from Y sees 1, no memory barrier
+                                       load from X might see 0
+
+In contrast, if a wakeup does occur, CPU 2's load from X would be guaranteed
+to see 1.
+
 The available waker functions include:
 
        complete();
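
A minimal kernel-style sketch of the point above; producer(), consumer(), wq,
flag and data are illustrative assumptions rather than code from this patch.
It shows why both sides need explicit barriers when the sleep/wake path may
be skipped:

        #include <asm/barrier.h>
        #include <linux/printk.h>
        #include <linux/wait.h>

        static int flag, data;
        static DECLARE_WAIT_QUEUE_HEAD(wq);

        static void producer(void)
        {
                data = 42;      /* plays the role of "X = 1" above */
                smp_mb();       /* order the data store before the flag */
                flag = 1;       /* plays the role of "Y = 1" */
                wake_up(&wq);   /* implies a barrier only if a task is woken */
        }

        static void consumer(void)
        {
                wait_event(wq, flag == 1);
                /*
                 * If this task never slept (flag was already 1), wait_event()
                 * implies no memory barrier, so an explicit one is needed
                 * before the read of data can be trusted.
                 */
                smp_rmb();
                pr_info("data=%d\n", data);
        }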
index f304edb..45134dc 100644 (file)
@@ -209,15 +209,12 @@ If memory device is found, memory hotplug code will be called.
 
 4.2 Notify memory hot-add event by hand
 ------------
-On powerpc, the firmware does not notify a memory hotplug event to the kernel.
-Therefore, "probe" interface is supported to notify the event to the kernel.
-This interface depends on CONFIG_ARCH_MEMORY_PROBE.
-
-CONFIG_ARCH_MEMORY_PROBE is supported on powerpc only. On x86, this config
-option is disabled by default since ACPI notifies a memory hotplug event to
-the kernel, which performs its hotplug operation as the result. Please
-enable this option if you need the "probe" interface for testing purposes
-on x86.
+On some architectures, the firmware may not notify the kernel of a memory
+hotplug event.  Therefore, the memory "probe" interface is provided so
+that the event can be reported to the kernel explicitly.  This interface
+depends on CONFIG_ARCH_MEMORY_PROBE and can be configured on powerpc, sh,
+and x86 if hotplug is supported, although on x86 this should normally be
+handled by ACPI notification.
 
 Probe interface is located at
 /sys/devices/system/memory/probe
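
As a rough user-space illustration of driving this file (probe_memory() is a
hypothetical helper, and phys_addr must be a valid, section-aligned physical
address for the write to succeed):

        #include <fcntl.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>

        /* Ask the kernel to hot-add the memory section at phys_addr. */
        static int probe_memory(unsigned long long phys_addr)
        {
                char buf[32];
                int fd = open("/sys/devices/system/memory/probe", O_WRONLY);

                if (fd < 0) {
                        perror("open");
                        return -1;
                }
                snprintf(buf, sizeof(buf), "0x%llx", phys_addr);
                if (write(fd, buf, strlen(buf)) < 0) {
                        perror("write");
                        close(fd);
                        return -1;
                }
                close(fd);
                return 0;
        }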
index f1ac2da..ba1d502 100644 (file)
@@ -17,6 +17,7 @@
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
+#define _GNU_SOURCE
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
 #define CLOCK_INVALID -1
 #endif
 
-/* When glibc offers the syscall, this will go away. */
+/* clock_adjtime is not available in GLIBC < 2.14 */
+#if !__GLIBC_PREREQ(2, 14)
 #include <sys/syscall.h>
 static int clock_adjtime(clockid_t id, struct timex *tx)
 {
        return syscall(__NR_clock_adjtime, id, tx);
 }
+#endif
 
 static clockid_t get_clockid(int fd)
 {
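
A short usage sketch for clock_adjtime(); adjust_freq() is a hypothetical
helper, clkid is assumed to come from get_clockid(), and the scaling follows
the adjtimex() convention that freq is ppm carried with 16 fractional bits:

        #include <string.h>
        #include <sys/timex.h>
        #include <time.h>

        /* Nudge the clock frequency by ppb parts per billion (sketch). */
        static int adjust_freq(clockid_t clkid, long ppb)
        {
                struct timex tx;

                memset(&tx, 0, sizeof(tx));
                tx.modes = ADJ_FREQUENCY;
                tx.freq = (long) (ppb * 65.536);  /* ppb -> 2^-16 ppm units */
                return clock_adjtime(clkid, &tx);
        }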
index 85c362d..d1ab5e1 100644 (file)
@@ -286,6 +286,11 @@ STAC92HD83*
   hp-inv-led   HP with broken BIOS for inverted mute LED
   auto         BIOS setup (default)
 
+STAC92HD95
+==========
+  hp-led       LED support for HP laptops
+  hp-bass      Bass HPF setup for HP Spectre 13
+
 STAC9872
 ========
   vaio         VAIO laptop without SPDIF
index 708bb7f..c14374e 100644 (file)
@@ -75,6 +75,7 @@ show up in /proc/sys/kernel:
 - shmall
 - shmmax                      [ sysv ipc ]
 - shmmni
+- softlockup_all_cpu_backtrace
 - stop-a                      [ SPARC only ]
 - sysrq                       ==> Documentation/sysrq.txt
 - sysctl_writes_strict
@@ -783,6 +784,22 @@ via the /proc/sys interface:
 
 ==============================================================
 
+softlockup_all_cpu_backtrace:
+
+This value controls whether the soft lockup detector gathers further
+debug information when a soft lockup condition is detected. If
+enabled, each cpu is issued an NMI and instructed to capture a stack
+trace.
+
+This feature is only applicable to architectures which support
+NMI.
+
+0: do nothing. This is the default behavior.
+
+1: on detection, capture more debug information.
+
+==============================================================
+
 tainted:
 
 Non-zero if the kernel has been tainted.  Numeric values, which
index bd4b34c..4415aa9 100644 (file)
@@ -702,7 +702,8 @@ The batch value of each per cpu pagelist is also updated as a result.  It is
 set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
 
 The initial value is zero.  Kernel does not use this value at boot time to set
-the high water marks for each per cpu page list.
+the high water marks for each per cpu page list.  If the user writes '0' to this
+sysctl, it will revert to this default behavior.
 
 ==============================================================
 
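A worked example of the arithmetic stated above, assuming 4K pages
(PAGE_SHIFT = 12): if a write to this sysctl leaves pcp->high at, say,
32768 pages, then high/4 gives 8192, which is clamped to the
PAGE_SHIFT * 8 = 96 upper limit, so batch ends up at 96.
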
index efceb78..60bc293 100644 (file)
@@ -4,7 +4,7 @@ Kernel driver nouveau
 Supported chips:
 * NV43+
 
-Authors: Martin Peres (mupuf) <martin.peres@labri.fr>
+Authors: Martin Peres (mupuf) <martin.peres@free.fr>
 
 Description
 ---------
@@ -68,8 +68,9 @@ Your fan can be driven in different modes:
 
 NOTE: Be sure to use the manual mode if you want to drive the fan speed manually
 
-NOTE2: Not all fan management modes may be supported on all chipsets. We are
-working on it.
+NOTE2: When operating in manual mode outside the vbios-defined
+[PWM_min, PWM_max] range, the reported fan speed (RPM) may not be accurate
+depending on your hardware.
 
 Bug reports
 ---------
index 3f669b9..dd5f916 100644 (file)
@@ -102,30 +102,6 @@ extern void mcount(void);
 EXPORT_SYMBOL(mcount);
 
 
-HAVE_FUNCTION_TRACE_MCOUNT_TEST
--------------------------------
-
-This is an optional optimization for the normal case when tracing is turned off
-in the system.  If you do not enable this Kconfig option, the common ftrace
-code will take care of doing the checking for you.
-
-To support this feature, you only need to check the function_trace_stop
-variable in the mcount function.  If it is non-zero, there is no tracing to be
-done at all, so you can return.
-
-This additional pseudo code would simply be:
-void mcount(void)
-{
-       /* save any bare state needed in order to do initial checking */
-
-+      if (function_trace_stop)
-+              return;
-
-       extern void (*ftrace_trace_function)(unsigned long, unsigned long);
-       if (ftrace_trace_function != ftrace_stub)
-...
-
-
 HAVE_FUNCTION_GRAPH_TRACER
 --------------------------
 
@@ -328,8 +304,6 @@ void mcount(void)
 
 void ftrace_caller(void)
 {
-       /* implement HAVE_FUNCTION_TRACE_MCOUNT_TEST if you desire */
-
        /* save all state needed by the ABI (see paragraph above) */
 
        unsigned long frompc = ...;
index 2479b2a..4da4261 100644 (file)
@@ -1515,7 +1515,7 @@ Doing the same with chrt -r 5 and function-trace set.
   <idle>-0       3d.h4    1us+:      0:120:R   + [003]  2448: 94:R sleep
   <idle>-0       3d.h4    2us : ttwu_do_activate.constprop.87 <-try_to_wake_up
   <idle>-0       3d.h3    3us : check_preempt_curr <-ttwu_do_wakeup
-  <idle>-0       3d.h3    3us : resched_task <-check_preempt_curr
+  <idle>-0       3d.h3    3us : resched_curr <-check_preempt_curr
   <idle>-0       3dNh3    4us : task_woken_rt <-ttwu_do_wakeup
   <idle>-0       3dNh3    4us : _raw_spin_unlock <-try_to_wake_up
   <idle>-0       3dNh3    4us : sub_preempt_count <-_raw_spin_unlock
index 00e425f..78c9a7b 100644 (file)
@@ -47,7 +47,6 @@ use constant HIGH_KSWAPD_REWAKEUP             => 21;
 use constant HIGH_NR_SCANNED                   => 22;
 use constant HIGH_NR_TAKEN                     => 23;
 use constant HIGH_NR_RECLAIMED                 => 24;
-use constant HIGH_NR_CONTIG_DIRTY              => 25;
 
 my %perprocesspid;
 my %perprocess;
@@ -105,7 +104,7 @@ my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
 my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
-my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)';
+my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) file=([0-9]*)';
 my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
 my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -200,7 +199,7 @@ $regex_lru_isolate = generate_traceevent_regex(
                        $regex_lru_isolate_default,
                        "isolate_mode", "order",
                        "nr_requested", "nr_scanned", "nr_taken",
-                       "contig_taken", "contig_dirty", "contig_failed");
+                       "file");
 $regex_lru_shrink_inactive = generate_traceevent_regex(
                        "vmscan/mm_vmscan_lru_shrink_inactive",
                        $regex_lru_shrink_inactive_default,
@@ -375,7 +374,6 @@ EVENT_PROCESS:
                        }
                        my $isolate_mode = $1;
                        my $nr_scanned = $4;
-                       my $nr_contig_dirty = $7;
 
                        # To closer match vmstat scanning statistics, only count isolate_both
                        # and isolate_inactive as scanning. isolate_active is rotation
@@ -385,7 +383,6 @@ EVENT_PROCESS:
                        if ($isolate_mode != 2) {
                                $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
                        }
-                       $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
                } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
                        $details = $6;
                        if ($details !~ /$regex_lru_shrink_inactive/o) {
@@ -539,13 +536,6 @@ sub dump_stats {
                                }
                        }
                }
-               if ($stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY}) {
-                       print "      ";
-                       my $count = $stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY};
-                       if ($count != 0) {
-                               print "contig-dirty=$count ";
-                       }
-               }
 
                print "\n";
        }
index 0fe3649..68cda1f 100644 (file)
@@ -297,6 +297,15 @@ struct kvm_regs {
        __u64 rip, rflags;
 };
 
+/* mips */
+struct kvm_regs {
+       /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+       __u64 gpr[32];
+       __u64 hi;
+       __u64 lo;
+       __u64 pc;
+};
+
 
 4.12 KVM_SET_REGS
 
@@ -378,7 +387,7 @@ struct kvm_translation {
 4.16 KVM_INTERRUPT
 
 Capability: basic
-Architectures: x86, ppc
+Architectures: x86, ppc, mips
 Type: vcpu ioctl
 Parameters: struct kvm_interrupt (in)
 Returns: 0 on success, -1 on error
@@ -423,6 +432,11 @@ c) KVM_INTERRUPT_SET_LEVEL
 Note that any value for 'irq' other than the ones stated above is invalid
 and incurs unexpected behavior.
 
+MIPS:
+
+Queues an external interrupt to be injected into the virtual CPU. A negative
+interrupt number dequeues the interrupt.
+
 
 4.17 KVM_DEBUG_GUEST
 
@@ -512,7 +526,7 @@ struct kvm_cpuid {
 4.21 KVM_SET_SIGNAL_MASK
 
 Capability: basic
-Architectures: x86
+Architectures: all
 Type: vcpu ioctl
 Parameters: struct kvm_signal_mask (in)
 Returns: 0 on success, -1 on error
@@ -974,7 +988,7 @@ for vm-wide capabilities.
 4.38 KVM_GET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64
+Architectures: x86, ia64, s390
 Type: vcpu ioctl
 Parameters: struct kvm_mp_state (out)
 Returns: 0 on success; -1 on error
@@ -988,24 +1002,32 @@ uniprocessor guests).
 
 Possible values are:
 
- - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running
+ - KVM_MP_STATE_RUNNABLE:        the vcpu is currently running [x86, ia64]
  - KVM_MP_STATE_UNINITIALIZED:   the vcpu is an application processor (AP)
-                                 which has not yet received an INIT signal
+                                 which has not yet received an INIT signal [x86,
+                                 ia64]
  - KVM_MP_STATE_INIT_RECEIVED:   the vcpu has received an INIT signal, and is
-                                 now ready for a SIPI
+                                 now ready for a SIPI [x86, ia64]
  - KVM_MP_STATE_HALTED:          the vcpu has executed a HLT instruction and
-                                 is waiting for an interrupt
+                                 is waiting for an interrupt [x86, ia64]
  - KVM_MP_STATE_SIPI_RECEIVED:   the vcpu has just received a SIPI (vector
-                                 accessible via KVM_GET_VCPU_EVENTS)
+                                 accessible via KVM_GET_VCPU_EVENTS) [x86, ia64]
+ - KVM_MP_STATE_STOPPED:         the vcpu is stopped [s390]
+ - KVM_MP_STATE_CHECK_STOP:      the vcpu is in a special error state [s390]
+ - KVM_MP_STATE_OPERATING:       the vcpu is operating (running or halted)
+                                 [s390]
+ - KVM_MP_STATE_LOAD:            the vcpu is in a special load/startup state
+                                 [s390]
 
-This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
-irqchip, the multiprocessing state must be maintained by userspace.
+On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+in-kernel irqchip, the multiprocessing state must be maintained by userspace on
+these architectures.
 
 
 4.39 KVM_SET_MP_STATE
 
 Capability: KVM_CAP_MP_STATE
-Architectures: x86, ia64
+Architectures: x86, ia64, s390
 Type: vcpu ioctl
 Parameters: struct kvm_mp_state (in)
 Returns: 0 on success; -1 on error
@@ -1013,8 +1035,9 @@ Returns: 0 on success; -1 on error
 Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
 arguments.
 
-This ioctl is only useful after KVM_CREATE_IRQCHIP.  Without an in-kernel
-irqchip, the multiprocessing state must be maintained by userspace.
+On x86 and ia64, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
+in-kernel irqchip, the multiprocessing state must be maintained by userspace on
+these architectures.
 
 
 4.40 KVM_SET_IDENTITY_MAP_ADDR
@@ -1774,122 +1797,151 @@ and architecture specific registers. Each have their own range of operation
 and their own constants and width. To keep track of the implemented
 registers, find a list below:
 
-  Arch  |       Register        | Width (bits)
-        |                       |
-  PPC   | KVM_REG_PPC_HIOR      | 64
-  PPC   | KVM_REG_PPC_IAC1      | 64
-  PPC   | KVM_REG_PPC_IAC2      | 64
-  PPC   | KVM_REG_PPC_IAC3      | 64
-  PPC   | KVM_REG_PPC_IAC4      | 64
-  PPC   | KVM_REG_PPC_DAC1      | 64
-  PPC   | KVM_REG_PPC_DAC2      | 64
-  PPC   | KVM_REG_PPC_DABR      | 64
-  PPC   | KVM_REG_PPC_DSCR      | 64
-  PPC   | KVM_REG_PPC_PURR      | 64
-  PPC   | KVM_REG_PPC_SPURR     | 64
-  PPC   | KVM_REG_PPC_DAR       | 64
-  PPC   | KVM_REG_PPC_DSISR     | 32
-  PPC   | KVM_REG_PPC_AMR       | 64
-  PPC   | KVM_REG_PPC_UAMOR     | 64
-  PPC   | KVM_REG_PPC_MMCR0     | 64
-  PPC   | KVM_REG_PPC_MMCR1     | 64
-  PPC   | KVM_REG_PPC_MMCRA     | 64
-  PPC   | KVM_REG_PPC_MMCR2     | 64
-  PPC   | KVM_REG_PPC_MMCRS     | 64
-  PPC   | KVM_REG_PPC_SIAR      | 64
-  PPC   | KVM_REG_PPC_SDAR      | 64
-  PPC   | KVM_REG_PPC_SIER      | 64
-  PPC   | KVM_REG_PPC_PMC1      | 32
-  PPC   | KVM_REG_PPC_PMC2      | 32
-  PPC   | KVM_REG_PPC_PMC3      | 32
-  PPC   | KVM_REG_PPC_PMC4      | 32
-  PPC   | KVM_REG_PPC_PMC5      | 32
-  PPC   | KVM_REG_PPC_PMC6      | 32
-  PPC   | KVM_REG_PPC_PMC7      | 32
-  PPC   | KVM_REG_PPC_PMC8      | 32
-  PPC   | KVM_REG_PPC_FPR0      | 64
+  Arch  |           Register            | Width (bits)
+        |                               |
+  PPC   | KVM_REG_PPC_HIOR              | 64
+  PPC   | KVM_REG_PPC_IAC1              | 64
+  PPC   | KVM_REG_PPC_IAC2              | 64
+  PPC   | KVM_REG_PPC_IAC3              | 64
+  PPC   | KVM_REG_PPC_IAC4              | 64
+  PPC   | KVM_REG_PPC_DAC1              | 64
+  PPC   | KVM_REG_PPC_DAC2              | 64
+  PPC   | KVM_REG_PPC_DABR              | 64
+  PPC   | KVM_REG_PPC_DSCR              | 64
+  PPC   | KVM_REG_PPC_PURR              | 64
+  PPC   | KVM_REG_PPC_SPURR             | 64
+  PPC   | KVM_REG_PPC_DAR               | 64
+  PPC   | KVM_REG_PPC_DSISR             | 32
+  PPC   | KVM_REG_PPC_AMR               | 64
+  PPC   | KVM_REG_PPC_UAMOR             | 64
+  PPC   | KVM_REG_PPC_MMCR0             | 64
+  PPC   | KVM_REG_PPC_MMCR1             | 64
+  PPC   | KVM_REG_PPC_MMCRA             | 64
+  PPC   | KVM_REG_PPC_MMCR2             | 64
+  PPC   | KVM_REG_PPC_MMCRS             | 64
+  PPC   | KVM_REG_PPC_SIAR              | 64
+  PPC   | KVM_REG_PPC_SDAR              | 64
+  PPC   | KVM_REG_PPC_SIER              | 64
+  PPC   | KVM_REG_PPC_PMC1              | 32
+  PPC   | KVM_REG_PPC_PMC2              | 32
+  PPC   | KVM_REG_PPC_PMC3              | 32
+  PPC   | KVM_REG_PPC_PMC4              | 32
+  PPC   | KVM_REG_PPC_PMC5              | 32
+  PPC   | KVM_REG_PPC_PMC6              | 32
+  PPC   | KVM_REG_PPC_PMC7              | 32
+  PPC   | KVM_REG_PPC_PMC8              | 32
+  PPC   | KVM_REG_PPC_FPR0              | 64
+          ...
+  PPC   | KVM_REG_PPC_FPR31             | 64
+  PPC   | KVM_REG_PPC_VR0               | 128
           ...
-  PPC   | KVM_REG_PPC_FPR31     | 64
-  PPC   | KVM_REG_PPC_VR0       | 128
+  PPC   | KVM_REG_PPC_VR31              | 128
+  PPC   | KVM_REG_PPC_VSR0              | 128
           ...
-  PPC   | KVM_REG_PPC_VR31      | 128
-  PPC   | KVM_REG_PPC_VSR0      | 128
+  PPC   | KVM_REG_PPC_VSR31             | 128
+  PPC   | KVM_REG_PPC_FPSCR             | 64
+  PPC   | KVM_REG_PPC_VSCR              | 32
+  PPC   | KVM_REG_PPC_VPA_ADDR          | 64
+  PPC   | KVM_REG_PPC_VPA_SLB           | 128
+  PPC   | KVM_REG_PPC_VPA_DTL           | 128
+  PPC   | KVM_REG_PPC_EPCR              | 32
+  PPC   | KVM_REG_PPC_EPR               | 32
+  PPC   | KVM_REG_PPC_TCR               | 32
+  PPC   | KVM_REG_PPC_TSR               | 32
+  PPC   | KVM_REG_PPC_OR_TSR            | 32
+  PPC   | KVM_REG_PPC_CLEAR_TSR         | 32
+  PPC   | KVM_REG_PPC_MAS0              | 32
+  PPC   | KVM_REG_PPC_MAS1              | 32
+  PPC   | KVM_REG_PPC_MAS2              | 64
+  PPC   | KVM_REG_PPC_MAS7_3            | 64
+  PPC   | KVM_REG_PPC_MAS4              | 32
+  PPC   | KVM_REG_PPC_MAS6              | 32
+  PPC   | KVM_REG_PPC_MMUCFG            | 32
+  PPC   | KVM_REG_PPC_TLB0CFG           | 32
+  PPC   | KVM_REG_PPC_TLB1CFG           | 32
+  PPC   | KVM_REG_PPC_TLB2CFG           | 32
+  PPC   | KVM_REG_PPC_TLB3CFG           | 32
+  PPC   | KVM_REG_PPC_TLB0PS            | 32
+  PPC   | KVM_REG_PPC_TLB1PS            | 32
+  PPC   | KVM_REG_PPC_TLB2PS            | 32
+  PPC   | KVM_REG_PPC_TLB3PS            | 32
+  PPC   | KVM_REG_PPC_EPTCFG            | 32
+  PPC   | KVM_REG_PPC_ICP_STATE         | 64
+  PPC   | KVM_REG_PPC_TB_OFFSET         | 64
+  PPC   | KVM_REG_PPC_SPMC1             | 32
+  PPC   | KVM_REG_PPC_SPMC2             | 32
+  PPC   | KVM_REG_PPC_IAMR              | 64
+  PPC   | KVM_REG_PPC_TFHAR             | 64
+  PPC   | KVM_REG_PPC_TFIAR             | 64
+  PPC   | KVM_REG_PPC_TEXASR            | 64
+  PPC   | KVM_REG_PPC_FSCR              | 64
+  PPC   | KVM_REG_PPC_PSPB              | 32
+  PPC   | KVM_REG_PPC_EBBHR             | 64
+  PPC   | KVM_REG_PPC_EBBRR             | 64
+  PPC   | KVM_REG_PPC_BESCR             | 64
+  PPC   | KVM_REG_PPC_TAR               | 64
+  PPC   | KVM_REG_PPC_DPDES             | 64
+  PPC   | KVM_REG_PPC_DAWR              | 64
+  PPC   | KVM_REG_PPC_DAWRX             | 64
+  PPC   | KVM_REG_PPC_CIABR             | 64
+  PPC   | KVM_REG_PPC_IC                | 64
+  PPC   | KVM_REG_PPC_VTB               | 64
+  PPC   | KVM_REG_PPC_CSIGR             | 64
+  PPC   | KVM_REG_PPC_TACR              | 64
+  PPC   | KVM_REG_PPC_TCSCR             | 64
+  PPC   | KVM_REG_PPC_PID               | 64
+  PPC   | KVM_REG_PPC_ACOP              | 64
+  PPC   | KVM_REG_PPC_VRSAVE            | 32
+  PPC   | KVM_REG_PPC_LPCR              | 64
+  PPC   | KVM_REG_PPC_PPR               | 64
+  PPC   | KVM_REG_PPC_ARCH_COMPAT       | 32
+  PPC   | KVM_REG_PPC_DABRX             | 32
+  PPC   | KVM_REG_PPC_WORT              | 64
+  PPC   | KVM_REG_PPC_TM_GPR0           | 64
           ...
-  PPC   | KVM_REG_PPC_VSR31     | 128
-  PPC   | KVM_REG_PPC_FPSCR     | 64
-  PPC   | KVM_REG_PPC_VSCR      | 32
-  PPC   | KVM_REG_PPC_VPA_ADDR  | 64
-  PPC   | KVM_REG_PPC_VPA_SLB   | 128
-  PPC   | KVM_REG_PPC_VPA_DTL   | 128
-  PPC   | KVM_REG_PPC_EPCR     | 32
-  PPC   | KVM_REG_PPC_EPR      | 32
-  PPC   | KVM_REG_PPC_TCR      | 32
-  PPC   | KVM_REG_PPC_TSR      | 32
-  PPC   | KVM_REG_PPC_OR_TSR   | 32
-  PPC   | KVM_REG_PPC_CLEAR_TSR        | 32
-  PPC   | KVM_REG_PPC_MAS0     | 32
-  PPC   | KVM_REG_PPC_MAS1     | 32
-  PPC   | KVM_REG_PPC_MAS2     | 64
-  PPC   | KVM_REG_PPC_MAS7_3   | 64
-  PPC   | KVM_REG_PPC_MAS4     | 32
-  PPC   | KVM_REG_PPC_MAS6     | 32
-  PPC   | KVM_REG_PPC_MMUCFG   | 32
-  PPC   | KVM_REG_PPC_TLB0CFG  | 32
-  PPC   | KVM_REG_PPC_TLB1CFG  | 32
-  PPC   | KVM_REG_PPC_TLB2CFG  | 32
-  PPC   | KVM_REG_PPC_TLB3CFG  | 32
-  PPC   | KVM_REG_PPC_TLB0PS   | 32
-  PPC   | KVM_REG_PPC_TLB1PS   | 32
-  PPC   | KVM_REG_PPC_TLB2PS   | 32
-  PPC   | KVM_REG_PPC_TLB3PS   | 32
-  PPC   | KVM_REG_PPC_EPTCFG   | 32
-  PPC   | KVM_REG_PPC_ICP_STATE | 64
-  PPC   | KVM_REG_PPC_TB_OFFSET        | 64
-  PPC   | KVM_REG_PPC_SPMC1    | 32
-  PPC   | KVM_REG_PPC_SPMC2    | 32
-  PPC   | KVM_REG_PPC_IAMR     | 64
-  PPC   | KVM_REG_PPC_TFHAR    | 64
-  PPC   | KVM_REG_PPC_TFIAR    | 64
-  PPC   | KVM_REG_PPC_TEXASR   | 64
-  PPC   | KVM_REG_PPC_FSCR     | 64
-  PPC   | KVM_REG_PPC_PSPB     | 32
-  PPC   | KVM_REG_PPC_EBBHR    | 64
-  PPC   | KVM_REG_PPC_EBBRR    | 64
-  PPC   | KVM_REG_PPC_BESCR    | 64
-  PPC   | KVM_REG_PPC_TAR      | 64
-  PPC   | KVM_REG_PPC_DPDES    | 64
-  PPC   | KVM_REG_PPC_DAWR     | 64
-  PPC   | KVM_REG_PPC_DAWRX    | 64
-  PPC   | KVM_REG_PPC_CIABR    | 64
-  PPC   | KVM_REG_PPC_IC       | 64
-  PPC   | KVM_REG_PPC_VTB      | 64
-  PPC   | KVM_REG_PPC_CSIGR    | 64
-  PPC   | KVM_REG_PPC_TACR     | 64
-  PPC   | KVM_REG_PPC_TCSCR    | 64
-  PPC   | KVM_REG_PPC_PID      | 64
-  PPC   | KVM_REG_PPC_ACOP     | 64
-  PPC   | KVM_REG_PPC_VRSAVE   | 32
-  PPC   | KVM_REG_PPC_LPCR     | 64
-  PPC   | KVM_REG_PPC_PPR      | 64
-  PPC   | KVM_REG_PPC_ARCH_COMPAT 32
-  PPC   | KVM_REG_PPC_DABRX     | 32
-  PPC   | KVM_REG_PPC_WORT      | 64
-  PPC   | KVM_REG_PPC_TM_GPR0  | 64
+  PPC   | KVM_REG_PPC_TM_GPR31          | 64
+  PPC   | KVM_REG_PPC_TM_VSR0           | 128
           ...
-  PPC   | KVM_REG_PPC_TM_GPR31 | 64
-  PPC   | KVM_REG_PPC_TM_VSR0  | 128
+  PPC   | KVM_REG_PPC_TM_VSR63          | 128
+  PPC   | KVM_REG_PPC_TM_CR             | 64
+  PPC   | KVM_REG_PPC_TM_LR             | 64
+  PPC   | KVM_REG_PPC_TM_CTR            | 64
+  PPC   | KVM_REG_PPC_TM_FPSCR          | 64
+  PPC   | KVM_REG_PPC_TM_AMR            | 64
+  PPC   | KVM_REG_PPC_TM_PPR            | 64
+  PPC   | KVM_REG_PPC_TM_VRSAVE         | 64
+  PPC   | KVM_REG_PPC_TM_VSCR           | 32
+  PPC   | KVM_REG_PPC_TM_DSCR           | 64
+  PPC   | KVM_REG_PPC_TM_TAR            | 64
+        |                               |
+  MIPS  | KVM_REG_MIPS_R0               | 64
           ...
-  PPC   | KVM_REG_PPC_TM_VSR63 | 128
-  PPC   | KVM_REG_PPC_TM_CR    | 64
-  PPC   | KVM_REG_PPC_TM_LR    | 64
-  PPC   | KVM_REG_PPC_TM_CTR   | 64
-  PPC   | KVM_REG_PPC_TM_FPSCR | 64
-  PPC   | KVM_REG_PPC_TM_AMR   | 64
-  PPC   | KVM_REG_PPC_TM_PPR   | 64
-  PPC   | KVM_REG_PPC_TM_VRSAVE        | 64
-  PPC   | KVM_REG_PPC_TM_VSCR  | 32
-  PPC   | KVM_REG_PPC_TM_DSCR  | 64
-  PPC   | KVM_REG_PPC_TM_TAR   | 64
+  MIPS  | KVM_REG_MIPS_R31              | 64
+  MIPS  | KVM_REG_MIPS_HI               | 64
+  MIPS  | KVM_REG_MIPS_LO               | 64
+  MIPS  | KVM_REG_MIPS_PC               | 64
+  MIPS  | KVM_REG_MIPS_CP0_INDEX        | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONTEXT      | 64
+  MIPS  | KVM_REG_MIPS_CP0_USERLOCAL    | 64
+  MIPS  | KVM_REG_MIPS_CP0_PAGEMASK     | 32
+  MIPS  | KVM_REG_MIPS_CP0_WIRED        | 32
+  MIPS  | KVM_REG_MIPS_CP0_HWRENA       | 32
+  MIPS  | KVM_REG_MIPS_CP0_BADVADDR     | 64
+  MIPS  | KVM_REG_MIPS_CP0_COUNT        | 32
+  MIPS  | KVM_REG_MIPS_CP0_ENTRYHI      | 64
+  MIPS  | KVM_REG_MIPS_CP0_COMPARE      | 32
+  MIPS  | KVM_REG_MIPS_CP0_STATUS       | 32
+  MIPS  | KVM_REG_MIPS_CP0_CAUSE        | 32
+  MIPS  | KVM_REG_MIPS_CP0_EPC          | 64
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG       | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG1      | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG2      | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG3      | 32
+  MIPS  | KVM_REG_MIPS_CP0_CONFIG7      | 32
+  MIPS  | KVM_REG_MIPS_CP0_ERROREPC     | 64
+  MIPS  | KVM_REG_MIPS_COUNT_CTL        | 64
+  MIPS  | KVM_REG_MIPS_COUNT_RESUME     | 64
+  MIPS  | KVM_REG_MIPS_COUNT_HZ         | 64
 
 ARM registers are mapped using the lower 32 bits.  The upper 16 of that
 is the register group type, or coprocessor number:
@@ -1928,6 +1980,22 @@ arm64 CCSIDR registers are demultiplexed by CSSELR value:
 arm64 system registers have the following id bit patterns:
   0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
 
+
+MIPS registers are mapped using the lower 32 bits.  The upper 16 of that is
+the register group type:
+
+MIPS core registers (see above) have the following id bit patterns:
+  0x7030 0000 0000 <reg:16>
+
+MIPS CP0 registers (see KVM_REG_MIPS_CP0_* above) have the following id bit
+patterns depending on whether they're 32-bit or 64-bit registers:
+  0x7020 0000 0001 00 <reg:5> <sel:3>   (32-bit)
+  0x7030 0000 0001 00 <reg:5> <sel:3>   (64-bit)
+
+MIPS KVM control registers (see above) have the following id bit patterns:
+  0x7030 0000 0002 <reg:16>
+
+
 4.69 KVM_GET_ONE_REG
 
 Capability: KVM_CAP_ONE_REG
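
A hedged user-space sketch of putting the MIPS id patterns above to work,
assuming vcpu_fd is an open vcpu file descriptor and that the MIPS uapi
header supplies KVM_REG_MIPS_PC with the 0x7030 0000 0000 <reg:16> encoding:

        #include <linux/kvm.h>
        #include <sys/ioctl.h>

        /* Read the guest PC via KVM_GET_ONE_REG (illustrative only). */
        static int read_mips_pc(int vcpu_fd, __u64 *pc)
        {
                struct kvm_one_reg reg = {
                        .id   = KVM_REG_MIPS_PC,
                        .addr = (__u64)(unsigned long)pc,
                };

                return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
        }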
@@ -2415,7 +2483,7 @@ in VCPU matching underlying host.
 4.84 KVM_GET_REG_LIST
 
 Capability: basic
-Architectures: arm, arm64
+Architectures: arm, arm64, mips
 Type: vcpu ioctl
 Parameters: struct kvm_reg_list (in/out)
 Returns: 0 on success; -1 on error
@@ -2866,15 +2934,18 @@ The fields in each entry are defined as follows:
 6. Capabilities that can be enabled
 -----------------------------------
 
-There are certain capabilities that change the behavior of the virtual CPU when
-enabled. To enable them, please see section 4.37. Below you can find a list of
-capabilities and what their effect on the vCPU is when enabling them.
+There are certain capabilities that change the behavior of the virtual CPU or
+the virtual machine when enabled. To enable them, please see section 4.37.
+Below you can find a list of capabilities and their effect on the vCPU or
+the virtual machine when enabled.
 
 The following information is provided along with the description:
 
   Architectures: which instruction set architectures provide this ioctl.
       x86 includes both i386 and x86_64.
 
+  Target: whether this is a per-vcpu or per-vm capability.
+
   Parameters: what parameters are accepted by the capability.
 
   Returns: the return value.  General error numbers (EBADF, ENOMEM, EINVAL)
@@ -2884,6 +2955,7 @@ The following information is provided along with the description:
 6.1 KVM_CAP_PPC_OSI
 
 Architectures: ppc
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -2898,6 +2970,7 @@ When this capability is enabled, KVM_EXIT_OSI can occur.
 6.2 KVM_CAP_PPC_PAPR
 
 Architectures: ppc
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -2917,6 +2990,7 @@ When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
 6.3 KVM_CAP_SW_TLB
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] is the address of a struct kvm_config_tlb
 Returns: 0 on success; -1 on error
 
@@ -2959,6 +3033,7 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
 6.4 KVM_CAP_S390_CSS_SUPPORT
 
 Architectures: s390
+Target: vcpu
 Parameters: none
 Returns: 0 on success; -1 on error
 
@@ -2970,9 +3045,13 @@ handled in-kernel, while the other I/O instructions are passed to userspace.
 When this capability is enabled, KVM_EXIT_S390_TSCH will occur on TEST
 SUBCHANNEL intercepts.
 
+Note that even though this capability is enabled per-vcpu, the complete
+virtual machine is affected.
+
 6.5 KVM_CAP_PPC_EPR
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] defines whether the proxy facility is active
 Returns: 0 on success; -1 on error
 
@@ -2998,7 +3077,17 @@ This capability connects the vcpu to an in-kernel MPIC device.
 6.7 KVM_CAP_IRQ_XICS
 
 Architectures: ppc
+Target: vcpu
 Parameters: args[0] is the XICS device fd
             args[1] is the XICS CPU number (server ID) for this vcpu
 
 This capability connects the vcpu to an in-kernel XICS device.
+
+6.8 KVM_CAP_S390_IRQCHIP
+
+Architectures: s390
+Target: vm
+Parameters: none
+
+This capability enables the in-kernel irqchip for s390. Please refer to
+"4.24 KVM_CREATE_IRQCHIP" for details.
index 134483f..1acc624 100644 (file)
@@ -70,6 +70,8 @@ Descriptions of section entries:
 
        P: Person (obsolete)
        M: Mail patches to: FullName <address@domain>
+       R: Designated reviewer: FullName <address@domain>
+          These reviewers should be CCed on patches.
        L: Mailing list that is relevant to this area
        W: Web-page with status/info
        Q: Patchwork web based patch tracking system site
@@ -156,7 +158,6 @@ F:  drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 M:     Realtek linux nic maintainers <nic_swsd@realtek.com>
-M:     Francois Romieu <romieu@fr.zoreil.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/realtek/r8169.c
@@ -943,16 +944,10 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
 F:     arch/arm/mach-imx/
+F:     arch/arm/mach-mxs/
 F:     arch/arm/boot/dts/imx*
 F:     arch/arm/configs/imx*_defconfig
 
-ARM/FREESCALE MXS ARM ARCHITECTURE
-M:     Shawn Guo <shawn.guo@linaro.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
-T:     git git://git.linaro.org/people/shawnguo/linux-2.6.git
-F:     arch/arm/mach-mxs/
-
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1052,9 +1047,33 @@ M:       Santosh Shilimkar <santosh.shilimkar@ti.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-keystone/
-F:     drivers/clk/keystone/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
+ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/clk/keystone/
+
+ARM/TEXAS INSTRUMENT KEYSTONE CLOCKSOURCE
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/clocksource/timer-keystone.c
+
+ARM/TEXAS INSTRUMENT KEYSTONE RESET DRIVER
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/power/reset/keystone-reset.c
+
+ARM/TEXAS INSTRUMENT AEMIF/EMIF DRIVERS
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/memory/*emif*
+
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1296,6 +1315,20 @@ W:       http://oss.renesas.com
 Q:     http://patchwork.kernel.org/project/linux-sh/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 S:     Supported
+F:     arch/arm/boot/dts/emev2*
+F:     arch/arm/boot/dts/r7s*
+F:     arch/arm/boot/dts/r8a*
+F:     arch/arm/boot/dts/sh*
+F:     arch/arm/configs/ape6evm_defconfig
+F:     arch/arm/configs/armadillo800eva_defconfig
+F:     arch/arm/configs/bockw_defconfig
+F:     arch/arm/configs/genmai_defconfig
+F:     arch/arm/configs/koelsch_defconfig
+F:     arch/arm/configs/kzm9g_defconfig
+F:     arch/arm/configs/lager_defconfig
+F:     arch/arm/configs/mackerel_defconfig
+F:     arch/arm/configs/marzen_defconfig
+F:     arch/arm/configs/shmobile_defconfig
 F:     arch/arm/mach-shmobile/
 F:     drivers/sh/
 
@@ -2917,6 +2950,9 @@ L:        linux-doc@vger.kernel.org
 T:     quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S:     Maintained
 F:     Documentation/
+X:     Documentation/ABI/
+X:     Documentation/devicetree/
+X:     Documentation/[a-z][a-z]_[A-Z][A-Z]/
 
 DOUBLETALK DRIVER
 M:     "James R. Van Zandt" <jrv@vanzandt.mv.com>
@@ -3189,14 +3225,6 @@ L:       linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/eata_pio.*
 
-EBTABLES
-L:     netfilter-devel@vger.kernel.org
-W:     http://ebtables.sourceforge.net/
-S:     Orphan
-F:     include/linux/netfilter_bridge/ebt_*.h
-F:     include/uapi/linux/netfilter_bridge/ebt_*.h
-F:     net/bridge/netfilter/ebt*.c
-
 EC100 MEDIA DRIVER
 M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
@@ -3324,6 +3352,13 @@ W:       bluesmoke.sourceforge.net
 S:     Maintained
 F:     drivers/edac/i82975x_edac.c
 
+EDAC-IE31200
+M:     Jason Baron <jbaron@akamai.com>
+L:     linux-edac@vger.kernel.org
+W:     bluesmoke.sourceforge.net
+S:     Maintained
+F:     drivers/edac/ie31200_edac.c
+
 EDAC-MPC85XX
 M:     Johannes Thumshirn <johannes.thumshirn@men.de>
 L:     linux-edac@vger.kernel.org
@@ -4484,8 +4519,7 @@ S:        Supported
 F:     drivers/idle/i7300_idle.c
 
 IEEE 802.15.4 SUBSYSTEM
-M:     Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
-M:     Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+M:     Alexander Aring <alex.aring@gmail.com>
 L:     linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://apps.sourceforge.net/trac/linux-zigbee
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@@ -5517,10 +5551,11 @@ S:      Maintained
 F:     arch/arm/mach-lpc32xx/
 
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
-M:     Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
-M:     Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
-M:     support@lsi.com
-L:     DL-MPTFusionLinux@lsi.com
+M:     Nagalakshmi Nandigama <nagalakshmi.nandigama@avagotech.com>
+M:     Praveen Krishnamoorthy <praveen.krishnamoorthy@avagotech.com>
+M:     Sreekanth Reddy <sreekanth.reddy@avagotech.com>
+M:     Abhijit Mahajan <abhijit.mahajan@avagotech.com>
+L:     MPT-FusionLinux.pdl@avagotech.com
 L:     linux-scsi@vger.kernel.org
 W:     http://www.lsilogic.com/support
 S:     Supported
@@ -6105,12 +6140,11 @@ F:      Documentation/networking/s2io.txt
 F:     Documentation/networking/vxge.txt
 F:     drivers/net/ethernet/neterion/
 
-NETFILTER/IPTABLES
+NETFILTER ({IP,IP6,ARP,EB,NF}TABLES)
 M:     Pablo Neira Ayuso <pablo@netfilter.org>
 M:     Patrick McHardy <kaber@trash.net>
 M:     Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:     netfilter-devel@vger.kernel.org
-L:     netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
@@ -6774,7 +6808,7 @@ F:        arch/x86/kernel/quirks.c
 
 PCI DRIVER FOR IMX6
 M:     Richard Zhu <r65037@freescale.com>
-M:     Shawn Guo <shawn.guo@linaro.org>
+M:     Shawn Guo <shawn.guo@freescale.com>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -6931,6 +6965,12 @@ L:       linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/pinctrl/pinctrl-at91.c
 
+PIN CONTROLLER - RENESAS
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     linux-sh@vger.kernel.org
+S:     Maintained
+F:     drivers/pinctrl/sh-pfc/
+
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <t.figa@samsung.com>
 M:     Thomas Abraham <thomas.abraham@linaro.org>
@@ -6960,7 +7000,7 @@ PKUNITY SOC DRIVERS
 M:     Guan Xuetao <gxt@mprc.pku.edu.cn>
 W:     http://mprc.pku.edu.cn/~guanxuetao/linux
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git
+T:     git git://github.com/gxt/linux.git
 F:     drivers/input/serio/i8042-unicore32io.h
 F:     drivers/i2c/busses/i2c-puv3.c
 F:     drivers/video/fb-puv3.c
@@ -7212,6 +7252,12 @@ M:       Robert Jarzmik <robert.jarzmik@free.fr>
 L:     rtc-linux@googlegroups.com
 S:     Maintained
 
+QAT DRIVER
+M:      Tadeusz Struk <tadeusz.struk@intel.com>
+L:      qat-linux@intel.com
+S:      Supported
+F:      drivers/crypto/qat/
+
 QIB DRIVER
 M:     Mike Marciniszyn <infinipath@intel.com>
 L:     linux-rdma@vger.kernel.org
@@ -7393,16 +7439,20 @@ S:      Orphan
 F:     drivers/net/wireless/ray*
 
 RCUTORTURE MODULE
-M:     Josh Triplett <josh@freedesktop.org>
+M:     Josh Triplett <josh@joshtriplett.org>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 F:     Documentation/RCU/torture.txt
-F:     kernel/rcu/torture.c
+F:     kernel/rcu/rcutorture.c
 
 RCUTORTURE TEST FRAMEWORK
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:     Josh Triplett <josh@joshtriplett.org>
+R:     Steven Rostedt <rostedt@goodmis.org>
+R:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+R:     Lai Jiangshan <laijs@cn.fujitsu.com>
 L:     linux-kernel@vger.kernel.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
@@ -7425,8 +7475,11 @@ S:       Supported
 F:     net/rds/
 
 READ-COPY UPDATE (RCU)
-M:     Dipankar Sarma <dipankar@in.ibm.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:     Josh Triplett <josh@joshtriplett.org>
+R:     Steven Rostedt <rostedt@goodmis.org>
+R:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+R:     Lai Jiangshan <laijs@cn.fujitsu.com>
 L:     linux-kernel@vger.kernel.org
 W:     http://www.rdrop.com/users/paulmck/RCU/
 S:     Supported
@@ -7436,7 +7489,7 @@ X:        Documentation/RCU/torture.txt
 F:     include/linux/rcu*
 X:     include/linux/srcu.h
 F:     kernel/rcu/
-X:     kernel/rcu/torture.c
+X:     kernel/torture.c
 
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:     Alessandro Zummo <a.zummo@towertech.it>
@@ -7948,6 +8001,7 @@ F:        drivers/mmc/host/sdhci-spear.c
 
 SECURITY SUBSYSTEM
 M:     James Morris <james.l.morris@oracle.com>
+M:     Serge E. Hallyn <serge@hallyn.com>
 L:     linux-security-module@vger.kernel.org (suggested Cc:)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
 W:     http://kernsec.org/
@@ -7993,6 +8047,16 @@ F:       drivers/ata/
 F:     include/linux/ata.h
 F:     include/linux/libata.h
 
+SERIAL ATA AHCI PLATFORM devices support
+M:     Hans de Goede <hdegoede@redhat.com>
+M:     Tejun Heo <tj@kernel.org>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Supported
+F:     drivers/ata/ahci_platform.c
+F:     drivers/ata/libahci_platform.c
+F:     include/linux/ahci_platform.h
+
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 M:     Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
 L:     linux-scsi@vger.kernel.org
@@ -8195,17 +8259,22 @@ S:      Maintained
 F:     drivers/usb/misc/sisusbvga/
 
 SLAB ALLOCATOR
-M:     Christoph Lameter <cl@linux-foundation.org>
+M:     Christoph Lameter <cl@linux.com>
 M:     Pekka Enberg <penberg@kernel.org>
-M:     Matt Mackall <mpm@selenic.com>
+M:     David Rientjes <rientjes@google.com>
+M:     Joonsoo Kim <iamjoonsoo.kim@lge.com>
+M:     Andrew Morton <akpm@linux-foundation.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     include/linux/sl?b*.h
-F:     mm/sl?b.c
+F:     mm/sl?b*
 
 SLEEPABLE READ-COPY UPDATE (SRCU)
 M:     Lai Jiangshan <laijs@cn.fujitsu.com>
 M:     "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
+M:     Josh Triplett <josh@joshtriplett.org>
+R:     Steven Rostedt <rostedt@goodmis.org>
+R:     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 L:     linux-kernel@vger.kernel.org
 W:     http://www.rdrop.com/users/paulmck/RCU/
 S:     Supported
@@ -8878,7 +8947,7 @@ M:        Stephen Warren <swarren@wwwdotorg.org>
 M:     Thierry Reding <thierry.reding@gmail.com>
 L:     linux-tegra@vger.kernel.org
 Q:     http://patchwork.ozlabs.org/project/linux-tegra/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-tegra.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tegra/linux.git
 S:     Supported
 N:     [^a-z]tegra
 
@@ -8968,7 +9037,7 @@ F:        drivers/media/radio/radio-raremono.c
 
 THERMAL
 M:     Zhang Rui <rui.zhang@intel.com>
-M:     Eduardo Valentin <eduardo.valentin@ti.com>
+M:     Eduardo Valentin <edubezval@gmail.com>
 L:     linux-pm@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
@@ -8995,11 +9064,18 @@ S:      Maintained
 F:     drivers/platform/x86/thinkpad_acpi.c
 
 TI BANDGAP AND THERMAL DRIVER
-M:     Eduardo Valentin <eduardo.valentin@ti.com>
+M:     Eduardo Valentin <edubezval@gmail.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     drivers/thermal/ti-soc-thermal/
 
+TI CLOCK DRIVER
+M:     Tero Kristo <t-kristo@ti.com>
+L:     linux-omap@vger.kernel.org
+S:     Maintained
+F:     drivers/clk/ti/
+F:     include/linux/clk/ti.h
+
 TI FLASH MEDIA INTERFACE DRIVER
 M:     Alex Dubov <oakad@yahoo.com>
 S:     Maintained
@@ -9276,7 +9352,7 @@ UNICORE32 ARCHITECTURE:
 M:     Guan Xuetao <gxt@mprc.pku.edu.cn>
 W:     http://mprc.pku.edu.cn/~guanxuetao/linux
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git
+T:     git git://github.com/gxt/linux.git
 F:     arch/unicore32/
 
 UNIFDEF
@@ -9409,12 +9485,6 @@ S:       Maintained
 F:     drivers/usb/host/isp116x*
 F:     include/linux/usb/isp116x.h
 
-USB KAWASAKI LSI DRIVER
-M:     Oliver Neukum <oliver@neukum.org>
-L:     linux-usb@vger.kernel.org
-S:     Maintained
-F:     drivers/usb/serial/kl5kusb105.*
-
 USB MASS STORAGE DRIVER
 M:     Matthew Dharm <mdharm-usb@one-eyed-alien.net>
 L:     linux-usb@vger.kernel.org
@@ -9442,12 +9512,6 @@ S:       Maintained
 F:     Documentation/usb/ohci.txt
 F:     drivers/usb/host/ohci*
 
-USB OPTION-CARD DRIVER
-M:     Matthias Urlichs <smurf@smurf.noris.de>
-L:     linux-usb@vger.kernel.org
-S:     Maintained
-F:     drivers/usb/serial/option.c
-
 USB PEGASUS DRIVER
 M:     Petko Manolov <petkan@nucleusys.com>
 L:     linux-usb@vger.kernel.org
@@ -9480,7 +9544,7 @@ S:        Maintained
 F:     drivers/net/usb/rtl8150.c
 
 USB SERIAL SUBSYSTEM
-M:     Johan Hovold <jhovold@gmail.com>
+M:     Johan Hovold <johan@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/usb/usb-serial.txt
@@ -9743,6 +9807,14 @@ L:       virtualization@lists.linux-foundation.org
 S:     Supported
 F:     arch/x86/kernel/cpu/vmware.c
 
+VMWARE BALLOON DRIVER
+M:     Xavier Deguillard <xdeguillard@vmware.com>
+M:     Philip Moltmann <moltmann@vmware.com>
+M:     "VMware, Inc." <pv-drivers@vmware.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/misc/vmw_balloon.c
+
 VMWARE VMXNET3 ETHERNET DRIVER
 M:     Shreyas Bhatewara <sbhatewara@vmware.com>
 M:     "VMware, Inc." <pv-drivers@vmware.com>
index 97b2861..d0901b4 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION =
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -41,6 +41,29 @@ unexport GREP_OPTIONS
 # descending is started. They are now explicitly listed as the
 # prepare rule.
 
+# Beautify output
+# ---------------------------------------------------------------------------
+#
+# Normally, we echo the whole command before executing it. By making
+# that echo $($(quiet)$(cmd)), we now have the possibility to set
+# $(quiet) to choose other forms of output instead, e.g.
+#
+#         quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
+#         cmd_cc_o_c       = $(CC) $(c_flags) -c -o $@ $<
+#
+# If $(quiet) is empty, the whole command will be printed.
+# If it is set to "quiet_", only the short version will be printed.
+# If it is set to "silent_", nothing will be printed at all, since
+# the variable $(silent_cmd_cc_o_c) doesn't exist.
+#
+# A simple variant is to prefix commands with $(Q) - that's useful
+# for commands that shall be hidden in non-verbose mode.
+#
+#      $(Q)ln $@ :<
+#
+# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
+# If KBUILD_VERBOSE equals 1 then the above command is displayed.
+#
 # To put more focus on warnings, be less verbose as default
 # Use 'make V=1' to see the full commands
 
@@ -51,6 +74,29 @@ ifndef KBUILD_VERBOSE
   KBUILD_VERBOSE = 0
 endif
 
+ifeq ($(KBUILD_VERBOSE),1)
+  quiet =
+  Q =
+else
+  quiet=quiet_
+  Q = @
+endif
+
+# If the user is running make -s (silent mode), suppress echoing of
+# commands
+
+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+  quiet=silent_
+endif
+else                                   # make-3.8x
+ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
+  quiet=silent_
+endif
+endif
+
+export quiet Q KBUILD_VERBOSE
+
 # Call a source code checker (by default, "sparse") as part of the
 # C compilation.
 #
@@ -126,7 +172,13 @@ PHONY += $(MAKECMDGOALS) sub-make
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
        @:
 
+# Fake the "Entering directory" message once, so that IDEs/editors are
+# able to understand relative filenames.
+       echodir := @echo
+ quiet_echodir := @echo
+silent_echodir := @:
 sub-make: FORCE
+       $($(quiet)echodir) "make[1]: Entering directory \`$(KBUILD_OUTPUT)'"
        $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
        KBUILD_SRC=$(CURDIR) \
        KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \
@@ -289,52 +341,6 @@ endif
 export KBUILD_MODULES KBUILD_BUILTIN
 export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
 
-# Beautify output
-# ---------------------------------------------------------------------------
-#
-# Normally, we echo the whole command before executing it. By making
-# that echo $($(quiet)$(cmd)), we now have the possibility to set
-# $(quiet) to choose other forms of output instead, e.g.
-#
-#         quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
-#         cmd_cc_o_c       = $(CC) $(c_flags) -c -o $@ $<
-#
-# If $(quiet) is empty, the whole command will be printed.
-# If it is set to "quiet_", only the short version will be printed.
-# If it is set to "silent_", nothing will be printed at all, since
-# the variable $(silent_cmd_cc_o_c) doesn't exist.
-#
-# A simple variant is to prefix commands with $(Q) - that's useful
-# for commands that shall be hidden in non-verbose mode.
-#
-#      $(Q)ln $@ :<
-#
-# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
-# If KBUILD_VERBOSE equals 1 then the above command is displayed.
-
-ifeq ($(KBUILD_VERBOSE),1)
-  quiet =
-  Q =
-else
-  quiet=quiet_
-  Q = @
-endif
-
-# If the user is running make -s (silent mode), suppress echoing of
-# commands
-
-ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
-ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
-  quiet=silent_
-endif
-else                                   # make-3.8x
-ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
-  quiet=silent_
-endif
-endif
-
-export quiet Q KBUILD_VERBOSE
-
 ifneq ($(CC),)
 ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
 COMPILER := clang
@@ -682,6 +688,8 @@ KBUILD_CFLAGS       += -fomit-frame-pointer
 endif
 endif
 
+KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
+
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS  += -g
 KBUILD_AFLAGS  += -Wa,-gdwarf-2
@@ -1170,7 +1178,7 @@ distclean: mrproper
 # Packaging of the kernel to various formats
 # ---------------------------------------------------------------------------
 # rpm target kept for backward compatibility
-package-dir    := $(srctree)/scripts/package
+package-dir    := scripts/package
 
 %src-pkg: FORCE
        $(Q)$(MAKE) $(build)=$(package-dir) $@
index 6cb7fe8..b4cf036 100644 (file)
@@ -57,6 +57,7 @@ unsigned long get_wchan(struct task_struct *p);
   ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
 
 #define cpu_relax()    barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #define ARCH_HAS_PREFETCH
 #define ARCH_HAS_PREFETCHW
index c1d3d2d..b3c7509 100644 (file)
@@ -60,7 +60,7 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_IC_IVIC                0x10
 #define ARC_REG_IC_CTRL                0x11
 #define ARC_REG_IC_IVIL                0x19
-#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
+#if defined(CONFIG_ARC_MMU_V3)
 #define ARC_REG_IC_PTAG                0x1E
 #endif
 
@@ -74,7 +74,7 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_DC_IVDL                0x4A
 #define ARC_REG_DC_FLSH                0x4B
 #define ARC_REG_DC_FLDL                0x4C
-#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
+#if defined(CONFIG_ARC_MMU_V3)
 #define ARC_REG_DC_PTAG                0x5C
 #endif
 
index d99f9b3..82588f3 100644 (file)
@@ -62,6 +62,8 @@ unsigned long thread_saved_pc(struct task_struct *t);
 #define cpu_relax()    do { } while (0)
 #endif
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 #define copy_segments(tsk, mm)      do { } while (0)
 #define release_segments(mm)        do { } while (0)
 
index 2618cc1..76a7739 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _UAPI__ASM_ARC_PTRACE_H
 #define _UAPI__ASM_ARC_PTRACE_H
 
+#define PTRACE_GET_THREAD_AREA 25
 
 #ifndef __ASSEMBLY__
 /*
index 2ff0347..e248594 100644 (file)
@@ -10,9 +10,9 @@
  *  -This is the more "natural" hand written assembler
  */
 
+#include <linux/linkage.h>
 #include <asm/entry.h>       /* For the SAVE_* macros */
 #include <asm/asm-offsets.h>
-#include <asm/linkage.h>
 
 #define KSP_WORD_OFF   ((TASK_THREAD + THREAD_KSP) / 4)
 
index 0b3ef40..fffdb5e 100644 (file)
@@ -41,7 +41,7 @@ const struct machine_desc * __init setup_machine_fdt(void *dt)
 {
        const struct machine_desc *mdesc;
        unsigned long dt_root;
-       void *clk;
+       const void *clk;
        int len;
 
        if (!early_init_dt_scan(dt))
index 07a58f2..4d2481b 100644 (file)
@@ -77,10 +77,11 @@ stext:
        ; Clear BSS before updating any globals
        ; XXX: use ZOL here
        mov     r5, __bss_start
-       mov     r6, __bss_stop
+       sub     r6, __bss_stop, r5
+       lsr.f   lp_count, r6, 2
+       lpnz    1f
+       st.ab   0, [r5, 4]
 1:
-       st.ab   0, [r5,4]
-       brlt    r5, r6, 1b
 
        ; Uboot - kernel ABI
        ;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
index 63177e4..b9a5685 100644 (file)
@@ -99,10 +99,6 @@ static int arc_pmu_event_init(struct perf_event *event)
        struct hw_perf_event *hwc = &event->hw;
        int ret;
 
-       /* ARC 700 PMU does not support sampling events */
-       if (is_sampling_event(event))
-               return -ENOENT;
-
        switch (event->attr.type) {
        case PERF_TYPE_HARDWARE:
                if (event->attr.config >= PERF_COUNT_HW_MAX)
@@ -298,6 +294,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
                .read           = arc_pmu_read,
        };
 
+       /* ARC 700 PMU does not support sampling events */
+       arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
        ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
 
        return ret;
index 5d76706..13b3ffb 100644 (file)
@@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request,
        pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
 
        switch (request) {
+       case PTRACE_GET_THREAD_AREA:
+               ret = put_user(task_thread_info(child)->thr_ptr,
+                              (unsigned long __user *)data);
+               break;
        default:
                ret = ptrace_request(child, request, addr, data);
                break;
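
Paired with the PTRACE_GET_THREAD_AREA definition added to the uapi header earlier (request 25), this gives debuggers access to the tracee's TLS pointer. A hedged user-space sketch; error handling is minimal, and the fallback define only matters on toolchains whose headers predate this change:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GET_THREAD_AREA
#define PTRACE_GET_THREAD_AREA 25	/* matches the uapi value above */
#endif

/* Read a stopped tracee's thread pointer; the kernel put_user()s
 * thr_ptr into the address passed as the ptrace data argument. */
static int read_thread_area(pid_t pid, unsigned long *thr_ptr)
{
	if (ptrace(PTRACE_GET_THREAD_AREA, pid, 0, thr_ptr) == -1) {
		perror("PTRACE_GET_THREAD_AREA");
		return -1;
	}
	return 0;
}
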
index cf90b6f..c802bb5 100644 (file)
@@ -337,8 +337,19 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
  */
 static DEFINE_PER_CPU(int, ipi_dev);
+
+static struct irqaction arc_ipi_irq = {
+        .name    = "IPI Interrupt",
+        .flags   = IRQF_PERCPU,
+        .handler = do_IPI,
+};
+
 int smp_ipi_irq_setup(int cpu, int irq)
 {
-       int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
-       return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+       if (!cpu)
+               return setup_irq(irq, &arc_ipi_irq);
+       else
+               arch_unmask_irq(irq);
+
+       return 0;
 }
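
request_percpu_irq() assumes the IRQ was first marked with irq_set_percpu_devid(), and nothing in this diff does that; since every core shares one IPI vector here, a single shared irqaction installed once on the boot CPU suffices, with secondaries merely unmasking the vector locally. For contrast, a hedged sketch of what the per-CPU variant would have needed (function name illustrative):

static DEFINE_PER_CPU(int, ipi_dev_id);

static int __init percpu_ipi_variant(int irq)
{
	irq_set_percpu_devid(irq);	/* prerequisite the old code lacked */
	return request_percpu_irq(irq, do_IPI, "IPI Interrupt",
				  &ipi_dev_id);
}
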
index 2555f58..dd35bde 100644 (file)
@@ -116,7 +116,7 @@ SECTIONS
 
        _edata = .;
 
-       BSS_SECTION(0, 0, 0)
+       BSS_SECTION(4, 4, 4)
 
 #ifdef CONFIG_ARC_DW2_UNWIND
        . = ALIGN(PAGE_SIZE);
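
BSS_SECTION() takes three alignment parameters, and bumping them from 0 to 4 word-aligns both ends of .bss, which the new head.S clear loop above depends on since it stores four bytes per iteration. A hedged, condensed sketch of the asm-generic definition being parameterized (see include/asm-generic/vmlinux.lds.h):

#define BSS_SECTION(sbss_align, bss_align, stop_align)	\
	. = ALIGN(sbss_align);				\
	__bss_start = .;				\
	SBSS(sbss_align)				\
	BSS(bss_align)					\
	. = ALIGN(stop_align);				\
	__bss_stop = .;
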
index 1f676c4..353b202 100644 (file)
@@ -389,7 +389,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
 {
        unsigned long flags;
@@ -405,6 +405,23 @@ static inline void __ic_entire_inv(void)
        read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
 }
 
+struct ic_line_inv_vaddr_ipi {
+       unsigned long paddr, vaddr;
+       int sz;
+};
+
+static void __ic_line_inv_vaddr_helper(void *info)
+{
+       struct ic_line_inv_vaddr_ipi *ic_inv = info;
+       __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
+}
+
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+                               unsigned long sz)
+{
+       struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr, sz };
+       on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
+}
 #else
 
 #define __ic_entire_inv()
@@ -553,12 +570,8 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
  */
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 {
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __ic_line_inv_vaddr(paddr, vaddr, len);
        __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
-       local_irq_restore(flags);
+       __ic_line_inv_vaddr(paddr, vaddr, len);
 }
 
 /* wrapper to compile time eliminate alignment checks in flush loop */
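
Two related changes here: per-line I-cache invalidates are now cross-called to every core with on_each_cpu(), since each core must invalidate its own I-cache, and __sync_icache_dcache() flushes the D-cache line before the I-cache invalidate so other cores refetch the freshly written instructions. The local_irq_save() pair had to go because on_each_cpu() must not be called with interrupts disabled. The generic cross-call pattern, for reference (names illustrative):

struct inv_args { unsigned long paddr, vaddr; int sz; };

static void inv_helper(void *info)
{
	struct inv_args *a = info;
	/* per-CPU cache maintenance on a->paddr / a->vaddr / a->sz */
}

static void inv_broadcast(unsigned long paddr, unsigned long vaddr, int sz)
{
	struct inv_args a = { paddr, vaddr, sz };

	on_each_cpu(inv_helper, &a, 1);	/* 1: wait for all CPUs */
}
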
index 87b63fd..290f02e 100644 (file)
@@ -6,6 +6,7 @@ config ARM
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_CUSTOM_GPIO_H
        select ARCH_MIGHT_HAVE_PC_PARPORT
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANT_IPC_PARSE_VERSION
@@ -175,13 +176,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
        bool
 
-config ARCH_HAS_CPUFREQ
-       bool
-       help
-         Internal node to signify that the ARCH has CPUFREQ support
-         and that the relevant menu configurations are displayed for
-         it.
-
 config ARCH_HAS_BANDGAP
        bool
 
@@ -318,9 +312,8 @@ config ARCH_MULTIPLATFORM
 
 config ARCH_INTEGRATOR
        bool "ARM Ltd. Integrator family"
-       select ARCH_HAS_CPUFREQ
        select ARM_AMBA
-       select ARM_PATCH_PHYS_VIRT
+       select ARM_PATCH_PHYS_VIRT if MMU
        select AUTO_ZRELADDR
        select COMMON_CLK
        select COMMON_CLK_VERSATILE
@@ -538,7 +531,6 @@ config ARCH_DOVE
 
 config ARCH_KIRKWOOD
        bool "Marvell Kirkwood"
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select CPU_FEROCEON
        select GENERIC_CLOCKEVENTS
@@ -637,7 +629,6 @@ config ARCH_LPC32XX
 config ARCH_PXA
        bool "PXA2xx/PXA3xx-based"
        depends on MMU
-       select ARCH_HAS_CPUFREQ
        select ARCH_MTD_XIP
        select ARCH_REQUIRE_GPIOLIB
        select ARM_CPU_SUSPEND if PM
@@ -668,7 +659,7 @@ config ARCH_MSM
 config ARCH_SHMOBILE_LEGACY
        bool "Renesas ARM SoCs (non-multiplatform)"
        select ARCH_SHMOBILE
-       select ARM_PATCH_PHYS_VIRT
+       select ARM_PATCH_PHYS_VIRT if MMU
        select CLKDEV_LOOKUP
        select GENERIC_CLOCKEVENTS
        select HAVE_ARM_SCU if SMP
@@ -707,7 +698,6 @@ config ARCH_RPC
 
 config ARCH_SA1100
        bool "SA1100-based"
-       select ARCH_HAS_CPUFREQ
        select ARCH_MTD_XIP
        select ARCH_REQUIRE_GPIOLIB
        select ARCH_SPARSEMEM_ENABLE
@@ -725,7 +715,6 @@ config ARCH_SA1100
 
 config ARCH_S3C24XX
        bool "Samsung S3C24XX SoCs"
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select ATAGS
        select CLKDEV_LOOKUP
@@ -746,7 +735,6 @@ config ARCH_S3C24XX
 
 config ARCH_S3C64XX
        bool "Samsung S3C64XX"
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
        select ARM_VIC
@@ -809,7 +797,6 @@ config ARCH_S5PC100
 
 config ARCH_S5PV210
        bool "Samsung S5PV210/S5PC110"
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_SPARSEMEM_ENABLE
        select ATAGS
@@ -845,7 +832,6 @@ config ARCH_DAVINCI
 config ARCH_OMAP1
        bool "TI OMAP1"
        depends on MMU
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_OMAP
        select ARCH_REQUIRE_GPIOLIB
@@ -1009,8 +995,6 @@ source "arch/arm/mach-rockchip/Kconfig"
 
 source "arch/arm/mach-sa1100/Kconfig"
 
-source "arch/arm/plat-samsung/Kconfig"
-
 source "arch/arm/mach-socfpga/Kconfig"
 
 source "arch/arm/mach-spear/Kconfig"
@@ -1028,6 +1012,7 @@ source "arch/arm/mach-s5pc100/Kconfig"
 source "arch/arm/mach-s5pv210/Kconfig"
 
 source "arch/arm/mach-exynos/Kconfig"
+source "arch/arm/plat-samsung/Kconfig"
 
 source "arch/arm/mach-shmobile/Kconfig"
 
@@ -2109,9 +2094,7 @@ endmenu
 
 menu "CPU Power Management"
 
-if ARCH_HAS_CPUFREQ
 source "drivers/cpufreq/Kconfig"
-endif
 
 source "drivers/cpuidle/Kconfig"
 
index 5986ff6..adb5ed9 100644 (file)
@@ -357,7 +357,7 @@ dtb-$(CONFIG_ARCH_STI)+= stih407-b2120.dtb \
        stih415-b2020.dtb \
        stih416-b2000.dtb \
        stih416-b2020.dtb \
-       stih416-b2020-revE.dtb
+       stih416-b2020e.dtb
 dtb-$(CONFIG_MACH_SUN4I) += \
        sun4i-a10-a1000.dtb \
        sun4i-a10-cubieboard.dtb \
index ecb2677..e2156a5 100644 (file)
                serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
                        0 0 1 2
                >;
-               tx-num-evt = <1>;
-               rx-num-evt = <1>;
+               tx-num-evt = <32>;
+               rx-num-evt = <32>;
 };
 
 &tps {
index ab9a34c..80a3b21 100644 (file)
                serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
                        0 0 1 2
                >;
-               tx-num-evt = <1>;
-               rx-num-evt = <1>;
+               tx-num-evt = <32>;
+               rx-num-evt = <32>;
 };
 
 &tscadc {
index 8a0a72d..a1a0cc5 100644 (file)
 
 &cpsw_emac0 {
        phy_id = <&davinci_mdio>, <0>;
+       phy-mode = "rmii";
 };
 
 &cpsw_emac1 {
        phy_id = <&davinci_mdio>, <1>;
+       phy-mode = "rmii";
+};
+
+&phy_sel {
+       rmii-clock-ext;
 };
 
 &elm {
index 19f1f7e..90098f9 100644 (file)
        phy-mode = "rmii";
 };
 
+&phy_sel {
+       rmii-clock-ext;
+};
+
 &i2c0 {
        status = "okay";
        pinctrl-names = "default";
index 772fec2..1e2919d 100644 (file)
@@ -91,6 +91,8 @@
                                marvell,nand-keep-config;
                                marvell,nand-enable-arbiter;
                                nand-on-flash-bbt;
+                               nand-ecc-strength = <4>;
+                               nand-ecc-step-size = <512>;
 
                                partition@0 {
                                        label = "U-Boot";
index e69bc67..4173a8a 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 380 family SoC";
-       compatible = "marvell,armada380", "marvell,armada38x";
+       compatible = "marvell,armada380";
 
        cpus {
                #address-cells = <1>;
index ff9637d..1af886f 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 385 Development Board";
-       compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada380";
 
        chosen {
                bootargs = "console=ttyS0,115200 earlyprintk";
@@ -98,6 +98,8 @@
                                marvell,nand-keep-config;
                                marvell,nand-enable-arbiter;
                                nand-on-flash-bbt;
+                               nand-ecc-strength = <4>;
+                               nand-ecc-step-size = <512>;
 
                                partition@0 {
                                        label = "U-Boot";
index 4089325..aaca286 100644 (file)
@@ -17,7 +17,7 @@
 
 / {
        model = "Marvell Armada 385 Reference Design";
-       compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
 
        chosen {
                bootargs = "console=ttyS0,115200 earlyprintk";
index f011009..6283d79 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 385 family SoC";
-       compatible = "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,armada385", "marvell,armada380";
 
        cpus {
                #address-cells = <1>;
index 3de364e..689fa1a 100644 (file)
@@ -20,7 +20,7 @@
 
 / {
        model = "Marvell Armada 38x family SoC";
-       compatible = "marvell,armada38x";
+       compatible = "marvell,armada380";
 
        aliases {
                gpio0 = &gpio0;
index e5c6a04..4e5a59e 100644 (file)
@@ -25,7 +25,7 @@
 
        memory {
                device_type = "memory";
-               reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */
+               reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
        };
 
        soc {
index b309c1c..04927db 100644 (file)
                                #size-cells = <0>;
                                #interrupt-cells = <1>;
 
-                               slow_rc_osc: slow_rc_osc {
-                                       compatible = "fixed-clock";
+                               main_osc: main_osc {
+                                       compatible = "atmel,at91rm9200-clk-main-osc";
                                        #clock-cells = <0>;
-                                       clock-frequency = <32768>;
-                                       clock-accuracy = <50000000>;
-                               };
-
-                               clk32k: slck {
-                                       compatible = "atmel,at91sam9260-clk-slow";
-                                       #clock-cells = <0>;
-                                       clocks = <&slow_rc_osc &slow_xtal>;
+                                       interrupts-extended = <&pmc AT91_PMC_MOSCS>;
+                                       clocks = <&main_xtal>;
                                };
 
                                main: mainck {
                                        compatible = "atmel,at91rm9200-clk-main";
                                        #clock-cells = <0>;
-                                       interrupts-extended = <&pmc AT91_PMC_MOSCS>;
-                                       clocks = <&main_xtal>;
+                                       clocks = <&main_osc>;
                                };
 
                                plla: pllack {
                                        compatible = "atmel,at91rm9200-clk-master";
                                        #clock-cells = <0>;
                                        interrupts-extended = <&pmc AT91_PMC_MCKRDY>;
-                                       clocks = <&clk32k>, <&main>, <&plla>, <&pllb>;
+                                       clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
                                        atmel,clk-output-range = <0 94000000>;
                                        atmel,clk-divisors = <1 2 4 0>;
                                };
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                        interrupt-parent = <&pmc>;
-                                       clocks = <&clk32k>, <&main>, <&plla>, <&pllb>;
+                                       clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
 
                                        prog0: prog0 {
                                                #clock-cells = <0>;
index c6683ea..aa35a7a 100644 (file)
                reg = <0x20000000 0x4000000>;
        };
 
+       slow_xtal {
+               clock-frequency = <32768>;
+       };
+
        main_xtal {
                clock-frequency = <18432000>;
        };
index d1b82e6..b84bac5 100644 (file)
                                                                      <595000000 650000000 3 0>,
                                                                      <545000000 600000000 0 1>,
                                                                      <495000000 555000000 1 1>,
-                                                                     <445000000 500000000 1 2>,
-                                                                     <400000000 450000000 1 3>;
+                                                                     <445000000 500000000 2 1>,
+                                                                     <400000000 450000000 3 1>;
                                };
 
                                plladiv: plladivck {
                        compatible = "atmel,at91rm9200-ohci", "usb-ohci";
                        reg = <0x00500000 0x00100000>;
                        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+                       clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
                                 <&uhpck>;
                        clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
                        status = "disabled";
index 1a57298..2c0d6ea 100644 (file)
                                                                       595000000 650000000 3 0
                                                                       545000000 600000000 0 1
                                                                       495000000 555000000 1 1
-                                                                      445000000 500000000 1 2
-                                                                      400000000 450000000 1 3>;
+                                                                      445000000 500000000 2 1
+                                                                      400000000 450000000 3 1>;
                                };
 
                                plladiv: plladivck {
                                reg = <0x00500000 0x80000
                                       0xf803c000 0x400>;
                                interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
+                               clocks = <&usb>, <&udphs_clk>;
+                               clock-names = "hclk", "pclk";
                                status = "disabled";
 
                                ep0 {
                                compatible = "atmel,at91sam9rl-pwm";
                                reg = <0xf8034000 0x300>;
                                interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
+                               clocks = <&pwm_clk>;
                                #pwm-cells = <3>;
                                status = "disabled";
                        };
                        compatible = "atmel,at91rm9200-ohci", "usb-ohci";
                        reg = <0x00600000 0x100000>;
                        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
-                                <&uhpck>;
+                       clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
                        clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
                        status = "disabled";
                };
index 4adc280..8308954 100644 (file)
                                        regulator-name = "ldo3";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
+                                       regulator-always-on;
                                        regulator-boot-on;
                                };
 
index c29945e..8012763 100644 (file)
                        clocks = <&qspi_gfclk_div>;
                        clock-names = "fck";
                        num-cs = <4>;
-                       interrupts = <0 343 0x4>;
                        status = "disabled";
                };
 
                        #size-cells = <1>;
                        status = "disabled";
                };
+
+               atl: atl@4843c000 {
+                       compatible = "ti,dra7-atl";
+                       reg = <0x4843c000 0x3ff>;
+                       ti,hwmods = "atl";
+                       ti,provided-clocks = <&atl_clkin0_ck>, <&atl_clkin1_ck>,
+                                            <&atl_clkin2_ck>, <&atl_clkin3_ck>;
+                       clocks = <&atl_gfclk_mux>;
+                       clock-names = "fck";
+                       status = "disabled";
+               };
        };
 };
 
index b03cfe4..dc7a292 100644 (file)
 &cm_core_aon_clocks {
        atl_clkin0_ck: atl_clkin0_ck {
                #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <0>;
+               compatible = "ti,dra7-atl-clock";
+               clocks = <&atl_gfclk_mux>;
        };
 
        atl_clkin1_ck: atl_clkin1_ck {
                #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <0>;
+               compatible = "ti,dra7-atl-clock";
+               clocks = <&atl_gfclk_mux>;
        };
 
        atl_clkin2_ck: atl_clkin2_ck {
                #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <0>;
+               compatible = "ti,dra7-atl-clock";
+               clocks = <&atl_gfclk_mux>;
        };
 
        atl_clkin3_ck: atl_clkin3_ck {
                #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <0>;
+               compatible = "ti,dra7-atl-clock";
+               clocks = <&atl_gfclk_mux>;
        };
 
        hdmi_clkin_ck: hdmi_clkin_ck {
 
        l3_iclk_div: l3_iclk_div {
                #clock-cells = <0>;
-               compatible = "fixed-factor-clock";
+               compatible = "ti,divider-clock";
+               ti,max-div = <2>;
+               ti,bit-shift = <4>;
+               reg = <0x0100>;
                clocks = <&dpll_core_h12x2_ck>;
-               clock-mult = <1>;
-               clock-div = <1>;
+               ti,index-power-of-two;
        };
 
        l4_root_clk_div: l4_root_clk_div {
                compatible = "fixed-factor-clock";
                clocks = <&l3_iclk_div>;
                clock-mult = <1>;
-               clock-div = <1>;
+               clock-div = <2>;
        };
 
        video1_clk2_div: video1_clk2_div {
index b8ece4b..17b22e9 100644 (file)
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
                interrupt-controller;
-               reg = <0x10490000 0x1000>, <0x10480000 0x100>;
+               reg = <0x10490000 0x10000>, <0x10480000 0x10000>;
        };
 
        combiner: interrupt-controller@10440000 {
                interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>;
                clocks = <&clock CLK_PWM>;
                clock-names = "timers";
-               #pwm-cells = <2>;
+               #pwm-cells = <3>;
                status = "disabled";
        };
 
index ee3001f..97ea7a9 100644 (file)
                pinctrl2 = &pinctrl_2;
        };
 
+       pmu_system_controller: system-controller@10020000 {
+               clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
+                               "clkout4", "clkout8", "clkout9";
+               clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
+                       <&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
+                       <&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
+                       <&clock CLK_XUSBXTI>;
+               #clock-cells = <1>;
+       };
+
        sysram@02020000 {
                compatible = "mmio-sram";
                reg = <0x02020000 0x20000>;
index c5a943d..de1f9c7 100644 (file)
 
        pmu_system_controller: system-controller@10020000 {
                compatible = "samsung,exynos4212-pmu", "syscon";
+               clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
+                               "clkout4", "clkout8", "clkout9";
+               clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
+                       <&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
+                       <&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
+                       <&clock CLK_XUSBXTI>;
+               #clock-cells = <1>;
        };
 
        g2d@10800000 {
index 834fb5a..492e1ef 100644 (file)
        pmu_system_controller: system-controller@10040000 {
                compatible = "samsung,exynos5250-pmu", "syscon";
                reg = <0x10040000 0x5000>;
+               clock-names = "clkout16";
+               clocks = <&clock CLK_FIN_PLL>;
+               #clock-cells = <1>;
        };
 
        sysreg_system_controller: syscon@10050000 {
index e385322..a40a5c2 100644 (file)
                compatible = "samsung,exynos5420-audss-clock";
                reg = <0x03810000 0x0C>;
                #clock-cells = <1>;
-               clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>,
+               clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MAU_EPLL>,
                         <&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>;
                clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
        };
        mfc_pd: power-domain@10044060 {
                compatible = "samsung,exynos4210-pd";
                reg = <0x10044060 0x20>;
+               clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
+                       <&clock CLK_MOUT_USER_ACLK333>;
+               clock-names = "oscclk", "pclk0", "clk0";
        };
 
        disp_pd: power-domain@100440C0 {
        pmu_system_controller: system-controller@10040000 {
                compatible = "samsung,exynos5420-pmu", "syscon";
                reg = <0x10040000 0x5000>;
+               clock-names = "clkout16";
+               clocks = <&clock CLK_FIN_PLL>;
+               #clock-cells = <1>;
        };
 
        sysreg_system_controller: syscon@10050000 {
index ab1116d..83a5b86 100644 (file)
@@ -73,7 +73,7 @@
 
                L2: l2-cache {
                        compatible = "arm,pl310-cache";
-                       reg = <0xfc10000 0x100000>;
+                       reg = <0x100000 0x100000>;
                        interrupts = <0 15 4>;
                        cache-unified;
                        cache-level = <2>;
index 6bc3243..181d77f 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       fsl,cd-controller;
-       fsl,wp-controller;
+       cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
 &esdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
-       cd-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
                                MX51_PAD_SD1_DATA1__SD1_DATA1           0x20d5
                                MX51_PAD_SD1_DATA2__SD1_DATA2           0x20d5
                                MX51_PAD_SD1_DATA3__SD1_DATA3           0x20d5
-                               MX51_PAD_GPIO1_0__SD1_CD                0x20d5
-                               MX51_PAD_GPIO1_1__SD1_WP                0x20d5
+                               MX51_PAD_GPIO1_0__GPIO1_0               0x100
+                               MX51_PAD_GPIO1_1__GPIO1_1               0x100
                        >;
                };
 
index 75e66c9..31cfb7f 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1 &pinctrl_esdhc1_cd>;
-       fsl,cd-controller;
+       cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
 
                pinctrl_esdhc1_cd: esdhc1_cd {
                        fsl,pins = <
-                               MX51_PAD_GPIO1_0__SD1_CD 0x20d5
+                               MX51_PAD_GPIO1_0__GPIO1_0 0xd5
                        >;
                };
 
index d5d146a..c4956b0 100644 (file)
                      <0xb0000000 0x20000000>;
        };
 
-       soc {
-               display1: display@di1 {
-                       compatible = "fsl,imx-parallel-display";
-                       interface-pix-fmt = "bgr666";
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_ipu_disp1>;
-
-                       display-timings {
-                               800x480p60 {
-                                       native-mode;
-                                       clock-frequency = <31500000>;
-                                       hactive = <800>;
-                                       vactive = <480>;
-                                       hfront-porch = <40>;
-                                       hback-porch = <88>;
-                                       hsync-len = <128>;
-                                       vback-porch = <33>;
-                                       vfront-porch = <9>;
-                                       vsync-len = <3>;
-                                       vsync-active = <1>;
-                               };
+       display1: display@di1 {
+               compatible = "fsl,imx-parallel-display";
+               interface-pix-fmt = "bgr666";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_ipu_disp1>;
+
+               display-timings {
+                       800x480p60 {
+                               native-mode;
+                               clock-frequency = <31500000>;
+                               hactive = <800>;
+                               vactive = <480>;
+                               hfront-porch = <40>;
+                               hback-porch = <88>;
+                               hsync-len = <128>;
+                               vback-porch = <33>;
+                               vfront-porch = <9>;
+                               vsync-len = <3>;
+                               vsync-active = <1>;
                        };
                };
 
index 5373a5f..c8e51dd 100644 (file)
                        fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
                };
 
+               pinctrl_hummingboard_usbotg_id: hummingboard-usbotg-id {
+                       /*
+                        * Similar to pinctrl_usbotg_2, but we want it
+                        * pulled down for a fixed host connection.
+                        */
+                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
+               };
+
                pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
                        fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
                };
 };
 
 &usbotg {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>;
        vbus-supply = <&reg_usbotg_vbus>;
        status = "okay";
 };
index af4929a..0e1406e 100644 (file)
@@ -11,7 +11,7 @@
 
 /dts-v1/;
 #include "imx6q.dtsi"
-#include "imx6qdl-gw54xx.dtsi"
+#include "imx6qdl-gw51xx.dtsi"
 
 / {
        model = "Gateworks Ventana i.MX6 Quad GW51XX";
index 25da82a..e8e7816 100644 (file)
                pinctrl-0 = <&pinctrl_cubox_i_ir>;
        };
 
+       pwmleds {
+               compatible = "pwm-leds";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
+
+               front {
+                       active-low;
+                       label = "imx6:red:front";
+                       max-brightness = <248>;
+                       pwms = <&pwm1 0 50000>;
+               };
+       };
+
        regulators {
                compatible = "simple-bus";
 
                        >;
                };
 
+               pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
+                       fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
+               };
+
                pinctrl_cubox_i_spdif: cubox-i-spdif {
                        fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
                };
                        fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
                };
 
+               pinctrl_cubox_i_usbotg_id: cubox-i-usbotg-id {
+                       /*
+                        * The Cubox-i pulls this low; leaving it as a
+                        * pull-up would be pointless, even at just 10uA.
+                        */
+                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
+               };
+
                pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
                        fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
                };
 };
 
 &usbotg {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_cubox_i_usbotg_id>;
        vbus-supply = <&reg_usbotg_vbus>;
        status = "okay";
 };
index 31665ad..0db15af 100644 (file)
        status = "okay";
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
index 367af3e..744c8a2 100644 (file)
        };
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
                reg = <0x0a>;
-               clocks = <&clks 169>;
+               clocks = <&clks 201>;
                VDDA-supply = <&reg_1p8v>;
                VDDIO-supply = <&reg_3p3v>;
        };
index c91b5a6..adf150c 100644 (file)
        };
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
index d729d0b..79eac68 100644 (file)
                                MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA    0x1b0b1
                        >;
                };
-
-               pinctrl_microsom_usbotg: microsom-usbotg {
-                       /*
-                        * Similar to pinctrl_usbotg_2, but we want it
-                        * pulled down for a fixed host connection.
-                        */
-                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
-               };
        };
 };
 
@@ -26,8 +18,3 @@
        pinctrl-0 = <&pinctrl_microsom_uart1>;
        status = "okay";
 };
-
-&usbotg {
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_microsom_usbotg>;
-};
index 2d4e528..57d4abe 100644 (file)
                                compatible = "fsl,imx6sl-fec", "fsl,imx25-fec";
                                reg = <0x02188000 0x4000>;
                                interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX6SL_CLK_ENET_REF>,
+                               clocks = <&clks IMX6SL_CLK_ENET>,
                                         <&clks IMX6SL_CLK_ENET_REF>;
                                clock-names = "ipg", "ahb";
                                status = "disabled";
index c5a1fc7..b2d9834 100644 (file)
                compatible = "ethernet-phy-id0141.0cb0",
                             "ethernet-phy-ieee802.3-c22";
                reg = <0>;
-               phy-connection-type = "rgmii-id";
        };
 
        ethphy1: ethernet-phy@1 {
                compatible = "ethernet-phy-id0141.0cb0",
                             "ethernet-phy-ieee802.3-c22";
                reg = <1>;
-               phy-connection-type = "rgmii-id";
        };
 };
 
        status = "okay";
        ethernet0-port@0 {
                phy-handle = <&ethphy0>;
+               phy-connection-type = "rgmii-id";
        };
 };
 
        status = "okay";
        ethernet1-port@0 {
                phy-handle = <&ethphy1>;
+               phy-connection-type = "rgmii-id";
        };
 };
index cf0be66..1becefc 100644 (file)
                        codec {
                        };
                };
+
+               twl_power: power {
+                       compatible = "ti,twl4030-power-beagleboard-xm", "ti,twl4030-power-idle-osc-off";
+                       ti,use_poweroff;
+               };
        };
 };
 
 };
 
 &uart3 {
+       interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>;
        pinctrl-names = "default";
        pinctrl-0 = <&uart3_pins>;
 };
index 8ae8f00..c8747c7 100644 (file)
        gpios = <&twl_gpio 18 GPIO_ACTIVE_LOW>;
 };
 
+&twl {
+       twl_power: power {
+               compatible = "ti,twl4030-power-omap3-evm", "ti,twl4030-power-idle";
+               ti,use_poweroff;
+       };
+};
+
 &i2c2 {
        clock-frequency = <400000>;
 };
index ae8ae3f..b15f1a7 100644 (file)
                compatible = "ti,twl4030-audio";
                ti,enable-vibra = <1>;
        };
+
+       twl_power: power {
+               compatible = "ti,twl4030-power-n900";
+               ti,use_poweroff;
+       };
 };
 
 &twl_keypad {
index 3bfda16..a4ed549 100644 (file)
@@ -45,7 +45,6 @@
 
                        operating-points = <
                                /* kHz    uV */
-                               500000  880000
                                1000000 1060000
                                1500000 1250000
                        >;
index 8d7ffae..79f68ac 100644 (file)
                        #clock-cells = <0>;
                        clock-output-names = "sd1";
                };
-               sd2_clk: sd3_clk@e615007c {
+               sd2_clk: sd3_clk@e615026c {
                        compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
-                       reg = <0 0xe615007c 0 4>;
+                       reg = <0 0xe615026c 0 4>;
                        clocks = <&pll1_div2_clk>;
                        #clock-cells = <0>;
                        clock-output-names = "sd2";
index f557feb..90d8b6c 100644 (file)
@@ -4,7 +4,7 @@
  */
 
 /dts-v1/;
-/include/ "ste-nomadik-stn8815.dtsi"
+#include "ste-nomadik-stn8815.dtsi"
 
 / {
        model = "Calao Systems USB-S8815";
index d316c95..dbcf521 100644 (file)
@@ -1,7 +1,9 @@
 /*
  * Device Tree for the ST-Ericsson Nomadik 8815 STn8815 SoC
  */
-/include/ "skeleton.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include "skeleton.dtsi"
 
 / {
        #address-cells = <1>;
                        bus-width = <4>;
                        cap-mmc-highspeed;
                        cap-sd-highspeed;
-                       cd-gpios = <&gpio3 15 0x1>;
-                       cd-inverted;
+                       cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
                        pinctrl-names = "default";
                        pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
                        vmmc-supply = <&vmmc_regulator>;
index d6f254f..a0f6f75 100644 (file)
 
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii0>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a1_ls CLK_GMAC0_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
                };
 
                ethernet1: dwmac@fef08000 {
                        reset-names             = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii1>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a0_ls CLK_ETH1_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
                };
 
                rc: rc@fe518000 {
diff --git a/arch/arm/boot/dts/stih416-b2020-revE.dts b/arch/arm/boot/dts/stih416-b2020-revE.dts
deleted file mode 100644 (file)
index ba0fa2c..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics (R&D) Limited.
- * Author: Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/dts-v1/;
-#include "stih416.dtsi"
-#include "stih41x-b2020.dtsi"
-/ {
-       model = "STiH416 B2020 REV-E";
-       compatible = "st,stih416-b2020", "st,stih416";
-
-       soc {
-               leds {
-                       compatible = "gpio-leds";
-                       red {
-                               #gpio-cells             = <1>;
-                               label                   = "Front Panel LED";
-                               gpios                   = <&PIO4 1>;
-                               linux,default-trigger   = "heartbeat";
-                       };
-                       green {
-                               gpios                   = <&PIO1 3>;
-                               default-state           = "off";
-                       };
-               };
-
-               ethernet1: dwmac@fef08000 {
-                       snps,reset-gpio = <&PIO0 7>;
-               };
-       };
-};
diff --git a/arch/arm/boot/dts/stih416-b2020e.dts b/arch/arm/boot/dts/stih416-b2020e.dts
new file mode 100644 (file)
index 0000000..ba0fa2c
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics (R&D) Limited.
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+#include "stih416.dtsi"
+#include "stih41x-b2020.dtsi"
+/ {
+       model = "STiH416 B2020 REV-E";
+       compatible = "st,stih416-b2020", "st,stih416";
+
+       soc {
+               leds {
+                       compatible = "gpio-leds";
+                       red {
+                               #gpio-cells             = <1>;
+                               label                   = "Front Panel LED";
+                               gpios                   = <&PIO4 1>;
+                               linux,default-trigger   = "heartbeat";
+                       };
+                       green {
+                               gpios                   = <&PIO1 3>;
+                               default-state           = "off";
+                       };
+               };
+
+               ethernet1: dwmac@fef08000 {
+                       snps,reset-gpio = <&PIO0 7>;
+               };
+       };
+};
index 06473c5..84758d7 100644 (file)
                        reset-names             = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii0>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a1_ls CLK_GMAC0_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
                };
 
                ethernet1: dwmac@fef08000 {
                        reset-names     = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii1>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a0_ls CLK_ETH1_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
                };
 
                rc: rc@fe518000 {
index 6ef146e..a20fa80 100644 (file)
@@ -182,7 +182,6 @@ static int scoop_probe(struct platform_device *pdev)
        struct scoop_config *inf;
        struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int ret;
-       int temp;
 
        if (!mem)
                return -EINVAL;
index 9d13dae..4bf7226 100644 (file)
@@ -94,10 +94,10 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
index ef88153..59b7e45 100644 (file)
@@ -186,6 +186,7 @@ CONFIG_VIDEO_MX3=y
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_CODA=y
 CONFIG_SOC_CAMERA_OV2640=y
+CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
index e2d6204..5348364 100644 (file)
@@ -223,12 +223,12 @@ CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_SUN6I=y
 CONFIG_SENSORS_LM90=y
 CONFIG_THERMAL=y
-CONFIG_DOVE_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_ORION_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_MFD_AS3722=y
+CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_MAX8907=y
@@ -240,6 +240,7 @@ CONFIG_MFD_TPS65910=y
 CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
 CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_AS3722=y
+CONFIG_REGULATOR_BCM590XX=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_MAX8907=y
 CONFIG_REGULATOR_PALMAS=y
@@ -300,6 +301,7 @@ CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_OF_ARASAN=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
 CONFIG_MMC_SDHCI_DOVE=y
@@ -352,6 +354,7 @@ CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
+CONFIG_QCOM_GSBI=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_MMCC_8960=y
index e11170e..b0bfefa 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_MACH_ARMADA_370=y
 CONFIG_MACH_ARMADA_375=y
 CONFIG_MACH_ARMADA_38X=y
 CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
 CONFIG_NEON=y
 # CONFIG_CACHE_L2X0 is not set
 # CONFIG_SWP_EMULATE is not set
@@ -52,6 +53,7 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_I2C=y
 CONFIG_SPI=y
 CONFIG_SPI_ORION=y
index 59066cf..536a137 100644 (file)
@@ -32,6 +32,7 @@ CONFIG_SOC_OMAP5=y
 CONFIG_SOC_AM33XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
+CONFIG_CACHE_L2X0=y
 CONFIG_ARM_THUMBEE=y
 CONFIG_ARM_ERRATA_411920=y
 CONFIG_SMP=y
index 4522366..15468fb 100644 (file)
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
                                dst += AES_BLOCK_SIZE;
                        } while (--blocks);
                }
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
                bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->dec, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        while (walk.nbytes) {
                u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
                        dst += AES_BLOCK_SIZE;
                        src += AES_BLOCK_SIZE;
                } while (--blocks);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
                bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->enc, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
                bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->dec, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
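
The recurring fix above is the third argument of blkcipher_walk_done(): it reports how many bytes the cipher left unprocessed. Passing 0 claimed everything was consumed even when walk.nbytes was not a multiple of AES_BLOCK_SIZE, silently dropping the trailing partial block. A hedged sketch of the correct walk shape (process() stands in for the NEON helpers):

err = blkcipher_walk_virt(desc, &walk);
while (walk.nbytes >= AES_BLOCK_SIZE) {
	unsigned int done = walk.nbytes & ~(AES_BLOCK_SIZE - 1);

	process(walk.src.virt.addr, walk.dst.virt.addr, done, walk.iv);
	/* hand the sub-block remainder back to the walk */
	err = blkcipher_walk_done(desc, &walk, walk.nbytes - done);
}
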
index eb577f4..39eb16b 100644 (file)
@@ -52,7 +52,7 @@ extern inline void *return_address(unsigned int level)
 
 #endif
 
-#define ftrace_return_addr(n) return_address(n)
+#define ftrace_return_address(n) return_address(n)
 
 #endif /* ifndef __ASSEMBLY__ */
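
A one-word fix with real consequences: the macro was spelled ftrace_return_addr, so ARM's return_address() was never hooked up and the generic fallback was used instead, breaking the CALLER_ADDRn values handed to tracers. A hedged, condensed sketch of how <linux/ftrace.h> consumes the per-arch override:

#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif

#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
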
 
index 060a75e..0406cb3 100644 (file)
@@ -50,6 +50,7 @@ struct machine_desc {
        struct smp_operations   *smp;           /* SMP operations       */
        bool                    (*smp_init)(void);
        void                    (*fixup)(struct tag *, char **);
+       void                    (*dt_fixup)(void);
        void                    (*init_meminfo)(void);
        void                    (*reserve)(void);/* reserve mem blocks  */
        void                    (*map_io)(void);/* IO mapping function  */
index d9702eb..94060ad 100644 (file)
@@ -208,8 +208,6 @@ struct sync_struct {
        struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
 };
 
-extern unsigned long sync_phys;        /* physical address of *mcpm_sync */
-
 void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
 void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
 void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
index c3d5fc1..8a1e8e9 100644 (file)
@@ -82,6 +82,8 @@ unsigned long get_wchan(struct task_struct *p);
 #define cpu_relax()                    barrier()
 #endif
 
+#define cpu_relax_lowlatency()                cpu_relax()
+
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
index f989d7c..e4e4208 100644 (file)
@@ -114,8 +114,14 @@ static inline struct thread_info *current_thread_info(void)
        ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
 #define thread_saved_sp(tsk)   \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+
+#ifndef CONFIG_THUMB2_KERNEL
 #define thread_saved_fp(tsk)   \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+#else
+#define thread_saved_fp(tsk)   \
+       ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
+#endif
 
 extern void crunch_task_disable(struct thread_info *);
 extern void crunch_task_copy(struct thread_info *, void *);
index e94a157..11c54de 100644 (file)
@@ -212,7 +212,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
        mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
-       if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
+       if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
                return NULL;
 
        mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
                dump_machine_table(); /* does not return */
        }
 
+       /* We really don't want to do this, but sometimes firmware provides buggy data */
+       if (mdesc->dt_fixup)
+               mdesc->dt_fixup();
+
+       early_init_dt_scan_nodes();
+
        /* Change machine number to match the mdesc we're using */
        __machine_arch_type = mdesc->nr;
 
index a5599cf..2b32978 100644 (file)
@@ -94,13 +94,19 @@ ENTRY(iwmmxt_task_enable)
 
        mrc     p15, 0, r2, c2, c0, 0
        mov     r2, r2                          @ cpwait
+       bl      concan_save
 
-       teq     r1, #0                          @ test for last ownership
-       mov     lr, r9                          @ normal exit from exception
-       beq     concan_load                     @ no owner, skip save
+#ifdef CONFIG_PREEMPT_COUNT
+       get_thread_info r10
+#endif
+4:     dec_preempt_count r10, r3
+       mov     pc, r9                          @ normal exit from exception
 
 concan_save:
 
+       teq     r1, #0                          @ test for last ownership
+       beq     concan_load                     @ no owner, skip save
+
        tmrc    r2, wCon
 
        @ CUP? wCx
@@ -138,7 +144,7 @@ concan_dump:
        wstrd   wR15, [r1, #MMX_WR15]
 
 2:     teq     r0, #0                          @ anything to load?
-       beq     3f
+       moveq   pc, lr                          @ if not, return
 
 concan_load:
 
@@ -171,14 +177,9 @@ concan_load:
        @ clear CUP/MUP (only if r1 != 0)
        teq     r1, #0
        mov     r2, #0
-       beq     3f
-       tmcr    wCon, r2
+       moveq   pc, lr
 
-3:
-#ifdef CONFIG_PREEMPT_COUNT
-       get_thread_info r10
-#endif
-4:     dec_preempt_count r10, r3
+       tmcr    wCon, r2
        mov     pc, lr
 
 /*
index 778c2f7..a74b53c 100644 (file)
@@ -160,12 +160,16 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 static struct undef_hook kgdb_brkpt_hook = {
        .instr_mask             = 0xffffffff,
        .instr_val              = KGDB_BREAKINST,
+       .cpsr_mask              = MODE_MASK,
+       .cpsr_val               = SVC_MODE,
        .fn                     = kgdb_brk_fn
 };
 
 static struct undef_hook kgdb_compiled_brkpt_hook = {
        .instr_mask             = 0xffffffff,
        .instr_val              = KGDB_COMPILED_BREAK,
+       .cpsr_mask              = MODE_MASK,
+       .cpsr_val               = SVC_MODE,
        .fn                     = kgdb_compiled_brk_fn
 };
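
Without a CPSR constraint these hooks matched in any processor mode, so a user-space binary containing the KGDB breakpoint encoding could drop the whole machine into the kernel debugger; masking on MODE_MASK/SVC_MODE limits them to kernel-mode traps. A condensed sketch of the matching they feed into (simplified from arch/arm/kernel/traps.c):

list_for_each_entry(hook, &undef_hook, node)
	if ((instr & hook->instr_mask) == hook->instr_val &&
	    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
		fn = hook->fn;
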
 
index 9db4b65..cb14242 100644 (file)
@@ -74,8 +74,6 @@ void kprobe_arm_test_cases(void)
        TEST_RRR( op "lt" s "   r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
        TEST_RR(  op "gt" s "   r12, r13"       ", r",14,val, ", ror r",14,7,"")\
        TEST_RR(  op "le" s "   r14, r",0, val, ", r13"       ", lsl r",14,8,"")\
-       TEST_RR(  op s "        r12, pc"        ", r",14,val, ", ror r",14,7,"")\
-       TEST_RR(  op s "        r14, r",0, val, ", pc"        ", lsl r",14,8,"")\
        TEST_R(   op "eq" s "   r0,  r",11,VAL1,", #0xf5")                      \
        TEST_R(   op "ne" s "   r11, r",0, VAL1,", #0xf5000000")                \
        TEST_R(   op s "        r7,  r",8, VAL2,", #0x000af000")                \
@@ -103,8 +101,6 @@ void kprobe_arm_test_cases(void)
        TEST_RRR( op "ge        r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")   \
        TEST_RR(  op "le        r13"       ", r",14,val, ", ror r",14,7,"")     \
        TEST_RR(  op "gt        r",0, val, ", r13"       ", lsl r",14,8,"")     \
-       TEST_RR(  op "  pc"        ", r",14,val, ", ror r",14,7,"")             \
-       TEST_RR(  op "  r",0, val, ", pc"        ", lsl r",14,8,"")             \
        TEST_R(   op "eq        r",11,VAL1,", #0xf5")                           \
        TEST_R(   op "ne        r",0, VAL1,", #0xf5000000")                     \
        TEST_R(   op "  r",8, VAL2,", #0x000af000")
@@ -125,7 +121,6 @@ void kprobe_arm_test_cases(void)
        TEST_RR(  op "ge" s "   r11, r",11,N(val),", asr r",7, 6,"")    \
        TEST_RR(  op "lt" s "   r12, r",11,val, ", ror r",14,7,"")      \
        TEST_R(   op "gt" s "   r14, r13"       ", lsl r",14,8,"")      \
-       TEST_R(   op "le" s "   r14, pc"        ", lsl r",14,8,"")      \
        TEST(     op "eq" s "   r0,  #0xf5")                            \
        TEST(     op "ne" s "   r11, #0xf5000000")                      \
        TEST(     op s "        r7,  #0x000af000")                      \
@@ -159,12 +154,19 @@ void kprobe_arm_test_cases(void)
        TEST_SUPPORTED("cmp     pc, #0x1000");
        TEST_SUPPORTED("cmp     sp, #0x1000");
 
-       /* Data-processing with PC as shift*/
+       /* Data-processing with PC and a shift count in a register */
        TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) "       @ cmp   r12, r14, asl pc")
        TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) "       @ mov   r12, r14, asl pc")
        TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) "       @ add   r10, r12, r14, asl pc")
-
-       /* Data-processing with PC as shift*/
+       TEST_UNSUPPORTED(__inst_arm(0xe151021f) "       @ cmp   r1, pc, lsl r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe17f0211) "       @ cmn   pc, r1, lsl r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe1a0121f) "       @ mov   r1, pc, lsl r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe1a0f211) "       @ mov   pc, r1, lsl r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe042131f) "       @ sub   r1, r2, pc, lsl r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe1cf1312) "       @ bic   r1, pc, r2, lsl r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe081f312) "       @ add   pc, r1, r2, lsl r3")
+
+       /* Data-processing with PC as a target and status registers updated */
        TEST_UNSUPPORTED("movs  pc, r1")
        TEST_UNSUPPORTED("movs  pc, r1, lsl r2")
        TEST_UNSUPPORTED("movs  pc, #0x10000")
@@ -187,14 +189,14 @@ void kprobe_arm_test_cases(void)
        TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"")
        TEST_BF_R ("add pc, r",14,2f-1f-8,", pc")
        TEST_BF_R ("mov pc, r",0,2f,"")
-       TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
+       TEST_BF_R ("add pc, pc, r",14,(2f-1f-8)*2,", asr #1")
        TEST_BB(   "sub pc, pc, #1b-2b+8")
 #if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
        TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
 #endif
        TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
        TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
-       TEST_RR(   "add pc, pc, r",10,-2,", asl r",11,1,"")
+       TEST_R(    "add pc, pc, r",10,-2,", asl #1")
 #ifdef CONFIG_THUMB2_KERNEL
        TEST_ARM_TO_THUMB_INTERWORK_R("add      pc, pc, r",0,3f-1f-8+1,"")
        TEST_ARM_TO_THUMB_INTERWORK_R("sub      pc, r",0,3f+8+1,", #8")
@@ -216,6 +218,7 @@ void kprobe_arm_test_cases(void)
        TEST_BB_R("bx   r",7,2f,"")
        TEST_BF_R("bxeq r",14,2f,"")
 
+#if __LINUX_ARM_ARCH__ >= 5
        TEST_R("clz     r0, r",0, 0x0,"")
        TEST_R("clzeq   r7, r",14,0x1,"")
        TEST_R("clz     lr, r",7, 0xffffffff,"")
@@ -337,6 +340,7 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2")
        TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2")
        TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc")
+#endif
 
        TEST_GROUP("Multiply and multiply-accumulate")
 
@@ -559,6 +563,7 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED("ldrsht        r1, [r2], #48")
 #endif
 
+#if __LINUX_ARM_ARCH__ >= 5
        TEST_RPR(  "strd        r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
        TEST_RPR(  "strccd      r",8, VAL2,", [r",13,0, ", r",12,48,"]")
        TEST_RPR(  "strd        r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
@@ -595,6 +600,7 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) "       @ ldrd r12, [pc, #48]!")
        TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) "       @ ldrd pc, [r9], #48")
        TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) "       @ ldrd lr, [r9], #48")
+#endif
 
        TEST_GROUP("Miscellaneous")
 
@@ -1227,7 +1233,9 @@ void kprobe_arm_test_cases(void)
        TEST_COPROCESSOR( "mrc"two"     0, 0, r0, cr0, cr0, 0")
 
        COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
+#if __LINUX_ARM_ARCH__ >= 5
        COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
+#endif
        TEST_UNSUPPORTED("svc   0")
        TEST_UNSUPPORTED("svc   0xffffff")
 
@@ -1287,7 +1295,9 @@ void kprobe_arm_test_cases(void)
        TEST(   "blx    __dummy_thumb_subroutine_odd")
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
+#if __LINUX_ARM_ARCH__ >= 5
        COPROCESSOR_INSTRUCTIONS_ST_LD("2",f)
+#endif
 #if __LINUX_ARM_ARCH__ >= 6
        COPROCESSOR_INSTRUCTIONS_MC_MR("2",f)
 #endif
index 3796399..08d7312 100644 (file)
@@ -225,6 +225,7 @@ static int pre_handler_called;
 static int post_handler_called;
 static int jprobe_func_called;
 static int kretprobe_handler_called;
+static int tests_failed;
 
 #define FUNC_ARG1 0x12345678
 #define FUNC_ARG2 0xabcdef
@@ -461,6 +462,13 @@ static int run_api_tests(long (*func)(long, long))
 
        pr_info("    jprobe\n");
        ret = test_jprobe(func);
+#if defined(CONFIG_THUMB2_KERNEL) && !defined(MODULE)
+       if (ret == -EINVAL) {
+               pr_err("FAIL: Known longtime bug with jprobe on Thumb kernels\n");
+               tests_failed = ret;
+               ret = 0;
+       }
+#endif
        if (ret < 0)
                return ret;
 
@@ -1671,6 +1679,8 @@ static int __init run_all_tests(void)
 #endif
 
 out:
+       if (ret == 0)
+               ret = tests_failed;
        if (ret == 0)
                pr_info("Finished kprobe tests OK\n");
        else
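The tests_failed plumbing above implements deferred failure reporting: a known-bad case is logged and remembered rather than aborting the run, and the saved status is folded back into the final result. A minimal, self-contained sketch of the same pattern (the test bodies and the -22 error code are illustrative, not taken from the kernel):

    #include <stdio.h>

    static int tests_failed;            /* first deferred failure, 0 if none */

    static int run_one(int id)
    {
            return (id == 2) ? -22 : 0; /* pretend test 2 hits the known bug */
    }

    int main(void)
    {
            int id, ret = 0;

            for (id = 0; id < 4; id++) {
                    ret = run_one(id);
                    if (ret == -22) {   /* known failure: log it and carry on */
                            fprintf(stderr, "FAIL: known bug in test %d\n", id);
                            tests_failed = ret;
                            ret = 0;
                    }
                    if (ret < 0)
                            break;      /* unexpected failure: stop the run */
            }

            if (ret == 0)
                    ret = tests_failed; /* surface the deferred failure */
            printf(ret ? "some tests failed\n" : "all tests OK\n");
            return ret ? 1 : 0;
    }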
index 2037f72..1d37568 100644 (file)
@@ -1924,7 +1924,7 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                   struct perf_event *event)
 {
        int idx;
-       int bit;
+       int bit = -1;
        unsigned int prefix;
        unsigned int region;
        unsigned int code;
@@ -1953,7 +1953,7 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
        }
 
        idx = armv7pmu_get_event_idx(cpuc, event);
-       if (idx < 0 && krait_event)
+       if (idx < 0 && bit >= 0)
                clear_bit(bit, cpuc->used_mask);
 
        return idx;
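The Krait fix above initializes bit to -1 so the error path can tell whether a region bit was ever claimed; the old code keyed the cleanup on krait_event and could call clear_bit() with an uninitialized index. A hedged stand-alone sketch of the sentinel pattern (the helpers are illustrative placeholders, not the kernel's API):

    #include <stdio.h>

    static int claim_region_bit(void)       { return 3; }  /* bit we took */
    static void release_region_bit(int bit) { printf("released %d\n", bit); }
    static int get_counter(void)            { return -1; } /* allocation fails */

    int main(void)
    {
            int bit = -1;                   /* -1: nothing claimed yet */
            int is_krait_event = 1;
            int idx;

            if (is_krait_event)
                    bit = claim_region_bit();

            idx = get_counter();
            if (idx < 0 && bit >= 0)        /* undo exactly what was taken */
                    release_region_bit(bit);

            return idx < 0 ? 1 : 0;
    }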
index 51a13a0..8eaef81 100644 (file)
@@ -341,12 +341,12 @@ static const union decode_item arm_cccc_000x_table[] = {
        /* CMP (reg-shift reg)  cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
        /* CMN (reg-shift reg)  cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
        DECODE_EMULATEX (0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG,
-                                                REGS(ANY, 0, NOPC, 0, ANY)),
+                                                REGS(NOPC, 0, NOPC, 0, NOPC)),
 
        /* MOV (reg-shift reg)  cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
        /* MVN (reg-shift reg)  cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
        DECODE_EMULATEX (0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG,
-                                                REGS(0, ANY, NOPC, 0, ANY)),
+                                                REGS(0, NOPC, NOPC, 0, NOPC)),
 
        /* AND (reg-shift reg)  cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
        /* EOR (reg-shift reg)  cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
@@ -359,7 +359,7 @@ static const union decode_item arm_cccc_000x_table[] = {
        /* ORR (reg-shift reg)  cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
        /* BIC (reg-shift reg)  cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
        DECODE_EMULATEX (0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG,
-                                                REGS(ANY, ANY, NOPC, 0, ANY)),
+                                                REGS(NOPC, NOPC, NOPC, 0, NOPC)),
 
        DECODE_END
 };
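The decode tables above match instructions with (insn & mask) == value pairs before checking operands against REGS() constraints such as NOPC, which rejects r15 in a slot. A small stand-alone demonstration of the mask/value test, using the data-processing register-shift class from the table (mask 0x0e000090, value 0x00000010):

    #include <stdio.h>
    #include <stdint.h>

    /* (insn & mask) == value selects the instruction class. */
    static int is_dp_regshift(uint32_t insn)
    {
            return (insn & 0x0e000090) == 0x00000010;
    }

    int main(void)
    {
            /* add r1, r2, r1, lsl r3 -> register-shift form, matches */
            printf("%d\n", is_dp_regshift(0xe0821311));
            /* add r1, r2, r3 -> immediate-shift form, does not match */
            printf("%d\n", is_dp_regshift(0xe0821003));
            return 0;
    }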
index 0dd3b79..0c27ed6 100644 (file)
@@ -908,7 +908,7 @@ enum ptrace_syscall_dir {
        PTRACE_SYSCALL_EXIT,
 };
 
-static int tracehook_report_syscall(struct pt_regs *regs,
+static void tracehook_report_syscall(struct pt_regs *regs,
                                    enum ptrace_syscall_dir dir)
 {
        unsigned long ip;
@@ -926,7 +926,6 @@ static int tracehook_report_syscall(struct pt_regs *regs,
                current_thread_info()->syscall = -1;
 
        regs->ARM_ip = ip;
-       return current_thread_info()->syscall;
 }
 
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
@@ -938,7 +937,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
                return -1;
 
        if (test_thread_flag(TIF_SYSCALL_TRACE))
-               scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+               tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+       scno = current_thread_info()->syscall;
 
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, scno);
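After the change above, the report hook no longer returns the syscall number; the caller re-reads current_thread_info()->syscall once the hook has run, so the tracepoint path sees any modification a tracer made. A hedged userspace model of the shape of this fix (all names are illustrative):

    #include <stdio.h>

    static int thread_syscall = 42;     /* set on kernel entry */

    static void report_syscall(void)
    {
            thread_syscall = -1;        /* e.g. the tracer cancels the call */
    }

    int main(void)
    {
            int traced = 1;
            int scno;

            if (traced)
                    report_syscall();

            scno = thread_syscall;      /* single source of truth from here on */
            printf("dispatching syscall %d\n", scno);
            return 0;
    }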
index 9d85318..e35d880 100644 (file)
@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
                cpu_topology[cpuid].socket_id, mpidr);
 }
 
-static inline const int cpu_corepower_flags(void)
+static inline int cpu_corepower_flags(void)
 {
        return SD_SHARE_PKG_RESOURCES  | SD_SHARE_POWERDOMAIN;
 }
index 9bc6db1..41c8391 100644 (file)
@@ -1,10 +1,9 @@
-config ARCH_BCM
+menuconfig ARCH_BCM
        bool "Broadcom SoC Support" if ARCH_MULTI_V6_V7
        help
          This enables support for Broadcom ARM based SoC chips
 
-menu "Broadcom SoC Selection"
-       depends on ARCH_BCM
+if ARCH_BCM
 
 config ARCH_BCM_MOBILE
        bool "Broadcom Mobile SoC Support" if ARCH_MULTI_V7
@@ -88,4 +87,4 @@ config ARCH_BCM_5301X
          different SoC or with the older BCM47XX and BCM53XX based
          network SoC using a MIPS CPU, they are supported by arch/mips/bcm47xx
 
-endmenu
+endif
index 101e0f3..2631cfc 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_BERLIN
+menuconfig ARCH_BERLIN
        bool "Marvell Berlin SoCs" if ARCH_MULTI_V7
        select ARCH_REQUIRE_GPIOLIB
        select ARM_GIC
@@ -9,8 +9,6 @@ config ARCH_BERLIN
 
 if ARCH_BERLIN
 
-menu "Marvell Berlin SoC variants"
-
 config MACH_BERLIN_BG2
        bool "Marvell Armada 1500 (BG2)"
        select CACHE_L2X0
@@ -30,6 +28,4 @@ config MACH_BERLIN_BG2Q
        select HAVE_ARM_TWD if SMP
        select PINCTRL_BERLIN_BG2Q
 
-endmenu
-
 endif
index 66838f4..3c22a19 100644 (file)
@@ -1,12 +1,11 @@
-config ARCH_CNS3XXX
+menuconfig ARCH_CNS3XXX
        bool "Cavium Networks CNS3XXX family" if ARCH_MULTI_V6
        select ARM_GIC
        select PCI_DOMAINS if PCI
        help
          Support for Cavium Networks CNS3XXX platform.
 
-menu "CNS3XXX platform type"
-       depends on ARCH_CNS3XXX
+if ARCH_CNS3XXX
 
 config MACH_CNS3420VB
        bool "Support for CNS3420 Validation Board"
@@ -17,4 +16,4 @@ config MACH_CNS3420VB
          This is a platform with an on-board ARM11 MPCore and has support
          for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, etc.
 
-endmenu
+endif
index db18ef8..584e8d4 100644 (file)
@@ -39,7 +39,6 @@ config ARCH_DAVINCI_DA830
 config ARCH_DAVINCI_DA850
        bool "DA850/OMAP-L138/AM18x based system"
        select ARCH_DAVINCI_DA8XX
-       select ARCH_HAS_CPUFREQ
        select CP_INTC
 
 config ARCH_DAVINCI_DA8XX
index d58995c..8f9b66c 100644 (file)
@@ -7,10 +7,9 @@
 
 # Configuration options for the EXYNOS4
 
-config ARCH_EXYNOS
+menuconfig ARCH_EXYNOS
        bool "Samsung EXYNOS" if ARCH_MULTI_V7
        select ARCH_HAS_BANDGAP
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
@@ -30,8 +29,6 @@ config ARCH_EXYNOS
 
 if ARCH_EXYNOS
 
-menu "SAMSUNG EXYNOS SoCs Support"
-
 config ARCH_EXYNOS3
        bool "SAMSUNG EXYNOS3"
        select ARM_CPU_SUSPEND if PM
@@ -118,8 +115,6 @@ config SOC_EXYNOS5800
        default y
        depends on SOC_EXYNOS5420
 
-endmenu
-
 config EXYNOS5420_MCPM
        bool "Exynos5420 Multi-Cluster PM support"
        depends on MCPM && SOC_EXYNOS5420
index 16617bd..1ee9176 100644 (file)
@@ -118,6 +118,7 @@ extern void __iomem *sysram_ns_base_addr;
 extern void __iomem *sysram_base_addr;
 void exynos_init_io(void);
 void exynos_restart(enum reboot_mode mode, const char *cmd);
+void exynos_sysram_init(void);
 void exynos_cpuidle_init(void);
 void exynos_cpufreq_init(void);
 void exynos_init_late(void);
index 90aab4d..66c9b96 100644 (file)
@@ -173,10 +173,8 @@ static struct platform_device exynos_cpuidle = {
 
 void __init exynos_cpuidle_init(void)
 {
-       if (soc_is_exynos5440())
-               return;
-
-       platform_device_register(&exynos_cpuidle);
+       if (soc_is_exynos4210() || soc_is_exynos5250())
+               platform_device_register(&exynos_cpuidle);
 }
 
 void __init exynos_cpufreq_init(void)
@@ -184,6 +182,28 @@ void __init exynos_cpufreq_init(void)
        platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
 }
 
+void __iomem *sysram_base_addr;
+void __iomem *sysram_ns_base_addr;
+
+void __init exynos_sysram_init(void)
+{
+       struct device_node *node;
+
+       for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram") {
+               if (!of_device_is_available(node))
+                       continue;
+               sysram_base_addr = of_iomap(node, 0);
+               break;
+       }
+
+       for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram-ns") {
+               if (!of_device_is_available(node))
+                       continue;
+               sysram_ns_base_addr = of_iomap(node, 0);
+               break;
+       }
+}
+
 void __init exynos_init_late(void)
 {
        if (of_machine_is_compatible("samsung,exynos5440"))
@@ -198,7 +218,7 @@ static int __init exynos_fdt_map_chipid(unsigned long node, const char *uname,
                                        int depth, void *data)
 {
        struct map_desc iodesc;
-       __be32 *reg;
+       const __be32 *reg;
        int len;
 
        if (!of_flat_dt_is_compatible(node, "samsung,exynos4210-chipid") &&
@@ -271,6 +291,13 @@ static void __init exynos_dt_machine_init(void)
                }
        }
 
+       /*
+        * exynos_sysram_init() is called from smp_prepare_cpus() on SMP
+        * builds, but PM and firmware ops still need the mapping on
+        * non-SMP builds, so set it up here.
+        */
+       if (!IS_ENABLED(CONFIG_SMP))
+               exynos_sysram_init();
+
        exynos_cpuidle_init();
        exynos_cpufreq_init();
 
@@ -308,6 +335,15 @@ static void __init exynos_reserve(void)
 #endif
 }
 
+static void __init exynos_dt_fixup(void)
+{
+       /*
+        * Some versions of uboot pass garbage entries in the memory node,
+        * use the old CONFIG_ARM_NR_BANKS
+        */
+       of_fdt_limit_memory(8);
+}
+
 DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
        /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
        /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@@ -321,4 +357,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
        .dt_compat      = exynos_dt_compat,
        .restart        = exynos_restart,
        .reserve        = exynos_reserve,
+       .dt_fixup       = exynos_dt_fixup,
 MACHINE_END
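exynos_sysram_init() above uses the usual device-tree idiom for mapping the first available node of a given compatible. A hedged sketch of that idiom in isolation (the helper name is illustrative; note that the iterator takes a reference on each node, so a break should be paired with of_node_put() unless the reference is intentionally kept):

    #include <linux/of.h>
    #include <linux/of_address.h>

    static void __iomem *map_first_available(const char *compat)
    {
            struct device_node *node;
            void __iomem *base = NULL;

            for_each_compatible_node(node, NULL, compat) {
                    if (!of_device_is_available(node))
                            continue;           /* skip status = "disabled" */
                    base = of_iomap(node, 0);
                    of_node_put(node);          /* the iterator holds a ref */
                    break;
            }
            return base;
    }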
index eb91d23..e8797bb 100644 (file)
@@ -57,8 +57,13 @@ static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
 
        boot_reg = sysram_ns_base_addr + 0x1c;
 
-       if (!soc_is_exynos4212() && !soc_is_exynos3250())
-               boot_reg += 4*cpu;
+       /*
+        * Almost all Exynos SoCs that run in secure mode need no
+        * additional per-CPU offset for the boot register; Exynos4412 is
+        * the only exception.
+        */
+       if (soc_is_exynos4412())
+               boot_reg += 4 * cpu;
 
        __raw_writel(boot_addr, boot_reg);
        return 0;
index 69fa483..920a4ba 100644 (file)
@@ -40,21 +40,17 @@ static inline void cpu_leave_lowpower(void)
 
 static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 {
+       u32 mpidr = cpu_logical_map(cpu);
+       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
        for (;;) {
 
-               /* make cpu1 to be turned off at next WFI command */
-               if (cpu == 1)
-                       exynos_cpu_power_down(cpu);
+               /* Turn the CPU off on next WFI instruction. */
+               exynos_cpu_power_down(core_id);
 
-               /*
-                * here's the WFI
-                */
-               asm(".word      0xe320f003\n"
-                   :
-                   :
-                   : "memory", "cc");
+               wfi();
 
-               if (pen_release == cpu_logical_map(cpu)) {
+               if (pen_release == core_id) {
                        /*
                         * OK, proper wakeup, we're done
                         */
index 0498d0b..ace0ed6 100644 (file)
@@ -25,7 +25,6 @@
 
 #define EXYNOS5420_CPUS_PER_CLUSTER    4
 #define EXYNOS5420_NR_CLUSTERS         2
-#define MCPM_BOOT_ADDR_OFFSET          0x1c
 
 /*
  * The common v7_exit_coherency_flush API could not be used because of the
@@ -343,11 +342,13 @@ static int __init exynos_mcpm_init(void)
        pr_info("Exynos MCPM support installed\n");
 
        /*
-        * Future entries into the kernel can now go
-        * through the cluster entry vectors.
+        * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
+        * as part of secondary_cpu_start().  Let's redirect it to the
+        * mcpm_entry_point().
         */
-       __raw_writel(virt_to_phys(mcpm_entry_point),
-                       ns_sram_base_addr + MCPM_BOOT_ADDR_OFFSET);
+       __raw_writel(0xe59f0000, ns_sram_base_addr);     /* ldr r0, [pc, #0] */
+       __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx  r0 */
+       __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
 
        iounmap(ns_sram_base_addr);
 
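The three writes above plant a tiny ARM trampoline at the spot where U-Boot SPL jumps: 0xe59f0000 encodes ldr r0, [pc, #0], and since PC reads as the instruction address plus 8, it loads the literal stored two words later; 0xe12fff10 encodes bx r0. A hedged restatement with the layout spelled out (the helper name is illustrative):

    #include <linux/io.h>
    #include <linux/types.h>

    /*
     * SRAM layout after planting:
     *   base + 0: e59f0000   ldr r0, [pc, #0]   @ pc reads as base + 8 here
     *   base + 4: e12fff10   bx  r0
     *   base + 8: <target>   the literal that the ldr fetches
     */
    static void plant_trampoline(void __iomem *base, u32 target)
    {
            __raw_writel(0xe59f0000, base);
            __raw_writel(0xe12fff10, base + 4);
            __raw_writel(target, base + 8);
    }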
index ec02422..50b9aad 100644 (file)
 
 extern void exynos4_secondary_startup(void);
 
-void __iomem *sysram_base_addr;
-void __iomem *sysram_ns_base_addr;
-
-static void __init exynos_smp_prepare_sysram(void)
-{
-       struct device_node *node;
-
-       for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram") {
-               if (!of_device_is_available(node))
-                       continue;
-               sysram_base_addr = of_iomap(node, 0);
-               break;
-       }
-
-       for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram-ns") {
-               if (!of_device_is_available(node))
-                       continue;
-               sysram_ns_base_addr = of_iomap(node, 0);
-               break;
-       }
-}
-
 static inline void __iomem *cpu_boot_reg_base(void)
 {
        if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
@@ -112,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        unsigned long timeout;
-       unsigned long phys_cpu = cpu_logical_map(cpu);
+       u32 mpidr = cpu_logical_map(cpu);
+       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        int ret = -ENOSYS;
 
        /*
@@ -126,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
         * the holding pen - release it, then wait for it to flag
         * that it has been released by resetting pen_release.
         *
-        * Note that "pen_release" is the hardware CPU ID, whereas
+        * Note that "pen_release" is the hardware CPU core ID, whereas
         * "cpu" is Linux's internal ID.
         */
-       write_pen_release(phys_cpu);
+       write_pen_release(core_id);
 
-       if (!exynos_cpu_power_state(cpu)) {
-               exynos_cpu_power_up(cpu);
+       if (!exynos_cpu_power_state(core_id)) {
+               exynos_cpu_power_up(core_id);
                timeout = 10;
 
                /* wait max 10 ms until cpu1 is on */
-               while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) {
+               while (exynos_cpu_power_state(core_id)
+                      != S5P_CORE_LOCAL_PWR_EN) {
                        if (timeout-- == 0)
                                break;
 
@@ -167,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
                 * Try to set boot address using firmware first
                 * and fall back to boot register if it fails.
                 */
-               ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+               ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
                if (ret && ret != -ENOSYS)
                        goto fail;
                if (ret == -ENOSYS) {
-                       void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+                       void __iomem *boot_reg = cpu_boot_reg(core_id);
 
                        if (IS_ERR(boot_reg)) {
                                ret = PTR_ERR(boot_reg);
                                goto fail;
                        }
-                       __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+                       __raw_writel(boot_addr, cpu_boot_reg(core_id));
                }
 
-               call_firmware_op(cpu_boot, phys_cpu);
+               call_firmware_op(cpu_boot, core_id);
 
                arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
@@ -234,11 +214,11 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
 {
        int i;
 
+       exynos_sysram_init();
+
        if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                scu_enable(scu_base_addr());
 
-       exynos_smp_prepare_sysram();
-
        /*
         * Write the address of secondary startup into the
         * system-wide flags register. The boot monitor waits
@@ -249,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
         * boot register if it fails.
         */
        for (i = 1; i < max_cpus; ++i) {
-               unsigned long phys_cpu;
                unsigned long boot_addr;
+               u32 mpidr;
+               u32 core_id;
                int ret;
 
-               phys_cpu = cpu_logical_map(i);
+               mpidr = cpu_logical_map(i);
+               core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                boot_addr = virt_to_phys(exynos4_secondary_startup);
 
-               ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+               ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
                if (ret && ret != -ENOSYS)
                        break;
                if (ret == -ENOSYS) {
-                       void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+                       void __iomem *boot_reg = cpu_boot_reg(core_id);
 
                        if (IS_ERR(boot_reg))
                                break;
-                       __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+                       __raw_writel(boot_addr, cpu_boot_reg(core_id));
                }
        }
 }
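The conversions above stop treating the cpu_logical_map() value as a plain core number: on multi-cluster parts the MPIDR packs (cluster, core) into 8-bit affinity fields, and the boot registers and power controls are indexed by affinity level 0 alone. A stand-alone restatement of the field extraction (this mirrors the kernel's MPIDR_AFFINITY_LEVEL(); the macro name here is a local stand-in):

    #include <stdio.h>
    #include <stdint.h>

    #define MPIDR_LEVEL_BITS  8
    #define MPIDR_LEVEL_MASK  ((1u << MPIDR_LEVEL_BITS) - 1)
    #define MPIDR_AFF_LVL(mpidr, lvl) \
            (((mpidr) >> ((lvl) * MPIDR_LEVEL_BITS)) & MPIDR_LEVEL_MASK)

    int main(void)
    {
            uint32_t mpidr = 0x0101;    /* cluster 1, core 1 */

            printf("core %u, cluster %u\n",
                   MPIDR_AFF_LVL(mpidr, 0), MPIDR_AFF_LVL(mpidr, 1));
            return 0;
    }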
index 87c0d34..202ca73 100644 (file)
@@ -300,7 +300,7 @@ static int exynos_pm_suspend(void)
        tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
        __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                exynos_cpu_save_register();
 
        return 0;
@@ -334,7 +334,7 @@ static void exynos_pm_resume(void)
        if (exynos_pm_central_resume())
                goto early_wakeup;
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                exynos_cpu_restore_register();
 
        /* For release retention */
@@ -353,7 +353,7 @@ static void exynos_pm_resume(void)
 
        s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                scu_enable(S5P_VA_SCU);
 
 early_wakeup:
@@ -440,15 +440,18 @@ static int exynos_cpu_pm_notifier(struct notifier_block *self,
        case CPU_PM_ENTER:
                if (cpu == 0) {
                        exynos_pm_central_suspend();
-                       exynos_cpu_save_register();
+                       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+                               exynos_cpu_save_register();
                }
                break;
 
        case CPU_PM_EXIT:
                if (cpu == 0) {
-                       if (!soc_is_exynos5250())
+                       if (read_cpuid_part_number() ==
+                                       ARM_CPU_PART_CORTEX_A9) {
                                scu_enable(S5P_VA_SCU);
-                       exynos_cpu_restore_register();
+                               exynos_cpu_restore_register();
+                       }
                        exynos_pm_central_resume();
                }
                break;
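The suspend/resume paths above replace SoC-name checks (soc_is_exynos5250() and friends) with a CPU-implementation check: the SCU and the saved CP15 state only exist on Cortex-A9-based parts, so testing the CPU part number covers future SoCs without growing the list. A hedged sketch of the test (the function name is illustrative):

    #include <linux/types.h>
    #include <asm/cputype.h>

    static bool have_a9_scu(void)
    {
            return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
    }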
index fe6570e..797cb13 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/pm_domain.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
@@ -24,6 +25,8 @@
 
 #include "regs-pmu.h"
 
+#define MAX_CLK_PER_DOMAIN     4
+
 /*
  * Exynos specific wrapper around the generic power domain
  */
@@ -32,6 +35,9 @@ struct exynos_pm_domain {
        char const *name;
        bool is_off;
        struct generic_pm_domain pd;
+       struct clk *oscclk;
+       struct clk *clk[MAX_CLK_PER_DOMAIN];
+       struct clk *pclk[MAX_CLK_PER_DOMAIN];
 };
 
 static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -44,6 +50,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
        pd = container_of(domain, struct exynos_pm_domain, pd);
        base = pd->base;
 
+       /* Set oscclk before powering off a domain */
+       if (!power_on) {
+               int i;
+
+               for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+                       if (IS_ERR(pd->clk[i]))
+                               break;
+                       if (clk_set_parent(pd->clk[i], pd->oscclk))
+                               pr_err("%s: error setting oscclk as parent to clock %d\n",
+                                               pd->name, i);
+               }
+       }
+
        pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0;
        __raw_writel(pwr, base);
 
@@ -60,6 +79,20 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
                cpu_relax();
                usleep_range(80, 100);
        }
+
+       /* Restore clocks after powering on a domain */
+       if (power_on) {
+               int i;
+
+               for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+                       if (IS_ERR(pd->clk[i]))
+                               break;
+                       if (clk_set_parent(pd->clk[i], pd->pclk[i]))
+                               pr_err("%s: error setting parent to clock %d\n",
+                                               pd->name, i);
+               }
+       }
+
        return 0;
 }
 
@@ -152,9 +185,11 @@ static __init int exynos4_pm_init_power_domain(void)
 
        for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
                struct exynos_pm_domain *pd;
-               int on;
+               int on, i;
+               struct device *dev;
 
                pdev = of_find_device_by_node(np);
+               dev = &pdev->dev;
 
                pd = kzalloc(sizeof(*pd), GFP_KERNEL);
                if (!pd) {
@@ -170,6 +205,30 @@ static __init int exynos4_pm_init_power_domain(void)
                pd->pd.power_on = exynos_pd_power_on;
                pd->pd.of_node = np;
 
+               pd->oscclk = clk_get(dev, "oscclk");
+               if (IS_ERR(pd->oscclk))
+                       goto no_clk;
+
+               for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+                       char clk_name[8];
+
+                       snprintf(clk_name, sizeof(clk_name), "clk%d", i);
+                       pd->clk[i] = clk_get(dev, clk_name);
+                       if (IS_ERR(pd->clk[i]))
+                               break;
+                       snprintf(clk_name, sizeof(clk_name), "pclk%d", i);
+                       pd->pclk[i] = clk_get(dev, clk_name);
+                       if (IS_ERR(pd->pclk[i])) {
+                               clk_put(pd->clk[i]);
+                               pd->clk[i] = ERR_PTR(-EINVAL);
+                               break;
+                       }
+               }
+
+               if (IS_ERR(pd->clk[0]))
+                       clk_put(pd->oscclk);
+
+no_clk:
                platform_set_drvdata(pdev, pd);
 
                on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
index 830b76e..a5960e2 100644 (file)
@@ -1,7 +1,6 @@
 config ARCH_HIGHBANK
        bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
        select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_HAS_OPP
        select ARCH_SUPPORTS_BIG_ENDIAN
index 8d42eab..4b51857 100644 (file)
@@ -1,6 +1,5 @@
-config ARCH_MXC
+menuconfig ARCH_MXC
        bool "Freescale i.MX family" if ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select ARCH_REQUIRE_GPIOLIB
        select ARM_CPU_SUSPEND if PM
@@ -13,8 +12,7 @@ config ARCH_MXC
        help
          Support for Freescale MXC/iMX-based family of processors
 
-menu "Freescale i.MX support"
-       depends on ARCH_MXC
+if ARCH_MXC
 
 config MXC_TZIC
        bool
@@ -99,7 +97,6 @@ config SOC_IMX25
 
 config SOC_IMX27
        bool
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select CPU_ARM926T
        select IMX_HAVE_IOMUX_V1
@@ -124,7 +121,6 @@ config SOC_IMX35
 
 config SOC_IMX5
        bool
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select ARCH_MXC_IOMUX_V3
        select MXC_TZIC
@@ -738,9 +734,9 @@ config SOC_IMX6
        select HAVE_IMX_MMDC
        select HAVE_IMX_SRC
        select MFD_SYSCON
-       select PL310_ERRATA_588369 if CACHE_PL310
-       select PL310_ERRATA_727915 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
 
 config SOC_IMX6Q
        bool "i.MX6 Quad/DualLite support"
@@ -775,9 +771,9 @@ config SOC_VF610
        select ARM_GIC
        select PINCTRL_VF610
        select VF_PIT_TIMER
-       select PL310_ERRATA_588369 if CACHE_PL310
-       select PL310_ERRATA_727915 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
 
        help
          This enables support for the Freescale Vybrid VF610 processor.
@@ -786,4 +782,4 @@ endif
 
 source "arch/arm/mach-imx/devices/Kconfig"
 
-endmenu
+endif
index 4ba587d..84acdfd 100644 (file)
@@ -67,8 +67,12 @@ static void clk_gate2_disable(struct clk_hw *hw)
 
        spin_lock_irqsave(gate->lock, flags);
 
-       if (gate->share_count && --(*gate->share_count) > 0)
-               goto out;
+       if (gate->share_count) {
+               if (WARN_ON(*gate->share_count == 0))
+                       goto out;
+               else if (--(*gate->share_count) > 0)
+                       goto out;
+       }
 
        reg = readl(gate->reg);
        reg &= ~(3 << gate->bit_idx);
@@ -78,19 +82,26 @@ out:
        spin_unlock_irqrestore(gate->lock, flags);
 }
 
-static int clk_gate2_is_enabled(struct clk_hw *hw)
+static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
 {
-       u32 reg;
-       struct clk_gate2 *gate = to_clk_gate2(hw);
+       u32 val = readl(reg);
 
-       reg = readl(gate->reg);
-
-       if (((reg >> gate->bit_idx) & 1) == 1)
+       if (((val >> bit_idx) & 1) == 1)
                return 1;
 
        return 0;
 }
 
+static int clk_gate2_is_enabled(struct clk_hw *hw)
+{
+       struct clk_gate2 *gate = to_clk_gate2(hw);
+
+       if (gate->share_count)
+               return !!(*gate->share_count);
+       else
+               return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
+}
+
 static struct clk_ops clk_gate2_ops = {
        .enable = clk_gate2_enable,
        .disable = clk_gate2_disable,
@@ -116,6 +127,10 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
        gate->bit_idx = bit_idx;
        gate->flags = clk_gate2_flags;
        gate->lock = lock;
+
+       /* Initialize share_count per hardware state */
+       if (share_count)
+               *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0;
        gate->share_count = share_count;
 
        init.name = name;
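The share_count logic above lets several clocks share one gate bit: only the 0-to-1 and 1-to-0 transitions touch the register, an unbalanced disable is caught by WARN_ON, and is_enabled trusts the count whenever the gate is shared (the count itself is seeded from the register at registration time). A self-contained model of that bookkeeping (single gate, userspace, purely illustrative):

    #include <stdio.h>

    static unsigned int share_count;    /* users of the shared gate */
    static int hw_bit;                  /* the real enable bit */

    static void gate_enable(void)
    {
            if (share_count++ == 0)
                    hw_bit = 1;         /* first user flips the hardware */
    }

    static void gate_disable(void)
    {
            if (share_count == 0) {     /* mirrors the patch's WARN_ON */
                    fprintf(stderr, "unbalanced disable\n");
                    return;
            }
            if (--share_count == 0)
                    hw_bit = 0;         /* last user flips it back */
    }

    static int gate_is_enabled(void)
    {
            return share_count ? 1 : hw_bit;
    }

    int main(void)
    {
            gate_enable();
            gate_enable();
            gate_disable();
            printf("enabled=%d count=%u\n", gate_is_enabled(), share_count);
            gate_disable();
            printf("enabled=%d count=%u\n", gate_is_enabled(), share_count);
            return 0;
    }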
index 8e795de..8556c78 100644 (file)
@@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
 static const char *lvds_sels[] = {
        "dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
        "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
-       "pcie_ref", "sata_ref",
+       "pcie_ref_125m", "sata_ref_100m",
 };
 
 enum mx6q_clks {
@@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 
        /* All existing boards with PCIe use LVDS1 */
        if (IS_ENABLED(CONFIG_PCI_IMX6))
-               clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+               clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
 
        /* Set initial power mode */
        imx6q_set_lpm(WAIT_CLOCKED);
index 21cf06c..5408ca7 100644 (file)
@@ -312,6 +312,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
        clks[IMX6SL_CLK_ECSPI2]       = imx_clk_gate2("ecspi2",       "ecspi_root",        base + 0x6c, 2);
        clks[IMX6SL_CLK_ECSPI3]       = imx_clk_gate2("ecspi3",       "ecspi_root",        base + 0x6c, 4);
        clks[IMX6SL_CLK_ECSPI4]       = imx_clk_gate2("ecspi4",       "ecspi_root",        base + 0x6c, 6);
+       clks[IMX6SL_CLK_ENET]         = imx_clk_gate2("enet",         "ipg",               base + 0x6c, 10);
        clks[IMX6SL_CLK_EPIT1]        = imx_clk_gate2("epit1",        "perclk",            base + 0x6c, 12);
        clks[IMX6SL_CLK_EPIT2]        = imx_clk_gate2("epit2",        "perclk",            base + 0x6c, 14);
        clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16);
index ba43321..64f8e25 100644 (file)
@@ -28,7 +28,7 @@ config ARCH_CINTEGRATOR
        bool
 
 config INTEGRATOR_IMPD1
-       tristate "Include support for Integrator/IM-PD1"
+       bool "Include support for Integrator/IM-PD1"
        depends on ARCH_INTEGRATOR_AP
        select ARCH_REQUIRE_GPIOLIB
        select ARM_VIC
index 0e870ea..3ce8807 100644 (file)
@@ -308,7 +308,12 @@ static struct impd1_device impd1_devs[] = {
  */
 #define IMPD1_VALID_IRQS 0x00000bffU
 
-static int __init impd1_probe(struct lm_device *dev)
+/*
+ * As this driver is built-in only (bool), it is OK to mark the probe
+ * routine __init_refok: no probe calls are made after the initial
+ * system bootup, as devices are discovered during machine startup.
+ */
+static int __init_refok impd1_probe(struct lm_device *dev)
 {
        struct impd1_module *impd1;
        int irq_base;
@@ -397,6 +402,11 @@ static void impd1_remove(struct lm_device *dev)
 static struct lm_driver impd1_driver = {
        .drv = {
                .name   = "impd1",
+               /*
+                * As we're dropping the probe() function, suppress driver
+                * binding from sysfs.
+                */
+               .suppress_bind_attrs = true,
        },
        .probe          = impd1_probe,
        .remove         = impd1_remove,
index dd0cc67..660ca6f 100644 (file)
@@ -480,25 +480,18 @@ static const struct of_device_id ebi_match[] = {
 static void __init ap_init_of(void)
 {
        unsigned long sc_dec;
-       struct device_node *root;
        struct device_node *syscon;
        struct device_node *ebi;
        struct device *parent;
        struct soc_device *soc_dev;
        struct soc_device_attribute *soc_dev_attr;
        u32 ap_sc_id;
-       int err;
        int i;
 
-       /* Here we create an SoC device for the root node */
-       root = of_find_node_by_path("/");
-       if (!root)
-               return;
-
-       syscon = of_find_matching_node(root, ap_syscon_match);
+       syscon = of_find_matching_node(NULL, ap_syscon_match);
        if (!syscon)
                return;
-       ebi = of_find_matching_node(root, ebi_match);
+       ebi = of_find_matching_node(NULL, ebi_match);
        if (!ebi)
                return;
 
@@ -509,19 +502,17 @@ static void __init ap_init_of(void)
        if (!ebi_base)
                return;
 
+       of_platform_populate(NULL, of_default_bus_match_table,
+                       ap_auxdata_lookup, NULL);
+
        ap_sc_id = readl(ap_syscon_base);
 
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                return;
 
-       err = of_property_read_string(root, "compatible",
-                                     &soc_dev_attr->soc_id);
-       if (err)
-               return;
-       err = of_property_read_string(root, "model", &soc_dev_attr->machine);
-       if (err)
-               return;
+       soc_dev_attr->soc_id = "XVC";
+       soc_dev_attr->machine = "Integrator/AP";
        soc_dev_attr->family = "Integrator";
        soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
                                           'A' + (ap_sc_id & 0x0f));
@@ -536,9 +527,6 @@ static void __init ap_init_of(void)
        parent = soc_device_to_device(soc_dev);
        integrator_init_sysfs(parent, ap_sc_id);
 
-       of_platform_populate(root, of_default_bus_match_table,
-                       ap_auxdata_lookup, parent);
-
        sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET);
        for (i = 0; i < 4; i++) {
                struct lm_device *lmdev;
index a938242..0e57f8f 100644 (file)
@@ -279,20 +279,13 @@ static const struct of_device_id intcp_syscon_match[] = {
 
 static void __init intcp_init_of(void)
 {
-       struct device_node *root;
        struct device_node *cpcon;
        struct device *parent;
        struct soc_device *soc_dev;
        struct soc_device_attribute *soc_dev_attr;
        u32 intcp_sc_id;
-       int err;
 
-       /* Here we create an SoC device for the root node */
-       root = of_find_node_by_path("/");
-       if (!root)
-               return;
-
-       cpcon = of_find_matching_node(root, intcp_syscon_match);
+       cpcon = of_find_matching_node(NULL, intcp_syscon_match);
        if (!cpcon)
                return;
 
@@ -300,19 +293,17 @@ static void __init intcp_init_of(void)
        if (!intcp_con_base)
                return;
 
+       of_platform_populate(NULL, of_default_bus_match_table,
+                            intcp_auxdata_lookup, NULL);
+
        intcp_sc_id = readl(intcp_con_base);
 
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                return;
 
-       err = of_property_read_string(root, "compatible",
-                                     &soc_dev_attr->soc_id);
-       if (err)
-               return;
-       err = of_property_read_string(root, "model", &soc_dev_attr->machine);
-       if (err)
-               return;
+       soc_dev_attr->soc_id = "XCV";
+       soc_dev_attr->machine = "Integrator/CP";
        soc_dev_attr->family = "Integrator";
        soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
                                           'A' + (intcp_sc_id & 0x0f));
@@ -326,8 +317,6 @@ static void __init intcp_init_of(void)
 
        parent = soc_device_to_device(soc_dev);
        integrator_init_sysfs(parent, intcp_sc_id);
-       of_platform_populate(root, of_default_bus_match_table,
-                       intcp_auxdata_lookup, parent);
 }
 
 static const char * intcp_dt_board_compat[] = {
index f50bc93..98a156a 100644 (file)
@@ -1,6 +1,7 @@
 config ARCH_KEYSTONE
        bool "Texas Instruments Keystone Devices"
        depends on ARCH_MULTI_V7
+       depends on ARM_PATCH_PHYS_VIRT
        select ARM_GIC
        select HAVE_ARM_ARCH_TIMER
        select CLKSRC_MMIO
index 82a4ba8..f49328c 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_MOXART
+menuconfig ARCH_MOXART
        bool "MOXA ART SoC" if ARCH_MULTI_V4
        select CPU_FA526
        select ARM_DMA_MEM_BUFFERABLE
index 6090b9e..b9bc599 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_MVEBU
+menuconfig ARCH_MVEBU
        bool "Marvell Engineering Business Unit (MVEBU) SoCs" if (ARCH_MULTI_V7 || ARCH_MULTI_V5)
        select ARCH_SUPPORTS_BIG_ENDIAN
        select CLKSRC_MMIO
@@ -10,15 +10,15 @@ config ARCH_MVEBU
        select ZONE_DMA if ARM_LPAE
        select ARCH_REQUIRE_GPIOLIB
        select PCI_QUIRKS if PCI
+       select OF_ADDRESS_PCI
 
 if ARCH_MVEBU
 
-menu "Marvell EBU SoC variants"
-
 config MACH_MVEBU_V7
        bool
        select ARMADA_370_XP_TIMER
        select CACHE_L2X0
+       select ARM_CPU_SUSPEND
 
 config MACH_ARMADA_370
        bool "Marvell Armada 370 boards" if ARCH_MULTI_V7
@@ -84,7 +84,6 @@ config MACH_DOVE
 
 config MACH_KIRKWOOD
        bool "Marvell Kirkwood boards" if ARCH_MULTI_V5
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select CPU_FEROCEON
        select KIRKWOOD_CLK
@@ -97,6 +96,4 @@ config MACH_KIRKWOOD
          Say 'Y' here if you want your kernel to support boards based
          on the Marvell Kirkwood device tree.
 
-endmenu
-
 endif
index 2ecb828..1636cdb 100644 (file)
@@ -7,7 +7,7 @@ CFLAGS_pmsu.o                   := -march=armv7-a
 obj-y                           += system-controller.o mvebu-soc-id.o
 
 ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o
+obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
 obj-$(CONFIG_SMP)               += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
 obj-$(CONFIG_HOTPLUG_CPU)       += hotplug.o
 endif
index 8bb742f..b2524d6 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mbus.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
+#include <linux/irqchip.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -71,17 +72,23 @@ static int armada_375_external_abort_wa(unsigned long addr, unsigned int fsr,
        return 1;
 }
 
-static void __init mvebu_timer_and_clk_init(void)
+static void __init mvebu_init_irq(void)
 {
-       of_clk_init(NULL);
-       clocksource_of_init();
+       irqchip_init();
        mvebu_scu_enable();
        coherency_init();
        BUG_ON(mvebu_mbus_dt_init(coherency_available()));
+}
+
+static void __init external_abort_quirk(void)
+{
+       u32 dev, rev;
 
-       if (of_machine_is_compatible("marvell,armada375"))
-               hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0,
-                               "imprecise external abort");
+       if (mvebu_get_soc_id(&dev, &rev) == 0 && rev > ARMADA_375_Z1_REV)
+               return;
+
+       hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0,
+                       "imprecise external abort");
 }
 
 static void __init i2c_quirk(void)
@@ -169,8 +176,10 @@ static void __init mvebu_dt_init(void)
 {
        if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
                i2c_quirk();
-       if (of_machine_is_compatible("marvell,a375-db"))
+       if (of_machine_is_compatible("marvell,a375-db")) {
+               external_abort_quirk();
                thermal_quirk();
+       }
 
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
@@ -185,7 +194,7 @@ DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
        .l2c_aux_mask   = ~0,
        .smp            = smp_ops(armada_xp_smp_ops),
        .init_machine   = mvebu_dt_init,
-       .init_time      = mvebu_timer_and_clk_init,
+       .init_irq       = mvebu_init_irq,
        .restart        = mvebu_restart,
        .dt_compat      = armada_370_xp_dt_compat,
 MACHINE_END
@@ -198,7 +207,7 @@ static const char * const armada_375_dt_compat[] = {
 DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
        .l2c_aux_val    = 0,
        .l2c_aux_mask   = ~0,
-       .init_time      = mvebu_timer_and_clk_init,
+       .init_irq       = mvebu_init_irq,
        .init_machine   = mvebu_dt_init,
        .restart        = mvebu_restart,
        .dt_compat      = armada_375_dt_compat,
@@ -213,7 +222,7 @@ static const char * const armada_38x_dt_compat[] = {
 DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
        .l2c_aux_val    = 0,
        .l2c_aux_mask   = ~0,
-       .init_time      = mvebu_timer_and_clk_init,
+       .init_irq       = mvebu_init_irq,
        .restart        = mvebu_restart,
        .dt_compat      = armada_38x_dt_compat,
 MACHINE_END
index 477202f..2bdc323 100644 (file)
@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
        .notifier_call = mvebu_hwcc_notifier,
 };
 
+static struct notifier_block mvebu_hwcc_pci_nb = {
+       .notifier_call = mvebu_hwcc_notifier,
+};
+
 static void __init armada_370_coherency_init(struct device_node *np)
 {
        struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
 {
        if (coherency_available())
                bus_register_notifier(&pci_bus_type,
-                                      &mvebu_hwcc_nb);
+                                      &mvebu_hwcc_pci_nb);
        return 0;
 }
 
index 5925366..da5bb29 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
+#include <asm/assembler.h>
+
        __CPUINIT
 #define CPU_RESUME_ADDR_REG 0xf10182d4
 
 .global armada_375_smp_cpu1_enable_code_end
 
 armada_375_smp_cpu1_enable_code_start:
-       ldr     r0, [pc, #4]
+ARM_BE8(setend be)
+       adr     r0, 1f
+       ldr     r0, [r0]
        ldr     r1, [r0]
+ARM_BE8(rev    r1, r1)
        mov     pc, r1
+1:
        .word   CPU_RESUME_ADDR_REG
 armada_375_smp_cpu1_enable_code_end:
 
 ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend be)
        bl      v7_invalidate_l1
        b       secondary_startup
 ENDPROC(mvebu_cortex_a9_secondary_startup)
index 53a55c8..25aa823 100644 (file)
@@ -66,6 +66,8 @@ static void __iomem *pmsu_mp_base;
 extern void ll_disable_coherency(void);
 extern void ll_enable_coherency(void);
 
+extern void armada_370_xp_cpu_resume(void);
+
 static struct platform_device armada_xp_cpuidle_device = {
        .name = "cpuidle-armada-370-xp",
 };
@@ -140,13 +142,6 @@ static void armada_370_xp_pmsu_enable_l2_powerdown_onidle(void)
        writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
 }
 
-static void armada_370_xp_cpu_resume(void)
-{
-       asm volatile("bl    ll_add_cpu_to_smp_group\n\t"
-                    "bl    ll_enable_coherency\n\t"
-                    "b     cpu_resume\n\t");
-}
-
 /* No locking is needed because we only access per-CPU registers */
 void armada_370_xp_pmsu_idle_prepare(bool deepidle)
 {
@@ -206,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
 
        /* Test the CR_C bit and set it if it was cleared */
        asm volatile(
-       "mrc    p15, 0, %0, c1, c0, 0 \n\t"
-       "tst    %0, #(1 << 2) \n\t"
-       "orreq  %0, %0, #(1 << 2) \n\t"
-       "mcreq  p15, 0, %0, c1, c0, 0 \n\t"
+       "mrc    p15, 0, r0, c1, c0, 0 \n\t"
+       "tst    r0, #(1 << 2) \n\t"
+       "orreq  r0, r0, #(1 << 2) \n\t"
+       "mcreq  p15, 0, r0, c1, c0, 0 \n\t"
        "isb    "
-       : : "r" (0));
+       : : : "r0");
 
        pr_warn("Failed to suspend the system\n");
 
diff --git a/arch/arm/mach-mvebu/pmsu_ll.S b/arch/arm/mach-mvebu/pmsu_ll.S
new file mode 100644 (file)
index 0000000..fc3de68
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * This is the entry point through which CPUs come back when they
+ * exit the cpuidle deep idle state.
+ */
+ENTRY(armada_370_xp_cpu_resume)
+ARM_BE8(setend be )                    @ go BE8 if entered LE
+       bl      ll_add_cpu_to_smp_group
+       bl      ll_enable_coherency
+       b       cpu_resume
+ENDPROC(armada_370_xp_cpu_resume)
+
index 486d301..3c61096 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_NOMADIK
+menuconfig ARCH_NOMADIK
        bool "ST-Ericsson Nomadik"
        depends on ARCH_MULTI_V5
        select ARCH_REQUIRE_GPIOLIB
@@ -15,7 +15,6 @@ config ARCH_NOMADIK
          Support for the Nomadik platform by ST-Ericsson
 
 if ARCH_NOMADIK
-menu "Nomadik boards"
 
 config MACH_NOMADIK_8815NHK
        bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
@@ -24,7 +23,6 @@ config MACH_NOMADIK_8815NHK
        select I2C_ALGOBIT
        select I2C_NOMADIK
 
-endmenu
 endif
 
 config NOMADIK_8815
index 0ba4826..1c1ed73 100644 (file)
@@ -1,3 +1,6 @@
+menu "TI OMAP/AM/DM/DRA Family"
+       depends on ARCH_MULTI_V6 || ARCH_MULTI_V7
+
 config ARCH_OMAP
        bool
 
@@ -28,12 +31,11 @@ config ARCH_OMAP4
        select ARM_CPU_SUSPEND if PM
        select ARM_ERRATA_720789
        select ARM_GIC
-       select CACHE_L2X0
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if SMP
        select OMAP_INTERCONNECT
-       select PL310_ERRATA_588369
-       select PL310_ERRATA_727915
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
        select PM_OPP if PM
        select PM_RUNTIME if CPU_IDLE
        select ARM_ERRATA_754322
@@ -80,7 +82,6 @@ config SOC_DRA7XX
 config ARCH_OMAP2PLUS
        bool
        select ARCH_HAS_BANDGAP
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_OMAP
        select ARCH_REQUIRE_GPIOLIB
@@ -343,3 +344,5 @@ config OMAP4_ERRATA_I688
 endmenu
 
 endif
+
+endmenu
index 8421f38..8ca99e9 100644 (file)
@@ -110,14 +110,16 @@ obj-y                                     += prm_common.o cm_common.o
 obj-$(CONFIG_ARCH_OMAP2)               += prm2xxx_3xxx.o prm2xxx.o cm2xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += prm2xxx_3xxx.o prm3xxx.o cm3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += vc3xxx_data.o vp3xxx_data.o
-obj-$(CONFIG_SOC_AM33XX)               += prm33xx.o cm33xx.o
 omap-prcm-4-5-common                   =  cminst44xx.o cm44xx.o prm44xx.o \
                                           prcm_mpu44xx.o prminst44xx.o \
                                           vc44xx_data.o vp44xx_data.o
 obj-$(CONFIG_ARCH_OMAP4)               += $(omap-prcm-4-5-common)
 obj-$(CONFIG_SOC_OMAP5)                        += $(omap-prcm-4-5-common)
 obj-$(CONFIG_SOC_DRA7XX)               += $(omap-prcm-4-5-common)
-obj-$(CONFIG_SOC_AM43XX)               += $(omap-prcm-4-5-common)
+am33xx-43xx-prcm-common                        += prm33xx.o cm33xx.o
+obj-$(CONFIG_SOC_AM33XX)               += $(am33xx-43xx-prcm-common)
+obj-$(CONFIG_SOC_AM43XX)               += $(omap-prcm-4-5-common) \
+                                          $(am33xx-43xx-prcm-common)
 
 # OMAP voltage domains
 voltagedomain-common                   := voltage.o vc.o vp.o
index 332af92..67fd26a 100644 (file)
@@ -76,7 +76,7 @@
  * (assuming that it is counting N upwards), or -2 if the enclosing loop
  * should skip to the next iteration (again assuming N is increasing).
  */
-static int _dpll_test_fint(struct clk_hw_omap *clk, u8 n)
+static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
 {
        struct dpll_data *dd;
        long fint, fint_min, fint_max;
index 04dab2f..ee6c784 100644 (file)
 #define OMAP3430_EN_WDT3_SHIFT                         12
 #define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK           (1 << 0)
 #define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT          0
+#define OMAP3430_IVA2_DPLL_FREQSEL_SHIFT               4
 #define OMAP3430_IVA2_DPLL_FREQSEL_MASK                        (0xf << 4)
 #define OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT         3
+#define OMAP3430_EN_IVA2_DPLL_SHIFT                    0
 #define OMAP3430_EN_IVA2_DPLL_MASK                     (0x7 << 0)
 #define OMAP3430_ST_IVA2_SHIFT                         0
 #define OMAP3430_ST_IVA2_CLK_MASK                      (1 << 0)
+#define OMAP3430_AUTO_IVA2_DPLL_SHIFT                  0
 #define OMAP3430_AUTO_IVA2_DPLL_MASK                   (0x7 << 0)
 #define OMAP3430_IVA2_CLK_SRC_SHIFT                    19
 #define OMAP3430_IVA2_CLK_SRC_WIDTH                    3
index 15a778c..bd24417 100644 (file)
@@ -380,7 +380,7 @@ void am33xx_cm_clkdm_disable_hwsup(u16 inst, u16 cdoffs);
 void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs);
 void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs);
 
-#ifdef CONFIG_SOC_AM33XX
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 extern int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs,
                                        u16 clkctrl_offs);
 extern void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs,
index ff02973..dc571f1 100644 (file)
@@ -91,7 +91,14 @@ extern void omap3_sync32k_timer_init(void);
 extern void omap3_secure_sync32k_timer_init(void);
 extern void omap3_gptimer_timer_init(void);
 extern void omap4_local_timer_init(void);
+#ifdef CONFIG_CACHE_L2X0
 int omap_l2_cache_init(void);
+#else
+static inline int omap_l2_cache_init(void)
+{
+       return 0;
+}
+#endif
 extern void omap5_realtime_timer_init(void);
 
 void omap2420_init_early(void);
@@ -155,7 +162,8 @@ static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd)
 }
 #endif
 
-#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
 void omap44xx_restart(enum reboot_mode mode, const char *cmd);
 #else
 static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
@@ -241,7 +249,6 @@ static inline void __iomem *omap4_get_scu_base(void)
 }
 #endif
 
-extern void __init gic_init_irq(void);
 extern void gic_dist_disable(void);
 extern void gic_dist_enable(void);
 extern bool gic_dist_disabled(void);
index 592ba0a..b6f8f34 100644 (file)
@@ -297,33 +297,6 @@ static void omap_init_audio(void)
 static inline void omap_init_audio(void) {}
 #endif
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI) || \
-               defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI_MODULE)
-
-static struct platform_device omap_hdmi_audio = {
-       .name   = "omap-hdmi-audio",
-       .id     = -1,
-};
-
-static void __init omap_init_hdmi_audio(void)
-{
-       struct omap_hwmod *oh;
-       struct platform_device *pdev;
-
-       oh = omap_hwmod_lookup("dss_hdmi");
-       if (!oh)
-               return;
-
-       pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);
-       WARN(IS_ERR(pdev),
-            "Can't build omap_device for omap-hdmi-audio-dai.\n");
-
-       platform_device_register(&omap_hdmi_audio);
-}
-#else
-static inline void omap_init_hdmi_audio(void) {}
-#endif
-
 #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
@@ -459,7 +432,6 @@ static int __init omap2_init_devices(void)
         */
        omap_init_audio();
        omap_init_camera();
-       omap_init_hdmi_audio();
        omap_init_mbox();
        /* If dtb is there, the devices will be created dynamically */
        if (!of_have_populated_dt()) {
index b8208b4..f7492df 100644 (file)
@@ -29,6 +29,7 @@
 #ifdef CONFIG_TIDSPBRIDGE_DVFS
 #include "omap-pm.h"
 #endif
+#include "soc.h"
 
 #include <linux/platform_data/dsp-omap.h>
 
@@ -59,6 +60,9 @@ void __init omap_dsp_reserve_sdram_memblock(void)
        phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
        phys_addr_t paddr;
 
+       if (!cpu_is_omap34xx())
+               return;
+
        if (!size)
                return;
 
@@ -83,6 +87,9 @@ static int __init omap_dsp_init(void)
        int err = -ENOMEM;
        struct omap_dsp_platform_data *pdata = &omap_dsp_pdata;
 
+       if (!cpu_is_omap34xx())
+               return 0;
+
        pdata->phys_mempool_base = omap_dsp_get_mempool_base();
 
        if (pdata->phys_mempool_base) {
@@ -115,6 +122,9 @@ module_init(omap_dsp_init);
 
 static void __exit omap_dsp_exit(void)
 {
+       if (!cpu_is_omap34xx())
+               return;
+
        platform_device_unregister(omap_dsp_pdev);
 }
 module_exit(omap_dsp_exit);
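The cpu_is_omap34xx() guards above make the DSP code safe in a multiplatform kernel: init bails out (successfully) on foreign SoCs, and exit must mirror the same guard or it would unregister a device that was never created. The shape of the pattern, with do_register()/do_unregister() as illustrative placeholders and the SoC-detection header elided:

    #include <linux/init.h>
    #include <linux/module.h>

    static int do_register(void);       /* illustrative placeholders */
    static void do_unregister(void);

    static int __init foo_init(void)
    {
            if (!cpu_is_omap34xx())
                    return 0;           /* wrong SoC: succeed, do nothing */
            return do_register();
    }
    module_init(foo_init);

    static void __exit foo_exit(void)
    {
            if (!cpu_is_omap34xx())
                    return;             /* must mirror the init-time guard */
            do_unregister();
    }
    module_exit(foo_exit);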
index 17cd393..93914d2 100644 (file)
@@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
                 soc_is_omap54xx() || soc_is_dra7xx())
                return 1;
 
+       if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+                ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+               if (cpu_is_omap24xx())
+                       return 0;
+               else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+                       return 0;
+               else
+                       return 1;
+       }
+
        /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
         * which require H/W based ECC error detection */
        if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
                 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
                return 0;
 
-       /*
-        * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
-        * and AM33xx derivates. Other chips may be added if confirmed to work.
-        */
-       if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
-           (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
-               return 0;
-
        /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
        if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
                return 1;
index 2c0c281..8bc1338 100644 (file)
@@ -1615,7 +1615,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
                return ret;
        }
 
-       for_each_child_of_node(pdev->dev.of_node, child) {
+       for_each_available_child_of_node(pdev->dev.of_node, child) {
 
                if (!child->name)
                        continue;
index 43969da..d42022f 100644 (file)
@@ -649,6 +649,18 @@ void __init dra7xxx_check_revision(void)
                }
                break;
 
+       case 0xb9bc:
+               switch (rev) {
+               case 0:
+                       omap_revision = DRA722_REV_ES1_0;
+                       break;
+               default:
+                       /* If we have no new revisions */
+                       omap_revision = DRA722_REV_ES1_0;
+                       break;
+               }
+               break;
+
        default:
                /* Unknown: default to the latest known silicon rev */
                pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%d)\n",
index fd88ede..f62f753 100644 (file)
@@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
                m0_entry = mux->muxnames[0];
 
                /* First check for full name in mode0.muxmode format */
-               if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
-                       continue;
+               if (mode0_len)
+                       if (strncmp(muxname, m0_entry, mode0_len) ||
+                           (strlen(m0_entry) != mode0_len))
+                               continue;
 
                /* Then check for muxmode only */
                for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
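The mux fix above closes a classic strncmp() pitfall: comparing only strlen(muxname) bytes accepts any table entry that merely begins with muxname, so an exact match also has to compare lengths. A stand-alone demonstration (signal names are made up):

    #include <stdio.h>
    #include <string.h>

    static int full_match(const char *muxname, const char *entry)
    {
            size_t len = strlen(muxname);

            return strncmp(muxname, entry, len) == 0 && strlen(entry) == len;
    }

    int main(void)
    {
            printf("%d\n", full_match("uart1_rx", "uart1_rx"));      /* 1 */
            printf("%d\n", full_match("uart1_rx", "uart1_rx_mux0")); /* 0 */
            return 0;
    }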
index 326cd98..a0fe747 100644 (file)
@@ -102,26 +102,6 @@ void __init omap_barriers_init(void)
 {}
 #endif
 
-void __init gic_init_irq(void)
-{
-       void __iomem *omap_irq_base;
-
-       /* Static mapping, never released */
-       gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
-       BUG_ON(!gic_dist_base_addr);
-
-       twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_4K);
-       BUG_ON(!twd_base);
-
-       /* Static mapping, never released */
-       omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
-       BUG_ON(!omap_irq_base);
-
-       omap_wakeupgen_init();
-
-       gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
-}
-
 void gic_dist_disable(void)
 {
        if (gic_dist_base_addr)
@@ -188,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
                smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
                break;
 
+       case L310_POWER_CTRL:
+               pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+               return;
+
        default:
                WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
                return;
index f7bb435..6c074f3 100644 (file)
@@ -4251,9 +4251,9 @@ void __init omap_hwmod_init(void)
                soc_ops.enable_module = _omap4_enable_module;
                soc_ops.disable_module = _omap4_disable_module;
                soc_ops.wait_target_ready = _omap4_wait_target_ready;
-               soc_ops.assert_hardreset = _omap4_assert_hardreset;
-               soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
-               soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
+               soc_ops.assert_hardreset = _am33xx_assert_hardreset;
+               soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
+               soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted;
                soc_ops.init_clkdm = _init_clkdm;
        } else if (soc_is_am33xx()) {
                soc_ops.enable_module = _am33xx_enable_module;
index 290213f..1103aa0 100644 (file)
@@ -2020,6 +2020,77 @@ static struct omap_hwmod omap54xx_wd_timer2_hwmod = {
        },
 };
 
+/*
+ * 'ocp2scp' class
+ * bridge to transform ocp interface protocol to scp (serial control port)
+ * protocol
+ */
+/* ocp2scp3 */
+static struct omap_hwmod omap54xx_ocp2scp3_hwmod;
+/* l4_cfg -> ocp2scp3 */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__ocp2scp3 = {
+       .master         = &omap54xx_l4_cfg_hwmod,
+       .slave          = &omap54xx_ocp2scp3_hwmod,
+       .clk            = "l4_root_clk_div",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod omap54xx_ocp2scp3_hwmod = {
+       .name           = "ocp2scp3",
+       .class          = &omap54xx_ocp2scp_hwmod_class,
+       .clkdm_name     = "l3init_clkdm",
+       .prcm = {
+               .omap4 = {
+                       .clkctrl_offs = OMAP54XX_CM_L3INIT_OCP2SCP3_CLKCTRL_OFFSET,
+                       .context_offs = OMAP54XX_RM_L3INIT_OCP2SCP3_CONTEXT_OFFSET,
+                       .modulemode   = MODULEMODE_HWCTRL,
+               },
+       },
+};
+
+/*
+ * 'sata' class
+ * sata: serial ATA interface, gen2 compliant (1 rx / 1 tx)
+ */
+
+static struct omap_hwmod_class_sysconfig omap54xx_sata_sysc = {
+       .sysc_offs      = 0x0000,
+       .sysc_flags     = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+                          MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+       .sysc_fields    = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap54xx_sata_hwmod_class = {
+       .name   = "sata",
+       .sysc   = &omap54xx_sata_sysc,
+};
+
+/* sata */
+static struct omap_hwmod omap54xx_sata_hwmod = {
+       .name           = "sata",
+       .class          = &omap54xx_sata_hwmod_class,
+       .clkdm_name     = "l3init_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+       .main_clk       = "func_48m_fclk",
+       .mpu_rt_idx     = 1,
+       .prcm = {
+               .omap4 = {
+                       .clkctrl_offs = OMAP54XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
+                       .context_offs = OMAP54XX_RM_L3INIT_SATA_CONTEXT_OFFSET,
+                       .modulemode   = MODULEMODE_SWCTRL,
+               },
+       },
+};
+
+/* l4_cfg -> sata */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__sata = {
+       .master         = &omap54xx_l4_cfg_hwmod,
+       .slave          = &omap54xx_sata_hwmod,
+       .clk            = "l3_iclk_div",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
 
 /*
  * Interfaces
@@ -2765,6 +2836,8 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
        &omap54xx_l4_cfg__usb_tll_hs,
        &omap54xx_l4_cfg__usb_otg_ss,
        &omap54xx_l4_wkup__wd_timer2,
+       &omap54xx_l4_cfg__ocp2scp3,
+       &omap54xx_l4_cfg__sata,
        NULL,
 };
 
index 20b4398..284324f 100644 (file)
@@ -1268,9 +1268,6 @@ static struct omap_hwmod_class dra7xx_sata_hwmod_class = {
 };
 
 /* sata */
-static struct omap_hwmod_opt_clk sata_opt_clks[] = {
-       { .role = "ref_clk", .clk = "sata_ref_clk" },
-};
 
 static struct omap_hwmod dra7xx_sata_hwmod = {
        .name           = "sata",
@@ -1278,6 +1275,7 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
        .clkdm_name     = "l3init_clkdm",
        .flags          = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
        .main_clk       = "func_48m_fclk",
+       .mpu_rt_idx     = 1,
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
@@ -1285,8 +1283,6 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
                        .modulemode   = MODULEMODE_SWCTRL,
                },
        },
-       .opt_clks       = sata_opt_clks,
-       .opt_clks_cnt   = ARRAY_SIZE(sata_opt_clks),
 };
 
 /*
@@ -1731,8 +1727,20 @@ static struct omap_hwmod dra7xx_uart6_hwmod = {
  *
  */
 
+static struct omap_hwmod_class_sysconfig dra7xx_usb_otg_ss_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .sysc_flags     = (SYSC_HAS_DMADISABLE | SYSC_HAS_MIDLEMODE |
+                          SYSC_HAS_SIDLEMODE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+                          MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+       .sysc_fields    = &omap_hwmod_sysc_type2,
+};
+
 static struct omap_hwmod_class dra7xx_usb_otg_ss_hwmod_class = {
        .name   = "usb_otg_ss",
+       .sysc   = &dra7xx_usb_otg_ss_sysc,
 };
 
 /* usb_otg_ss1 */
index 106132d..cbefbd7 100644 (file)
@@ -35,6 +35,8 @@
 #define OMAP3430_LOGICSTATEST_MASK                     (1 << 2)
 #define OMAP3430_LASTLOGICSTATEENTERED_MASK            (1 << 2)
 #define OMAP3430_LASTPOWERSTATEENTERED_MASK            (0x3 << 0)
+#define OMAP3430_GRPSEL_MCBSP5_MASK                    (1 << 10)
+#define OMAP3430_GRPSEL_MCBSP1_MASK                    (1 << 9)
 #define OMAP3630_GRPSEL_UART4_MASK                     (1 << 18)
 #define OMAP3430_GRPSEL_GPIO6_MASK                     (1 << 17)
 #define OMAP3430_GRPSEL_GPIO5_MASK                     (1 << 16)
 #define OMAP3430_GRPSEL_GPIO3_MASK                     (1 << 14)
 #define OMAP3430_GRPSEL_GPIO2_MASK                     (1 << 13)
 #define OMAP3430_GRPSEL_UART3_MASK                     (1 << 11)
+#define OMAP3430_GRPSEL_GPT8_MASK                      (1 << 9)
+#define OMAP3430_GRPSEL_GPT7_MASK                      (1 << 8)
+#define OMAP3430_GRPSEL_GPT6_MASK                      (1 << 7)
+#define OMAP3430_GRPSEL_GPT5_MASK                      (1 << 6)
 #define OMAP3430_GRPSEL_MCBSP4_MASK                    (1 << 2)
 #define OMAP3430_GRPSEL_MCBSP3_MASK                    (1 << 1)
 #define OMAP3430_GRPSEL_MCBSP2_MASK                    (1 << 0)
index de2a34c..01ca808 100644 (file)
@@ -462,6 +462,7 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define DRA7XX_CLASS           0x07000000
 #define DRA752_REV_ES1_0       (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
 #define DRA752_REV_ES1_1       (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
+#define DRA722_REV_ES1_0       (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
 
 void omap2xxx_check_revision(void);
 void omap3xxx_check_revision(void);
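
For orientation, the DRA7xx revision constants pack three fields; the hypothetical helpers below (not in the tree) show how the new DRA722_REV_ES1_0 decomposes:

        #define DRA7XX_DEVICE(rev)      (((rev) >> 16) & 0xff)  /* 0x22 or 0x52 */
        #define DRA7XX_ES(rev)          (((rev) >> 8) & 0xff)   /* 0x10 == ES1.0 */

        /* DRA722_REV_ES1_0 == 0x07221000, so DRA7XX_DEVICE() yields 0x22 and
         * DRA7XX_ES() yields 0x10, matching the 0xb9bc hawkeye case added above. */
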
index e4e505f..042f693 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_SIRF
+menuconfig ARCH_SIRF
        bool "CSR SiRF" if ARCH_MULTI_V7
        select ARCH_HAS_RESET_CONTROLLER
        select ARCH_REQUIRE_GPIOLIB
@@ -11,7 +11,7 @@ config ARCH_SIRF
 
 if ARCH_SIRF
 
-menu "CSR SiRF atlas6/primaII/Marco/Polo Specific Features"
+comment "CSR SiRF atlas6/primaII/Marco/Polo Specific Features"
 
 config ARCH_ATLAS6
        bool "CSR SiRFSoC ATLAS6 ARM Cortex A9 Platform"
@@ -37,8 +37,6 @@ config ARCH_MARCO
        help
           Support for CSR SiRFSoC ARM Cortex A9 Platform
 
-endmenu
-
 config SIRF_IRQ
        bool
 
index fd2b99d..ee5697b 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_QCOM
+menuconfig ARCH_QCOM
        bool "Qualcomm Support" if ARCH_MULTI_V7
        select ARCH_REQUIRE_GPIOLIB
        select ARM_GIC
@@ -11,8 +11,6 @@ config ARCH_QCOM
 
 if ARCH_QCOM
 
-menu "Qualcomm SoC Selection"
-
 config ARCH_MSM8X60
        bool "Enable support for MSM8X60"
        select CLKSRC_QCOM
@@ -25,8 +23,6 @@ config ARCH_MSM8974
        bool "Enable support for MSM8974"
        select HAVE_ARM_ARCH_TIMER
 
-endmenu
-
 config QCOM_SCM
        bool
 
index 1caee6d..e4564c2 100644 (file)
@@ -2,6 +2,7 @@ config ARCH_ROCKCHIP
        bool "Rockchip RK2928 and RK3xxx SOCs" if ARCH_MULTI_V7
        select PINCTRL
        select PINCTRL_ROCKCHIP
+       select ARCH_HAS_RESET_CONTROLLER
        select ARCH_REQUIRE_GPIOLIB
        select ARM_GIC
        select CACHE_L2X0
index 04284de..ad5316a 100644 (file)
@@ -117,7 +117,7 @@ config S3C24XX_SETUP_TS
          Compile in platform device definition for Samsung TouchScreen.
 
 config S3C24XX_DMA
-       bool "S3C2410 DMA support"
+       bool "S3C2410 DMA support (deprecated)"
        select S3C_DMA
        help
          S3C2410 DMA support. This is needed for drivers like sound which
index 3136d86..26ca242 100644 (file)
@@ -18,9 +18,9 @@ config CPU_S3C6410
          Enable S3C6410 CPU support
 
 config S3C64XX_PL080
-       bool "S3C64XX DMA using generic PL08x driver"
+       def_bool DMADEVICES
+       select ARM_AMBA
        select AMBA_PL08X
-       select SAMSUNG_DMADEV
 
 config S3C64XX_SETUP_SDHCI
        bool
index bb2111b..26003e2 100644 (file)
@@ -9,16 +9,18 @@ if ARCH_S5P64X0
 
 config CPU_S5P6440
        bool
+       select ARM_AMBA
+       select PL330_DMA if DMADEVICES
        select S5P_SLEEP if PM
-       select SAMSUNG_DMADEV
        select SAMSUNG_WAKEMASK if PM
        help
          Enable S5P6440 CPU support
 
 config CPU_S5P6450
        bool
+       select ARM_AMBA
+       select PL330_DMA if DMADEVICES
        select S5P_SLEEP if PM
-       select SAMSUNG_DMADEV
        select SAMSUNG_WAKEMASK if PM
        help
          Enable S5P6450 CPU support
index 15170be..c5e3a96 100644 (file)
@@ -9,8 +9,9 @@ if ARCH_S5PC100
 
 config CPU_S5PC100
        bool
+       select ARM_AMBA
+       select PL330_DMA if DMADEVICES
        select S5P_EXT_INT
-       select SAMSUNG_DMADEV
        help
          Enable S5PC100 CPU support
 
index 8c3abe5..f60f286 100644 (file)
@@ -11,10 +11,11 @@ if ARCH_S5PV210
 
 config CPU_S5PV210
        bool
+       select ARM_AMBA
+       select PL330_DMA if DMADEVICES
        select S5P_EXT_INT
        select S5P_PM if PM
        select S5P_SLEEP if PM
-       select SAMSUNG_DMADEV
        help
          Enable S5PV210 CPU support
 
index f9874ba..108939f 100644 (file)
@@ -329,6 +329,11 @@ static struct mtd_partition collie_partitions[] = {
                .name           = "rootfs",
                .offset         = MTDPART_OFS_APPEND,
                .size           = 0x00e20000,
+       }, {
+               .name           = "bootblock",
+               .offset         = MTDPART_OFS_APPEND,
+               .size           = 0x00020000,
+               .mask_flags     = MTD_WRITEABLE
        }
 };
 
@@ -356,7 +361,7 @@ static void collie_flash_exit(void)
 }
 
 static struct flash_platform_data collie_flash_data = {
-       .map_name       = "jedec_probe",
+       .map_name       = "cfi_probe",
        .init           = collie_flash_init,
        .set_vpp        = collie_set_vpp,
        .exit           = collie_flash_exit,
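
Two effects worth noting in the collie changes: the new bootblock partition is read-only because mask_flags strips MTD_WRITEABLE, and the map probe moves from JEDEC to CFI. How mask_flags takes effect, roughly (along the lines of the mtd partition code):

        /* a partition's flags are the master's flags minus the masked bits */
        slave->mtd.flags = master->flags & ~part->mask_flags;
        /* with mask_flags = MTD_WRITEABLE, "bootblock" ends up read-only */
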
index dbd954e..7980730 100644 (file)
@@ -1,7 +1,7 @@
 config ARCH_SHMOBILE
        bool
 
-config ARCH_SHMOBILE_MULTI
+menuconfig ARCH_SHMOBILE_MULTI
        bool "Renesas ARM SoCs" if ARCH_MULTI_V7
        depends on MMU
        select ARCH_SHMOBILE
@@ -15,7 +15,7 @@ config ARCH_SHMOBILE_MULTI
 
 if ARCH_SHMOBILE_MULTI
 
-comment "Renesas ARM SoCs System Type"
+#comment "Renesas ARM SoCs System Type"
 
 config ARCH_EMEV2
        bool "Emma Mobile EV2"
@@ -85,7 +85,6 @@ config ARCH_R8A73A4
        select CPU_V7
        select SH_CLK_CPG
        select RENESAS_IRQC
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select SYS_SUPPORTS_SH_CMT
        select SYS_SUPPORTS_SH_TMU
@@ -264,7 +263,6 @@ config MACH_KOELSCH
 config MACH_KZM9G
        bool "KZM-A9-GT board"
        depends on ARCH_SH73A0
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select ARCH_REQUIRE_GPIOLIB
        select REGULATOR_FIXED_VOLTAGE if REGULATOR
index 0786249..90df202 100644 (file)
@@ -14,7 +14,6 @@ if PLAT_SPEAR
 config ARCH_SPEAR13XX
        bool "ST SPEAr13xx"
        depends on ARCH_MULTI_V7 || PLAT_SPEAR_SINGLE
-       select ARCH_HAS_CPUFREQ
        select ARM_GIC
        select GPIO_SPEAR_SPICS
        select HAVE_ARM_SCU if SMP
index abf9ee9..878e9ec 100644 (file)
@@ -1,5 +1,5 @@
 menuconfig ARCH_STI
-       bool "STMicroelectronics Consumer Electronics SOCs with Device Trees" if ARCH_MULTI_V7
+       bool "STMicroelectronics Consumer Electronics SOCs" if ARCH_MULTI_V7
        select ARM_GIC
        select ARM_GLOBAL_TIMER
        select PINCTRL
@@ -11,8 +11,8 @@ menuconfig ARCH_STI
        select ARM_ERRATA_754322
        select ARM_ERRATA_764369 if SMP
        select ARM_ERRATA_775420
-       select PL310_ERRATA_753970 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
        help
          Include support for STiH41x SOCs like STiH415/416 using the device tree
          for discovery
index 3f9587b..b608508 100644 (file)
 
 #include <linux/clk-provider.h>
 #include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/reboot.h>
 
 #include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/system_misc.h>
+
+#define SUN4I_WATCHDOG_CTRL_REG                0x00
+#define SUN4I_WATCHDOG_CTRL_RESTART            BIT(0)
+#define SUN4I_WATCHDOG_MODE_REG                0x04
+#define SUN4I_WATCHDOG_MODE_ENABLE             BIT(0)
+#define SUN4I_WATCHDOG_MODE_RESET_ENABLE       BIT(1)
+
+#define SUN6I_WATCHDOG1_IRQ_REG                0x00
+#define SUN6I_WATCHDOG1_CTRL_REG       0x10
+#define SUN6I_WATCHDOG1_CTRL_RESTART           BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_REG     0x14
+#define SUN6I_WATCHDOG1_CONFIG_RESTART         BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_IRQ             BIT(1)
+#define SUN6I_WATCHDOG1_MODE_REG       0x18
+#define SUN6I_WATCHDOG1_MODE_ENABLE            BIT(0)
+
+static void __iomem *wdt_base;
+
+static void sun4i_restart(enum reboot_mode mode, const char *cmd)
+{
+       if (!wdt_base)
+               return;
+
+       /* Enable timer and set reset bit in the watchdog */
+       writel(SUN4I_WATCHDOG_MODE_ENABLE | SUN4I_WATCHDOG_MODE_RESET_ENABLE,
+              wdt_base + SUN4I_WATCHDOG_MODE_REG);
+
+       /*
+        * Restart the watchdog. The default (and lowest) interval
+        * value for the watchdog is 0.5s.
+        */
+       writel(SUN4I_WATCHDOG_CTRL_RESTART, wdt_base + SUN4I_WATCHDOG_CTRL_REG);
+
+       while (1) {
+               mdelay(5);
+               writel(SUN4I_WATCHDOG_MODE_ENABLE | SUN4I_WATCHDOG_MODE_RESET_ENABLE,
+                      wdt_base + SUN4I_WATCHDOG_MODE_REG);
+       }
+}
+
+static struct of_device_id sunxi_restart_ids[] = {
+       { .compatible = "allwinner,sun4i-a10-wdt" },
+       { /*sentinel*/ }
+};
+
+static void sunxi_setup_restart(void)
+{
+       struct device_node *np;
+
+       np = of_find_matching_node(NULL, sunxi_restart_ids);
+       if (WARN(!np, "unable to setup watchdog restart"))
+               return;
+
+       wdt_base = of_iomap(np, 0);
+       WARN(!wdt_base, "failed to map watchdog base address");
+}
+
+static void __init sunxi_dt_init(void)
+{
+       sunxi_setup_restart();
+
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
 
 static const char * const sunxi_board_dt_compat[] = {
        "allwinner,sun4i-a10",
@@ -23,7 +96,9 @@ static const char * const sunxi_board_dt_compat[] = {
 };
 
 DT_MACHINE_START(SUNXI_DT, "Allwinner A1X (Device Tree)")
+       .init_machine   = sunxi_dt_init,
        .dt_compat      = sunxi_board_dt_compat,
+       .restart        = sun4i_restart,
 MACHINE_END
 
 static const char * const sun6i_board_dt_compat[] = {
@@ -51,5 +126,7 @@ static const char * const sun7i_board_dt_compat[] = {
 };
 
 DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
+       .init_machine   = sunxi_dt_init,
        .dt_compat      = sun7i_board_dt_compat,
+       .restart        = sun4i_restart,
 MACHINE_END
index e16999e..0953996 100644 (file)
@@ -1,6 +1,5 @@
-config ARCH_TEGRA
+menuconfig ARCH_TEGRA
        bool "NVIDIA Tegra" if ARCH_MULTI_V7
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
        select ARM_GIC
@@ -16,8 +15,7 @@ config ARCH_TEGRA
        help
          This enables support for NVIDIA Tegra based systems.
 
-menu "NVIDIA Tegra options"
-       depends on ARCH_TEGRA
+if ARCH_TEGRA
 
 config ARCH_TEGRA_2x_SOC
        bool "Enable support for Tegra20 family"
@@ -69,4 +67,4 @@ config TEGRA_AHB
          which controls AHB bus master arbitration and some
          performance parameters (priority, prefetch size).
 
-endmenu
+endif
index e3a96d7..bc51a71 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_U300
+menuconfig ARCH_U300
        bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5
        depends on MMU
        select ARCH_REQUIRE_GPIOLIB
@@ -16,8 +16,6 @@ config ARCH_U300
 
 if ARCH_U300
 
-menu "ST-Ericsson AB U300/U335 Platform"
-
 config MACH_U300
        depends on ARCH_U300
        bool "U300"
@@ -43,6 +41,4 @@ config MACH_U300_SPIDUMMY
                you don't need it. Selecting this will activate the
                SPI framework and ARM PL022 support.
 
-endmenu
-
 endif
index b41a42d..699e860 100644 (file)
@@ -1,9 +1,8 @@
-config ARCH_U8500
+menuconfig ARCH_U8500
        bool "ST-Ericsson U8500 Series" if ARCH_MULTI_V7
        depends on MMU
        select AB8500_CORE
        select ABX500_CORE
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
        select ARM_ERRATA_754322
@@ -16,7 +15,7 @@ config ARCH_U8500
        select PINCTRL
        select PINCTRL_ABX500
        select PINCTRL_NOMADIK
-       select PL310_ERRATA_753970 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
        help
          Support for ST-Ericsson's Ux500 architecture
 
@@ -34,8 +33,6 @@ config UX500_SOC_DB8500
        select REGULATOR
        select REGULATOR_DB8500_PRCMU
 
-menu "Ux500 target platform (boards)"
-
 config MACH_MOP500
        bool "U8500 Development platform, MOP500 versions"
        select I2C
@@ -68,8 +65,6 @@ config UX500_AUTO_PLATFORM
          a working kernel. If everything else is disabled, this
          automatically enables MACH_MOP500.
 
-endmenu
-
 config UX500_DEBUG_UART
        int "Ux500 UART to use for low-level debug"
        default 2
index 90249cf..d8b9330 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_VEXPRESS
+menuconfig ARCH_VEXPRESS
        bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
        select ARCH_REQUIRE_GPIOLIB
        select ARCH_SUPPORTS_BIG_ENDIAN
@@ -37,14 +37,13 @@ config ARCH_VEXPRESS
          platforms. The traditional (ATAGs) boot method is not usable on
          these boards with this option.
 
-menu "Versatile Express platform type"
-       depends on ARCH_VEXPRESS
+if ARCH_VEXPRESS
 
 config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
        bool "Enable A5 and A9 only errata work-arounds"
        default y
        select ARM_ERRATA_720789
-       select PL310_ERRATA_753970 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
        help
          Provides common dependencies for Versatile Express platforms
          based on Cortex-A5 and Cortex-A9 processors. In order to
@@ -65,7 +64,6 @@ config ARCH_VEXPRESS_DCSCB
 
 config ARCH_VEXPRESS_SPC
        bool "Versatile Express Serial Power Controller (SPC)"
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select PM_OPP
        help
@@ -83,4 +81,4 @@ config ARCH_VEXPRESS_TC2_PM
          Support for CPU and cluster power management on Versatile Express
          with a TC2 (A15x2 A7x3) big.LITTLE core tile.
 
-endmenu
+endif
index 08f56a4..aaaa24f 100644 (file)
@@ -1,6 +1,5 @@
 config ARCH_VT8500
        bool
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select CLKDEV_LOOKUP
        select VT8500_TIMER
index 573e0db..0c164f8 100644 (file)
@@ -1,6 +1,5 @@
 config ARCH_ZYNQ
        bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select ARCH_SUPPORTS_BIG_ENDIAN
        select ARM_AMBA
index eda0dd0..c348eae 100644 (file)
@@ -889,9 +889,10 @@ config CACHE_L2X0
        help
          This option enables the L2x0 PrimeCell.
 
+if CACHE_L2X0
+
 config CACHE_PL310
        bool
-       depends on CACHE_L2X0
        default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
        help
          This option enables optimisations for the PL310 cache
@@ -899,7 +900,6 @@ config CACHE_PL310
 
 config PL310_ERRATA_588369
        bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
-       depends on CACHE_L2X0
        help
           The PL310 L2 cache controller implements three types of Clean &
           Invalidate maintenance operations: by Physical Address
@@ -912,7 +912,6 @@ config PL310_ERRATA_588369
 
 config PL310_ERRATA_727915
        bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
-       depends on CACHE_L2X0
        help
          PL310 implements the Clean & Invalidate by Way L2 cache maintenance
          operation (offset 0x7FC). This operation runs in background so that
@@ -923,7 +922,6 @@ config PL310_ERRATA_727915
 
 config PL310_ERRATA_753970
        bool "PL310 errata: cache sync operation may be faulty"
-       depends on CACHE_PL310
        help
          This option enables the workaround for the 753970 PL310 (r3p0) erratum.
 
@@ -938,7 +936,6 @@ config PL310_ERRATA_753970
 
 config PL310_ERRATA_769419
        bool "PL310 errata: no automatic Store Buffer drain"
-       depends on CACHE_L2X0
        help
          On revisions of the PL310 prior to r3p2, the Store Buffer does
          not automatically drain. This can cause normal, non-cacheable
@@ -948,6 +945,8 @@ config PL310_ERRATA_769419
          on systems with an outer cache, the store buffer is drained
          explicitly.
 
+endif
+
 config CACHE_TAUROS2
        bool "Enable the Tauros2 L2 cache controller"
        depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
index efc5cab..7c3fb41 100644 (file)
@@ -664,7 +664,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
 
 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
 {
-       unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
+       unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
        bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
 
        if (rev >= L310_CACHE_ID_RTL_R2P0) {
@@ -1068,6 +1068,33 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
        },
 };
 
+/*
+ * This is a variant of the of_l2c310_data with .sync set to
+ * NULL. Outer sync operations are not needed when the system is I/O
+ * coherent, and potentially harmful in certain situations (PCIe/PL310
+ * deadlock on Armada 375/38x due to hardware I/O coherency). The
+ * other operations are kept because they are infrequent (therefore do
+ * not cause the deadlock in practice) and needed for secondary CPU
+ * boot and other power management activities.
+ */
+static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
+       .type = "L2C-310 Coherent",
+       .way_size_0 = SZ_8K,
+       .num_lock = 8,
+       .of_parse = l2c310_of_parse,
+       .enable = l2c310_enable,
+       .fixup = l2c310_fixup,
+       .save  = l2c310_save,
+       .outer_cache = {
+               .inv_range   = l2c210_inv_range,
+               .clean_range = l2c210_clean_range,
+               .flush_range = l2c210_flush_range,
+               .flush_all   = l2c210_flush_all,
+               .disable     = l2c310_disable,
+               .resume      = l2c310_resume,
+       },
+};
+
 /*
  * Note that the end addresses passed to Linux primitives are
  * noninclusive, while the hardware cache range operations use
@@ -1487,6 +1514,10 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 
        data = of_match_node(l2x0_ids, np)->data;
 
+       if (of_device_is_compatible(np, "arm,pl310-cache") &&
+           of_property_read_bool(np, "arm,io-coherent"))
+               data = &of_l2c310_coherent_data;
+
        old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
        if (old_aux != ((old_aux & aux_mask) | aux_val)) {
                pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
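
The io-coherent variant deliberately leaves .sync NULL; that is safe because the outer-cache wrapper treats a missing hook as a no-op, roughly like this (a sketch in the style of asm/outercache.h):

        static inline void outer_sync(void)
        {
                if (outer_cache.sync)           /* NULL for "L2C-310 Coherent" */
                        outer_cache.sync();
        }
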
index 4c88935..1f88db0 100644 (file)
@@ -461,12 +461,21 @@ void __init dma_contiguous_remap(void)
                map.type = MT_MEMORY_DMA_READY;
 
                /*
-                * Clear previous low-memory mapping
+                * Clear previous low-memory mapping to ensure that the
+                * TLB does not see any conflicting entries, then flush
+                * the TLB of the old entries before creating new mappings.
+                *
+                * This ensures that any speculatively loaded TLB entries
+                * (even though they may be rare) can not cause any problems,
+                * and ensures that this code is architecturally compliant.
                 */
                for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                     addr += PMD_SIZE)
                        pmd_clear(pmd_off_k(addr));
 
+               flush_tlb_kernel_range(__phys_to_virt(start),
+                                      __phys_to_virt(end));
+
                iotable_init(&map, 1);
        }
 }
index 8e0e52e..c447ec7 100644 (file)
@@ -9,6 +9,11 @@
 #include <asm/sections.h>
 #include <asm/system_info.h>
 
+/*
+ * Note: accesses outside of the kernel image and the identity map area
+ * are not supported on any CPU using the idmap tables as its current
+ * page tables.
+ */
 pgd_t *idmap_pgd;
 phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
@@ -25,6 +30,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
                        pr_warning("Failed to allocate identity pmd.\n");
                        return;
                }
+               /*
+                * Copy the original PMD to ensure that the PMD entries for
+                * the kernel image are preserved.
+                */
+               if (!pud_none(*pud))
+                       memcpy(pmd, pmd_offset(pud, 0),
+                              PTRS_PER_PMD * sizeof(pmd_t));
                pud_populate(&init_mm, pud, pmd);
                pmd += pmd_index(addr);
        } else
index ab14b79..6e3ba8d 100644 (file)
@@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
                return;
 
        /* remap kernel code and data */
-       map_start = init_mm.start_code;
-       map_end   = init_mm.brk;
+       map_start = init_mm.start_code & PMD_MASK;
+       map_end   = ALIGN(init_mm.brk, PMD_SIZE);
 
        /* get a handle on things... */
        pgd0 = pgd_offset_k(0);
@@ -1442,7 +1442,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
        }
 
        /* remap pmds for kernel mapping */
-       phys = __pa(map_start) & PMD_MASK;
+       phys = __pa(map_start);
        do {
                *pmdk++ = __pmd(phys | pmdprot);
                phys += PMD_SIZE;
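
Aligning map_start down and map_end up guarantees whole-section coverage of the kernel and makes __pa(map_start) PMD-aligned by construction, which is why the explicit & PMD_MASK on phys can go. A worked example with hypothetical addresses and 2 MiB sections:

        /* PMD_SIZE = 2 MiB, PMD_MASK = ~(PMD_SIZE - 1) */
        unsigned long map_start = 0xc0108000UL & PMD_MASK;         /* 0xc0000000 */
        unsigned long map_end   = ALIGN(0xc0a2c000UL, PMD_SIZE);   /* 0xc0c00000 */
        /* the remap loop then covers [map_start, map_end) in full sections */
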
index da1874f..a014dfa 100644 (file)
@@ -300,6 +300,7 @@ void __init sanity_check_meminfo(void)
        sanity_check_meminfo_mpu();
        end = memblock_end_of_DRAM();
        high_memory = __va(end - 1) + 1;
+       memblock_set_current_limit(end);
 }
 
 /*
index 97448c3..ba0d58e 100644 (file)
@@ -502,6 +502,7 @@ __\name\()_proc_info:
        .long   \cpu_val
        .long   \cpu_mask
        .long   PMD_TYPE_SECT | \
+               PMD_SECT_CACHEABLE | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
index 243dfcb..301b892 100644 (file)
@@ -35,27 +35,15 @@ config SAMSUNG_PM
          Base platform power management code for Samsung SoCs
 
 if PLAT_SAMSUNG
+menu "Samsung Common options"
 
 # boot configurations
 
 comment "Boot options"
 
-config S3C_BOOT_ERROR_RESET
-       bool "S3C Reboot on decompression error"
-       help
-         Say y here to use the watchdog to reset the system if the
-         kernel decompressor detects an error during decompression.
-
-config S3C_BOOT_UART_FORCE_FIFO
-       bool "Force UART FIFO on during boot process"
-       default y
-       help
-         Say Y here to force the UART FIFOs on during the kernel
-        uncompressor
-
-
 config S3C_LOWLEVEL_UART_PORT
        int "S3C UART to use for low-level messages"
+       depends on ARCH_S3C64XX
        default 0
        help
          Choice of which UART port to use for the low-level messages,
@@ -407,17 +395,16 @@ config SAMSUNG_PM_GPIO
          Include legacy GPIO power management code for platforms not using
          pinctrl-samsung driver.
 
-endif
-
 config SAMSUNG_DMADEV
-       bool
-       select ARM_AMBA
+       bool "Use legacy Samsung DMA abstraction"
+       depends on CPU_S5PV210 || CPU_S5PC100 || ARCH_S5P64X0 || ARCH_S3C64XX
        select DMADEVICES
-       select PL330_DMA if (ARCH_EXYNOS5 || ARCH_EXYNOS4 || CPU_S5PV210 || CPU_S5PC100 || \
-                                       CPU_S5P6450 || CPU_S5P6440)
+       default y
        help
          Use DMA device engine for PL330 DMAC.
 
+endif
+
 config S5P_DEV_MFC
        bool
        help
@@ -503,4 +490,5 @@ config DEBUG_S3C_UART
        default "2" if DEBUG_S3C_UART2
        default "3" if DEBUG_S3C_UART3
 
+endmenu
 endif
index 859a9bb..91cf08b 100644 (file)
@@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
 {
        return -ENOSYS;
 }
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+       return 0;
+}
index 7295419..f3b584b 100644 (file)
@@ -1,14 +1,17 @@
 config ARM64
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select ARCH_WANT_FRAME_POINTERS
        select ARM_AMBA
        select ARM_ARCH_TIMER
        select ARM_GIC
+       select AUDIT_ARCH_COMPAT_GENERIC
+       select ARM_GIC_V3
        select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
        select COMMON_CLK
@@ -27,10 +30,12 @@ config ARM64
        select GENERIC_STRNLEN_USER
        select GENERIC_TIME_VSYSCALL
        select HARDIRQS_SW_RESEND
+       select HAVE_ARCH_AUDITSYSCALL
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_TRACEHOOK
        select HAVE_C_RECORDMCOUNT
+       select HAVE_CC_STACKPROTECTOR
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
@@ -61,6 +66,7 @@ config ARM64
        select RTC_LIB
        select SPARSE_IRQ
        select SYSCTL_EXCEPTION_TRACE
+       select HAVE_CONTEXT_TRACKING
        help
          ARM 64-bit (AArch64) Linux support.
 
@@ -153,14 +159,63 @@ endmenu
 
 menu "Kernel Features"
 
+choice
+       prompt "Page size"
+       default ARM64_4K_PAGES
+       help
+         Page size (translation granule) configuration.
+
+config ARM64_4K_PAGES
+       bool "4KB"
+       help
+         This feature enables 4KB pages support.
+
 config ARM64_64K_PAGES
-       bool "Enable 64KB pages support"
+       bool "64KB"
        help
          This feature enables 64KB pages support (4KB by default)
          allowing only two levels of page tables and faster TLB
          look-up. AArch32 emulation is not available when this feature
          is enabled.
 
+endchoice
+
+choice
+       prompt "Virtual address space size"
+       default ARM64_VA_BITS_39 if ARM64_4K_PAGES
+       default ARM64_VA_BITS_42 if ARM64_64K_PAGES
+       help
+         Allows choosing one of multiple possible virtual address
+         space sizes. The number of translation table levels is determined by
+         a combination of page size and virtual address space size.
+
+config ARM64_VA_BITS_39
+       bool "39-bit"
+       depends on ARM64_4K_PAGES
+
+config ARM64_VA_BITS_42
+       bool "42-bit"
+       depends on ARM64_64K_PAGES
+
+config ARM64_VA_BITS_48
+       bool "48-bit"
+       depends on BROKEN
+
+endchoice
+
+config ARM64_VA_BITS
+       int
+       default 39 if ARM64_VA_BITS_39
+       default 42 if ARM64_VA_BITS_42
+       default 48 if ARM64_VA_BITS_48
+
+config ARM64_PGTABLE_LEVELS
+       int
+       default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42
+       default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48
+       default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39
+       default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48
+
 config CPU_BIG_ENDIAN
        bool "Build big-endian kernel"
        help
index 1c1b756..4ee8e90 100644 (file)
@@ -28,4 +28,19 @@ config PID_IN_CONTEXTIDR
          instructions during context switch. Say Y here only if you are
          planning to use hardware trace tools with this kernel.
 
+config ARM64_RANDOMIZE_TEXT_OFFSET
+       bool "Randomize TEXT_OFFSET at build time"
+       help
+         Say Y here if you want the image load offset (AKA TEXT_OFFSET)
+         of the kernel to be randomized at build-time. When selected,
+         this option will cause TEXT_OFFSET to be randomized upon any
+         build of the kernel, and the offset will be reflected in the
+         text_offset field of the resulting Image. This can be used to
+         fuzz-test bootloaders which respect text_offset.
+
+         This option is intended for bootloader and/or kernel testing
+         only. Bootloaders must make no assumptions regarding the value
+         of TEXT_OFFSET and platforms must not require a specific
+         value.
+
 endmenu
index 8185a91..e8d025c 100644 (file)
@@ -38,7 +38,11 @@ CHECKFLAGS   += -D__aarch64__
 head-y         := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
+ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%04x0\n", int(65535 * rand())}')
+else
 TEXT_OFFSET := 0x00080000
+endif
 
 export TEXT_OFFSET GZFLAGS
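
The awk one-liner emits a random, 16-byte-aligned offset of the form 0xNNNN0, i.e. anything up to just under 1 MiB. Essentially the same generator as a standalone C program (a sketch):

        #include <stdio.h>
        #include <stdlib.h>
        #include <time.h>

        int main(void)
        {
                srand(time(NULL));
                /* 0x00000 .. 0xffff0 in steps of 0x10, e.g. "0x3a7d0" */
                printf("0x%04x0\n", rand() % 65536);
                return 0;
        }
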
 
index 1247ca1..6541962 100644 (file)
@@ -24,3 +24,7 @@
                reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
        };
 };
+
+&serial0 {
+       status = "ok";
+};
index c5f0a47..40aa96c 100644 (file)
                };
 
                serial0: serial@1c020000 {
+                       status = "disabled";
                        device_type = "serial";
-                       compatible = "ns16550";
+                       compatible = "ns16550a";
                        reg = <0 0x1c020000 0x0 0x1000>;
                        reg-shift = <2>;
                        clock-frequency = <10000000>; /* Updated by bootloader */
                        interrupts = <0x0 0x4c 0x4>;
                };
 
+               serial1: serial@1c021000 {
+                       status = "disabled";
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       reg = <0 0x1c021000 0x0 0x1000>;
+                       reg-shift = <2>;
+                       clock-frequency = <10000000>; /* Updated by bootloader */
+                       interrupt-parent = <&gic>;
+                       interrupts = <0x0 0x4d 0x4>;
+               };
+
+               serial2: serial@1c022000 {
+                       status = "disabled";
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       reg = <0 0x1c022000 0x0 0x1000>;
+                       reg-shift = <2>;
+                       clock-frequency = <10000000>; /* Updated by bootloader */
+                       interrupt-parent = <&gic>;
+                       interrupts = <0x0 0x4e 0x4>;
+               };
+
+               serial3: serial@1c023000 {
+                       status = "disabled";
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       reg = <0 0x1c023000 0x0 0x1000>;
+                       reg-shift = <2>;
+                       clock-frequency = <10000000>; /* Updated by bootloader */
+                       interrupt-parent = <&gic>;
+                       interrupts = <0x0 0x4f 0x4>;
+               };
+
                phy1: phy@1f21a000 {
                        compatible = "apm,xgene-phy";
                        reg = <0x0 0x1f21a000 0x0 0x100>;
index 157e1d8..1e52b74 100644 (file)
@@ -6,9 +6,18 @@ CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_HUGETLB=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
 # CONFIG_PID_NS is not set
@@ -27,6 +36,7 @@ CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
+CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyAMA0"
@@ -42,9 +52,13 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
@@ -53,6 +67,8 @@ CONFIG_ATA=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
@@ -64,6 +80,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_REGULATOR=y
@@ -78,6 +95,7 @@ CONFIG_USB_ISP1760_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
+CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
@@ -85,6 +103,8 @@ CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
 CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=y
 CONFIG_VFAT_FS=y
@@ -93,6 +113,7 @@ CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
 CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_VIRTUALIZATION=y
@@ -104,6 +125,7 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_FTRACE is not set
+CONFIG_SECURITY=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
index 2070a56..a3f935f 100644 (file)
@@ -35,4 +35,4 @@ AFLAGS_aes-neon.o     := -DINTERLEAVE=4
 CFLAGS_aes-glue-ce.o   := -DUSE_V8_CRYPTO_EXTENSIONS
 
 $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
-       $(call if_changed_dep,cc_o_c)
+       $(call if_changed_rule,cc_o_c)
index 60f2f4c..79cd911 100644 (file)
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
                aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
                aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_dec, rounds, blocks, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
                                first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
                                first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_enc, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
 
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_dec, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
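
All six hunks in this file apply the same fix: blkcipher_walk_done() expects the number of bytes left unprocessed, and hard-coding 0 discarded any trailing partial AES block. The loop contract, sketched (process_blocks is a stand-in for the per-mode asm helpers):

        while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
                process_blocks(walk.dst.virt.addr, walk.src.virt.addr, blocks);
                /* hand back the tail we did not consume; the walk carries
                 * it into the next iteration instead of dropping it */
                err = blkcipher_walk_done(desc, &walk,
                                          walk.nbytes % AES_BLOCK_SIZE);
                if (err)
                        break;
        }
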
 
index b9e6eaf..dc45701 100644 (file)
@@ -3,14 +3,6 @@
  *
  * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
- * Based on arch/x86/crypto/ghash-pmullni-intel_asm.S
- *
- * Copyright (c) 2009 Intel Corp.
- *   Author: Huang Ying <ying.huang@intel.com>
- *           Vinodh Gopal
- *           Erdinc Ozturk
- *           Deniz Karakoyunlu
- *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-       DATA    .req    v0
-       SHASH   .req    v1
-       IN1     .req    v2
+       SHASH   .req    v0
+       SHASH2  .req    v1
        T1      .req    v2
        T2      .req    v3
-       T3      .req    v4
-       VZR     .req    v5
+       MASK    .req    v4
+       XL      .req    v5
+       XM      .req    v6
+       XH      .req    v7
+       IN1     .req    v7
 
        .text
        .arch           armv8-a+crypto
         *                         struct ghash_key const *k, const char *head)
         */
 ENTRY(pmull_ghash_update)
-       ld1             {DATA.16b}, [x1]
        ld1             {SHASH.16b}, [x3]
-       eor             VZR.16b, VZR.16b, VZR.16b
+       ld1             {XL.16b}, [x1]
+       movi            MASK.16b, #0xe1
+       ext             SHASH2.16b, SHASH.16b, SHASH.16b, #8
+       shl             MASK.2d, MASK.2d, #57
+       eor             SHASH2.16b, SHASH2.16b, SHASH.16b
 
        /* do the head block first, if supplied */
        cbz             x4, 0f
-       ld1             {IN1.2d}, [x4]
+       ld1             {T1.2d}, [x4]
        b               1f
 
-0:     ld1             {IN1.2d}, [x2], #16
+0:     ld1             {T1.2d}, [x2], #16
        sub             w0, w0, #1
-1:     ext             IN1.16b, IN1.16b, IN1.16b, #8
-CPU_LE(        rev64           IN1.16b, IN1.16b        )
-       eor             DATA.16b, DATA.16b, IN1.16b
 
-       /* multiply DATA by SHASH in GF(2^128) */
-       ext             T2.16b, DATA.16b, DATA.16b, #8
-       ext             T3.16b, SHASH.16b, SHASH.16b, #8
-       eor             T2.16b, T2.16b, DATA.16b
-       eor             T3.16b, T3.16b, SHASH.16b
+1:     /* multiply XL by SHASH in GF(2^128) */
+CPU_LE(        rev64           T1.16b, T1.16b  )
 
-       pmull2          T1.1q, SHASH.2d, DATA.2d        // a1 * b1
-       pmull           DATA.1q, SHASH.1d, DATA.1d      // a0 * b0
-       pmull           T2.1q, T2.1d, T3.1d             // (a1 + a0)(b1 + b0)
-       eor             T2.16b, T2.16b, T1.16b          // (a0 * b1) + (a1 * b0)
-       eor             T2.16b, T2.16b, DATA.16b
+       ext             T2.16b, XL.16b, XL.16b, #8
+       ext             IN1.16b, T1.16b, T1.16b, #8
+       eor             T1.16b, T1.16b, T2.16b
+       eor             XL.16b, XL.16b, IN1.16b
 
-       ext             T3.16b, VZR.16b, T2.16b, #8
-       ext             T2.16b, T2.16b, VZR.16b, #8
-       eor             DATA.16b, DATA.16b, T3.16b
-       eor             T1.16b, T1.16b, T2.16b  // <T1:DATA> is result of
-                                               // carry-less multiplication
+       pmull2          XH.1q, SHASH.2d, XL.2d          // a1 * b1
+       eor             T1.16b, T1.16b, XL.16b
+       pmull           XL.1q, SHASH.1d, XL.1d          // a0 * b0
+       pmull           XM.1q, SHASH2.1d, T1.1d         // (a1 + a0)(b1 + b0)
 
-       /* first phase of the reduction */
-       shl             T3.2d, DATA.2d, #1
-       eor             T3.16b, T3.16b, DATA.16b
-       shl             T3.2d, T3.2d, #5
-       eor             T3.16b, T3.16b, DATA.16b
-       shl             T3.2d, T3.2d, #57
-       ext             T2.16b, VZR.16b, T3.16b, #8
-       ext             T3.16b, T3.16b, VZR.16b, #8
-       eor             DATA.16b, DATA.16b, T2.16b
-       eor             T1.16b, T1.16b, T3.16b
+       ext             T1.16b, XL.16b, XH.16b, #8
+       eor             T2.16b, XL.16b, XH.16b
+       eor             XM.16b, XM.16b, T1.16b
+       eor             XM.16b, XM.16b, T2.16b
+       pmull           T2.1q, XL.1d, MASK.1d
 
-       /* second phase of the reduction */
-       ushr            T2.2d, DATA.2d, #5
-       eor             T2.16b, T2.16b, DATA.16b
-       ushr            T2.2d, T2.2d, #1
-       eor             T2.16b, T2.16b, DATA.16b
-       ushr            T2.2d, T2.2d, #1
-       eor             T1.16b, T1.16b, T2.16b
-       eor             DATA.16b, DATA.16b, T1.16b
+       mov             XH.d[0], XM.d[1]
+       mov             XM.d[1], XL.d[0]
+
+       eor             XL.16b, XM.16b, T2.16b
+       ext             T2.16b, XL.16b, XL.16b, #8
+       pmull           XL.1q, XL.1d, MASK.1d
+       eor             T2.16b, T2.16b, XH.16b
+       eor             XL.16b, XL.16b, T2.16b
 
        cbnz            w0, 0b
 
-       st1             {DATA.16b}, [x1]
+       st1             {XL.16b}, [x1]
        ret
 ENDPROC(pmull_ghash_update)
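
The rewrite trades the old shift-chain reduction for a Karatsuba multiply plus a reduction driven by the 0xe1 << 57 constant, which encodes the GHASH polynomial x^128 + x^7 + x^2 + x + 1. A rough C model of the multiply stage, with clmul() as a hypothetical stand-in for PMULL (unsigned __int128 is a GCC extension):

        typedef unsigned long long u64;
        typedef unsigned __int128 u128;

        extern u128 clmul(u64 a, u64 b);        /* carry-less 64x64 -> 128 bits */

        static void ghash_karatsuba(const u64 a[2], const u64 b[2],
                                    u128 *lo, u128 *mid, u128 *hi)
        {
                *hi  = clmul(a[1], b[1]);                   /* a1 * b1          */
                *lo  = clmul(a[0], b[0]);                   /* a0 * b0          */
                *mid = clmul(a[1] ^ a[0], b[1] ^ b[0]);     /* (a1+a0)(b1+b0)   */
                *mid ^= *lo ^ *hi;                          /* = a1*b0 + a0*b1  */
                /* the assembly then folds hi:mid:lo back to 128 bits with two
                 * further PMULLs against MASK (0xe1 << 57) */
        }
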
index b92baf3..833ec1e 100644 (file)
@@ -67,11 +67,12 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
                blocks = len / GHASH_BLOCK_SIZE;
                len %= GHASH_BLOCK_SIZE;
 
-               kernel_neon_begin_partial(6);
+               kernel_neon_begin_partial(8);
                pmull_ghash_update(blocks, ctx->digest, src, key,
                                   partial ? ctx->buf : NULL);
                kernel_neon_end();
                src += blocks * GHASH_BLOCK_SIZE;
+               partial = 0;
        }
        if (len)
                memcpy(ctx->buf + partial, src, len);
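
The added partial = 0 repairs the buffering invariant: once the carried-over bytes in ctx->buf have been fed to pmull_ghash_update() via the head argument, any tail bytes must land at offset 0 again. The pattern, sketched generically:

        unsigned int partial = ctx->count % GHASH_BLOCK_SIZE;

        ctx->count += len;
        if (partial + len >= GHASH_BLOCK_SIZE) {
                /* ... hash ctx->buf (if partial) and whole blocks of src ... */
                partial = 0;            /* the staging buffer is now empty */
        }
        if (len)
                memcpy(ctx->buf + partial, src, len);   /* stash the tail */
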
@@ -88,7 +89,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
 
                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
 
-               kernel_neon_begin_partial(6);
+               kernel_neon_begin_partial(8);
                pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
                kernel_neon_end();
        }
index 42c7eec..0b3fcf8 100644 (file)
@@ -30,7 +30,6 @@ generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += pci.h
 generic-y += poll.h
-generic-y += posix_types.h
 generic-y += preempt.h
 generic-y += resource.h
 generic-y += rwsem.h
index a5176cf..f2defe1 100644 (file)
@@ -138,19 +138,10 @@ static inline void __flush_icache_all(void)
 #define flush_icache_page(vma,page)    do { } while (0)
 
 /*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
- * caches, since the direct-mappings of these pages may contain cached
- * data, we need to do a full cache flush to ensure that writebacks
- * don't corrupt data placed into these pages via the new mappings.
+ * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
  */
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
-       /*
-        * set_pte_at() called from vmap_pte_range() does not
-        * have a DSB after cleaning the cache line.
-        */
-       dsb(ish);
 }
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
index 4b23e75..7a2e076 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-static inline u32 icache_policy(void)
-{
-       return (read_cpuid_cachetype() >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK;
-}
+#include <linux/bitops.h>
+
+#define CTR_L1IP(ctr)  (((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+#define ICACHEF_ALIASING       BIT(0)
+#define ICACHEF_AIVIVT         BIT(1)
+
+extern unsigned long __icache_flags;
 
 /*
  * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
@@ -41,12 +45,12 @@ static inline u32 icache_policy(void)
  */
 static inline int icache_is_aliasing(void)
 {
-       return icache_policy() != ICACHE_POLICY_PIPT;
+       return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
 static inline int icache_is_aivivt(void)
 {
-       return icache_policy() == ICACHE_POLICY_AIVIVT;
+       return test_bit(ICACHEF_AIVIVT, &__icache_flags);
 }
 
 static inline u32 cache_type_cwg(void)
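
With icache_policy() removed, the answers now come from bits in __icache_flags, which are presumably set once per CPU by the new cpuinfo code, along these lines (a sketch, not the exact implementation):

        unsigned long __icache_flags;

        static void update_icache_flags(void)
        {
                u32 l1ip = CTR_L1IP(read_cpuid_cachetype());

                if (l1ip != ICACHE_POLICY_PIPT)
                        set_bit(ICACHEF_ALIASING, &__icache_flags);
                if (l1ip == ICACHE_POLICY_AIVIVT)
                        set_bit(ICACHEF_AIVIVT, &__icache_flags);
        }
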
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
new file mode 100644 (file)
index 0000000..0564430
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_H
+#define __ASM_CPU_H
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+
+/*
+ * Records attributes of an individual CPU.
+ */
+struct cpuinfo_arm64 {
+       struct cpu      cpu;
+       u32             reg_ctr;
+       u32             reg_cntfrq;
+       u32             reg_dczid;
+       u32             reg_midr;
+
+       u64             reg_id_aa64isar0;
+       u64             reg_id_aa64isar1;
+       u64             reg_id_aa64mmfr0;
+       u64             reg_id_aa64mmfr1;
+       u64             reg_id_aa64pfr0;
+       u64             reg_id_aa64pfr1;
+
+       u32             reg_id_isar0;
+       u32             reg_id_isar1;
+       u32             reg_id_isar2;
+       u32             reg_id_isar3;
+       u32             reg_id_isar4;
+       u32             reg_id_isar5;
+       u32             reg_id_mmfr0;
+       u32             reg_id_mmfr1;
+       u32             reg_id_mmfr2;
+       u32             reg_id_mmfr3;
+       u32             reg_id_pfr0;
+       u32             reg_id_pfr1;
+};
+
+DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+
+void cpuinfo_store_cpu(void);
+void __init cpuinfo_store_boot_cpu(void);
+
+#endif /* __ASM_CPU_H */
index 27f54a7..379d0b8 100644 (file)
@@ -18,6 +18,8 @@
 
 #define INVALID_HWID           ULONG_MAX
 
+#define MPIDR_UP_BITMASK       (0x1 << 30)
+#define MPIDR_MT_BITMASK       (0x1 << 24)
 #define MPIDR_HWID_BITMASK     0xff00ffffff
 
 #define MPIDR_LEVEL_BITS_SHIFT 3
        __val;                                                          \
 })
 
+#define MIDR_REVISION_MASK     0xf
+#define MIDR_REVISION(midr)    ((midr) & MIDR_REVISION_MASK)
+#define MIDR_PARTNUM_SHIFT     4
+#define MIDR_PARTNUM_MASK      (0xfff << MIDR_PARTNUM_SHIFT)
+#define MIDR_PARTNUM(midr)     \
+       (((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
+#define MIDR_ARCHITECTURE_SHIFT        16
+#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_ARCHITECTURE(midr)        \
+       (((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_VARIANT_SHIFT     20
+#define MIDR_VARIANT_MASK      (0xf << MIDR_VARIANT_SHIFT)
+#define MIDR_VARIANT(midr)     \
+       (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
+#define MIDR_IMPLEMENTOR_SHIFT 24
+#define MIDR_IMPLEMENTOR_MASK  (0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR(midr) \
+       (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+
 #define ARM_CPU_IMP_ARM                0x41
 #define ARM_CPU_IMP_APM                0x50
 
-#define ARM_CPU_PART_AEM_V8    0xD0F0
-#define ARM_CPU_PART_FOUNDATION        0xD000
-#define ARM_CPU_PART_CORTEX_A53        0xD030
-#define ARM_CPU_PART_CORTEX_A57        0xD070
+#define ARM_CPU_PART_AEM_V8    0xD0F
+#define ARM_CPU_PART_FOUNDATION        0xD00
+#define ARM_CPU_PART_CORTEX_A57        0xD07
+#define ARM_CPU_PART_CORTEX_A53        0xD03
 
-#define APM_CPU_PART_POTENZA   0x0000
+#define APM_CPU_PART_POTENZA   0x000
 
 #ifndef __ASSEMBLY__
 
@@ -65,12 +86,12 @@ static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
-       return (read_cpuid_id() & 0xFF000000) >> 24;
+       return MIDR_IMPLEMENTOR(read_cpuid_id());
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
 {
-       return (read_cpuid_id() & 0xFFF0);
+       return MIDR_PARTNUM(read_cpuid_id());
 }
 
 static inline u32 __attribute_const__ read_cpuid_cachetype(void)
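
Note that the part-number constants shrink from 16 bits to the 12-bit field the new MIDR_PARTNUM() actually extracts. A worked example against a hypothetical Cortex-A57 r1p2 MIDR value of 0x411fd072:

        u32 midr = 0x411fd072;  /* hypothetical example value */

        /* MIDR_IMPLEMENTOR(midr) == 0x41  == ARM_CPU_IMP_ARM          */
        /* MIDR_VARIANT(midr)     == 0x1   (the "r1" in r1p2)          */
        /* MIDR_PARTNUM(midr)     == 0xd07 == ARM_CPU_PART_CORTEX_A57  */
        /* MIDR_REVISION(midr)    == 0x2   (the "p2" in r1p2)          */
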
index 3a4572e..dc82e52 100644 (file)
@@ -26,8 +26,6 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops coherent_swiotlb_dma_ops;
index 768414d..007618b 100644 (file)
        str     w\tmpnr, [\state, #16 * 2 + 4]
 .endm
 
+.macro fpsimd_restore_fpcr state, tmp
+       /*
+        * Writes to fpcr may be self-synchronising, so avoid restoring
+        * the register if it hasn't changed.
+        */
+       mrs     \tmp, fpcr
+       cmp     \tmp, \state
+       b.eq    9999f
+       msr     fpcr, \state
+9999:
+.endm
+
+/* Clobbers \state */
 .macro fpsimd_restore state, tmpnr
        ldp     q0, q1, [\state, #16 * 0]
        ldp     q2, q3, [\state, #16 * 2]
@@ -60,7 +73,7 @@
        ldr     w\tmpnr, [\state, #16 * 2]
        msr     fpsr, x\tmpnr
        ldr     w\tmpnr, [\state, #16 * 2 + 4]
-       msr     fpcr, x\tmpnr
+       fpsimd_restore_fpcr x\tmpnr, \state
 .endm
 
 .altmacro
@@ -84,7 +97,7 @@
 .macro fpsimd_restore_partial state, tmpnr1, tmpnr2
        ldp     w\tmpnr1, w\tmpnr2, [\state]
        msr     fpsr, x\tmpnr1
-       msr     fpcr, x\tmpnr2
+       fpsimd_restore_fpcr x\tmpnr2, x\tmpnr1
        adr     x\tmpnr1, 0f
        ldr     w\tmpnr2, [\state, #8]
        add     \state, \state, x\tmpnr2, lsl #4
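
The new macro encodes a compare-before-write pattern. Restated in C (a sketch only; read_fpcr()/write_fpcr() are hypothetical stand-ins for the mrs/msr instructions, not functions from this patch):

    /* Hypothetical helpers standing in for "mrs \tmp, fpcr" / "msr fpcr, \state". */
    extern unsigned long read_fpcr(void);
    extern void write_fpcr(unsigned long val);

    static inline void restore_fpcr(unsigned long saved)
    {
            /* Writes to fpcr may be self-synchronising (expensive), so
             * skip the write when the register already holds the value. */
            if (read_fpcr() != saved)
                    write_fpcr(saved);
    }
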
index 993bce5..ccc7087 100644 (file)
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 128MB of the kernel text.
  */
-#ifdef CONFIG_ARM64_64K_PAGES
-#define VA_BITS                        (42)
-#else
-#define VA_BITS                        (39)
-#endif
+#define VA_BITS                        (CONFIG_ARM64_VA_BITS)
 #define PAGE_OFFSET            (UL(0xffffffffffffffff) << (VA_BITS - 1))
 #define MODULES_END            (PAGE_OFFSET)
 #define MODULES_VADDR          (MODULES_END - SZ_64M)
@@ -56,6 +52,8 @@
 #define TASK_SIZE_32           UL(0x100000000)
 #define TASK_SIZE              (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk)      (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+                               TASK_SIZE_32 : TASK_SIZE_64)
 #else
 #define TASK_SIZE              TASK_SIZE_64
 #endif /* CONFIG_COMPAT */
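
With VA_BITS now taken from CONFIG_ARM64_VA_BITS, PAGE_OFFSET still splits the kernel half of the address space in two. A quick arithmetic check of the formula above (not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            int va_bits[] = { 39, 42, 48 };

            for (int i = 0; i < 3; i++)     /* PAGE_OFFSET = ~0UL << (VA_BITS - 1) */
                    printf("VA_BITS=%d -> PAGE_OFFSET=0x%016lx\n",
                           va_bits[i], ~0UL << (va_bits[i] - 1));
            return 0;
    }

For VA_BITS=39 this prints 0xffffffc000000000, matching the value the old hard-coded configuration produced.
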
index 46bf666..7a3f462 100644 (file)
 /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
 #define __HAVE_ARCH_GATE_AREA          1
 
-#ifndef __ASSEMBLY__
-
+/*
+ * The idmap and swapper page tables need some space reserved in the kernel
+ * image. Both require pgd, pud (4 levels only) and pmd tables to (section)
+ * map the kernel. With the 64K page configuration, swapper and idmap need to
+ * map to pte level. The swapper also maps the FDT (see __create_page_tables
+ * for more information).
+ */
 #ifdef CONFIG_ARM64_64K_PAGES
-#include <asm/pgtable-2level-types.h>
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS)
 #else
-#include <asm/pgtable-3level-types.h>
+#define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS - 1)
 #endif
 
+#define SWAPPER_DIR_SIZE       (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
+#define IDMAP_DIR_SIZE         (SWAPPER_DIR_SIZE)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/pgtable-types.h>
+
 extern void __cpu_clear_user_page(void *p, unsigned long user);
 extern void __cpu_copy_user_page(void *to, const void *from,
                                 unsigned long user);
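
A worked example of the reservation defined here (not patch code): with a 4K granule and CONFIG_ARM64_PGTABLE_LEVELS = 3, the kernel is section-mapped at the pmd level, so the pte level is not reserved:

    #include <stdio.h>

    int main(void)
    {
            int levels = 3;                  /* CONFIG_ARM64_PGTABLE_LEVELS, 4K granule */
            int swapper_levels = levels - 1; /* pmd section-maps the kernel, no pte level */

            printf("SWAPPER_DIR_SIZE = %d bytes\n", swapper_levels * 4096);
            return 0;
    }

With 64K pages, SWAPPER_PGTABLE_LEVELS equals CONFIG_ARM64_PGTABLE_LEVELS instead, because swapper and idmap must map down to the pte level.
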
index 9bea6e7..d5bed02 100644 (file)
@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()              do { } while (0)
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
@@ -44,7 +44,27 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
        set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
 }
 
-#endif /* CONFIG_ARM64_64K_PAGES */
+#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+       return (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
+}
+
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+       BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
+       free_page((unsigned long)pud);
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+{
+       set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE));
+}
+
+#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
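
A sketch of how the new level-3+ helpers chain together when a four-level walk has to be extended under an empty pgd entry (illustration only; allocation-failure handling omitted):

    static void demo_grow_walk(struct mm_struct *mm, pgd_t *pgd, unsigned long addr)
    {
            pud_t *pud = pud_alloc_one(mm, addr);   /* zeroed page for the pud level */
            pmd_t *pmd = pmd_alloc_one(mm, addr);   /* zeroed page for the pmd level */

            pgd_populate(mm, pgd, pud);     /* pgd entry -> pud table (PUD_TYPE_TABLE) */
            pud_populate(mm, pud, pmd);     /* pud entry -> pmd table (PMD_TYPE_TABLE) */
    }
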
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
deleted file mode 100644 (file)
index 2593b49..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PGTABLE_2LEVEL_HWDEF_H
-#define __ASM_PGTABLE_2LEVEL_HWDEF_H
-
-/*
- * With LPAE and 64KB pages, there are 2 levels of page tables. Each level has
- * 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
- * used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
- * entry representing 512MB. The user and kernel address spaces are limited to
- * 4TB in the 64KB page configuration.
- */
-#define PTRS_PER_PTE           8192
-#define PTRS_PER_PGD           8192
-
-/*
- * PGDIR_SHIFT determines the size a top-level page table entry can map.
- */
-#define PGDIR_SHIFT            29
-#define PGDIR_SIZE             (_AC(1, UL) << PGDIR_SHIFT)
-#define PGDIR_MASK             (~(PGDIR_SIZE-1))
-
-/*
- * section address mask and size definitions.
- */
-#define SECTION_SHIFT          29
-#define SECTION_SIZE           (_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK           (~(SECTION_SIZE-1))
-
-#endif
diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h
deleted file mode 100644 (file)
index 5f101e6..0000000
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
-#define __ASM_PGTABLE_2LEVEL_TYPES_H
-
-#include <asm/types.h>
-
-typedef u64 pteval_t;
-typedef u64 pgdval_t;
-typedef pgdval_t pmdval_t;
-
-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { pteval_t pte; } pte_t;
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pteval_t pgprot; } pgprot_t;
-
-#define pte_val(x)      ((x).pte)
-#define pgd_val(x)     ((x).pgd)
-#define pgprot_val(x)   ((x).pgprot)
-
-#define __pte(x)        ((pte_t) { (x) } )
-#define __pgd(x)       ((pgd_t) { (x) } )
-#define __pgprot(x)     ((pgprot_t) { (x) } )
-
-#else  /* !STRICT_MM_TYPECHECKS */
-
-typedef pteval_t pte_t;
-typedef pgdval_t pgd_t;
-typedef pteval_t pgprot_t;
-
-#define pte_val(x)     (x)
-#define pgd_val(x)     (x)
-#define pgprot_val(x)  (x)
-
-#define __pte(x)       (x)
-#define __pgd(x)       (x)
-#define __pgprot(x)    (x)
-
-#endif /* STRICT_MM_TYPECHECKS */
-
-#include <asm-generic/pgtable-nopmd.h>
-
-#endif /* __ASM_PGTABLE_2LEVEL_TYPES_H */
diff --git a/arch/arm64/include/asm/pgtable-3level-hwdef.h b/arch/arm64/include/asm/pgtable-3level-hwdef.h
deleted file mode 100644 (file)
index 3dbf941..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PGTABLE_3LEVEL_HWDEF_H
-#define __ASM_PGTABLE_3LEVEL_HWDEF_H
-
-/*
- * With LPAE and 4KB pages, there are 3 levels of page tables. Each level has
- * 512 entries of 8 bytes each, occupying a 4K page. The first level table
- * covers a range of 512GB, each entry representing 1GB. The user and kernel
- * address spaces are limited to 512GB each.
- */
-#define PTRS_PER_PTE           512
-#define PTRS_PER_PMD           512
-#define PTRS_PER_PGD           512
-
-/*
- * PGDIR_SHIFT determines the size a top-level page table entry can map.
- */
-#define PGDIR_SHIFT            30
-#define PGDIR_SIZE             (_AC(1, UL) << PGDIR_SHIFT)
-#define PGDIR_MASK             (~(PGDIR_SIZE-1))
-
-/*
- * PMD_SHIFT determines the size a middle-level page table entry can map.
- */
-#define PMD_SHIFT              21
-#define PMD_SIZE               (_AC(1, UL) << PMD_SHIFT)
-#define PMD_MASK               (~(PMD_SIZE-1))
-
-/*
- * section address mask and size definitions.
- */
-#define SECTION_SHIFT          21
-#define SECTION_SIZE           (_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK           (~(SECTION_SIZE-1))
-
-#endif
diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-3level-types.h
deleted file mode 100644 (file)
index 4e94424..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_PGTABLE_3LEVEL_TYPES_H
-#define __ASM_PGTABLE_3LEVEL_TYPES_H
-
-#include <asm/types.h>
-
-typedef u64 pteval_t;
-typedef u64 pmdval_t;
-typedef u64 pgdval_t;
-
-#undef STRICT_MM_TYPECHECKS
-
-#ifdef STRICT_MM_TYPECHECKS
-
-/*
- * These are used to make use of C type-checking..
- */
-typedef struct { pteval_t pte; } pte_t;
-typedef struct { pmdval_t pmd; } pmd_t;
-typedef struct { pgdval_t pgd; } pgd_t;
-typedef struct { pteval_t pgprot; } pgprot_t;
-
-#define pte_val(x)      ((x).pte)
-#define pmd_val(x)      ((x).pmd)
-#define pgd_val(x)     ((x).pgd)
-#define pgprot_val(x)   ((x).pgprot)
-
-#define __pte(x)        ((pte_t) { (x) } )
-#define __pmd(x)        ((pmd_t) { (x) } )
-#define __pgd(x)       ((pgd_t) { (x) } )
-#define __pgprot(x)     ((pgprot_t) { (x) } )
-
-#else  /* !STRICT_MM_TYPECHECKS */
-
-typedef pteval_t pte_t;
-typedef pmdval_t pmd_t;
-typedef pgdval_t pgd_t;
-typedef pteval_t pgprot_t;
-
-#define pte_val(x)     (x)
-#define pmd_val(x)     (x)
-#define pgd_val(x)     (x)
-#define pgprot_val(x)  (x)
-
-#define __pte(x)       (x)
-#define __pmd(x)       (x)
-#define __pgd(x)       (x)
-#define __pgprot(x)    (x)
-
-#endif /* STRICT_MM_TYPECHECKS */
-
-#include <asm-generic/pgtable-nopud.h>
-
-#endif /* __ASM_PGTABLE_3LEVEL_TYPES_H */
index 955e8c5..88174e0 100644 (file)
 #ifndef __ASM_PGTABLE_HWDEF_H
 #define __ASM_PGTABLE_HWDEF_H
 
-#ifdef CONFIG_ARM64_64K_PAGES
-#include <asm/pgtable-2level-hwdef.h>
-#else
-#include <asm/pgtable-3level-hwdef.h>
+#define PTRS_PER_PTE           (1 << (PAGE_SHIFT - 3))
+
+/*
+ * PMD_SHIFT determines the size a level 2 page table entry can map.
+ */
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+#define PMD_SHIFT              ((PAGE_SHIFT - 3) * 2 + 3)
+#define PMD_SIZE               (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK               (~(PMD_SIZE-1))
+#define PTRS_PER_PMD           PTRS_PER_PTE
+#endif
+
+/*
+ * PUD_SHIFT determines the size a level 1 page table entry can map.
+ */
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+#define PUD_SHIFT              ((PAGE_SHIFT - 3) * 3 + 3)
+#define PUD_SIZE               (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK               (~(PUD_SIZE-1))
+#define PTRS_PER_PUD           PTRS_PER_PTE
 #endif
 
+/*
+ * PGDIR_SHIFT determines the size a top-level page table entry can map
+ * (depending on the configuration, this level can be 0, 1 or 2).
+ */
+#define PGDIR_SHIFT            ((PAGE_SHIFT - 3) * CONFIG_ARM64_PGTABLE_LEVELS + 3)
+#define PGDIR_SIZE             (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK             (~(PGDIR_SIZE-1))
+#define PTRS_PER_PGD           (1 << (VA_BITS - PGDIR_SHIFT))
+
+/*
+ * Section address mask and size definitions.
+ */
+#define SECTION_SHIFT          PMD_SHIFT
+#define SECTION_SIZE           (_AC(1, UL) << SECTION_SHIFT)
+#define SECTION_MASK           (~(SECTION_SIZE-1))
+
 /*
  * Hardware page table definitions.
  *
  * Level 1 descriptor (PUD).
  */
-
+#define PUD_TYPE_TABLE         (_AT(pudval_t, 3) << 0)
 #define PUD_TABLE_BIT          (_AT(pgdval_t, 1) << 1)
 #define PUD_TYPE_MASK          (_AT(pgdval_t, 3) << 0)
 #define PUD_TYPE_SECT          (_AT(pgdval_t, 1) << 0)
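
The generic shift formulas reproduce the hard-coded values they replace (the deleted headers had PGDIR_SHIFT 30 for 4K/3-level and 29 for 64K/2-level). A standalone check (not patch code):

    #include <stdio.h>

    int main(void)
    {
            struct { int page_shift, levels; } cfg[] = { { 12, 3 }, { 16, 2 } };

            for (int i = 0; i < 2; i++)     /* PGDIR_SHIFT = (PAGE_SHIFT - 3) * levels + 3 */
                    printf("PAGE_SHIFT=%d levels=%d -> PGDIR_SHIFT=%d\n",
                           cfg[i].page_shift, cfg[i].levels,
                           (cfg[i].page_shift - 3) * cfg[i].levels + 3);
            return 0;
    }
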
diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h
new file mode 100644 (file)
index 0000000..ca9df80
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Page table types definitions.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_PGTABLE_TYPES_H
+#define __ASM_PGTABLE_TYPES_H
+
+#include <asm/types.h>
+
+typedef u64 pteval_t;
+typedef u64 pmdval_t;
+typedef u64 pudval_t;
+typedef u64 pgdval_t;
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+
+/*
+ * These are used to make use of C type-checking.
+ */
+typedef struct { pteval_t pte; } pte_t;
+#define pte_val(x)     ((x).pte)
+#define __pte(x)       ((pte_t) { (x) } )
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+typedef struct { pmdval_t pmd; } pmd_t;
+#define pmd_val(x)     ((x).pmd)
+#define __pmd(x)       ((pmd_t) { (x) } )
+#endif
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+typedef struct { pudval_t pud; } pud_t;
+#define pud_val(x)     ((x).pud)
+#define __pud(x)       ((pud_t) { (x) } )
+#endif
+
+typedef struct { pgdval_t pgd; } pgd_t;
+#define pgd_val(x)     ((x).pgd)
+#define __pgd(x)       ((pgd_t) { (x) } )
+
+typedef struct { pteval_t pgprot; } pgprot_t;
+#define pgprot_val(x)  ((x).pgprot)
+#define __pgprot(x)    ((pgprot_t) { (x) } )
+
+#else  /* !STRICT_MM_TYPECHECKS */
+
+typedef pteval_t pte_t;
+#define pte_val(x)     (x)
+#define __pte(x)       (x)
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+typedef pmdval_t pmd_t;
+#define pmd_val(x)     (x)
+#define __pmd(x)       (x)
+#endif
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+typedef pudval_t pud_t;
+#define pud_val(x)     (x)
+#define __pud(x)       (x)
+#endif
+
+typedef pgdval_t pgd_t;
+#define pgd_val(x)     (x)
+#define __pgd(x)       (x)
+
+typedef pteval_t pgprot_t;
+#define pgprot_val(x)  (x)
+#define __pgprot(x)    (x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+#if CONFIG_ARM64_PGTABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#elif CONFIG_ARM64_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#endif
+
+#endif /* __ASM_PGTABLE_TYPES_H */
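
What the STRICT_MM_TYPECHECKS variant buys (illustration, not patch code): with the struct wrappers, accidentally mixing table levels fails to compile instead of being silently accepted as an integer assignment:

    void demo(pte_t pte, pmd_t pmd)
    {
            /* pmd = pte;                      error: incompatible types   */
            pmd = __pmd(pte_val(pte));      /* explicit conversion compiles */
            (void)pmd;
    }
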
index 598cc38..ffe1ba0 100644 (file)
 
 /*
  * VMALLOC and SPARSEMEM_VMEMMAP ranges.
+ *
+ * VMEMMAP_SIZE: allows the whole VA space to be covered by a struct page array
+ *     (rounded up to PUD_SIZE).
+ * VMALLOC_START: beginning of the kernel VA space
+ * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space,
+ *     fixed mappings and modules
  */
+#define VMEMMAP_SIZE           ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE)
 #define VMALLOC_START          (UL(0xffffffffffffffff) << VA_BITS)
-#define VMALLOC_END            (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
+#define VMALLOC_END            (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)
 
 #define vmemmap                        ((struct page *)(VMALLOC_END + SZ_64K))
 
 #ifndef __ASSEMBLY__
 extern void __pte_error(const char *file, int line, unsigned long val);
 extern void __pmd_error(const char *file, int line, unsigned long val);
+extern void __pud_error(const char *file, int line, unsigned long val);
 extern void __pgd_error(const char *file, int line, unsigned long val);
 
-#define pte_ERROR(pte)         __pte_error(__FILE__, __LINE__, pte_val(pte))
-#ifndef CONFIG_ARM64_64K_PAGES
-#define pmd_ERROR(pmd)         __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
-#endif
-#define pgd_ERROR(pgd)         __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
-
 #ifdef CONFIG_SMP
 #define PROT_DEFAULT           (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define PROT_SECT_DEFAULT      (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
@@ -112,6 +114,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 extern struct page *empty_zero_page;
 #define ZERO_PAGE(vaddr)       (empty_zero_page)
 
+#define pte_ERROR(pte)         __pte_error(__FILE__, __LINE__, pte_val(pte))
+
 #define pte_pfn(pte)           ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define pfn_pte(pfn,prot)      (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
@@ -119,6 +123,10 @@ extern struct page *empty_zero_page;
 #define pte_none(pte)          (!pte_val(pte))
 #define pte_clear(mm,addr,ptep)        set_pte(ptep, __pte(0))
 #define pte_page(pte)          (pfn_to_page(pte_pfn(pte)))
+
+/* Find an entry in the third-level page table. */
+#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+
 #define pte_offset_kernel(dir,addr)    (pmd_page_vaddr(*(dir)) + pte_index(addr))
 
 #define pte_offset_map(dir,addr)       pte_offset_kernel((dir), (addr))
@@ -138,6 +146,8 @@ extern struct page *empty_zero_page;
 
 #define pte_valid_user(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+#define pte_valid_not_user(pte) \
+       ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 
 static inline pte_t pte_wrprotect(pte_t pte)
 {
@@ -184,6 +194,15 @@ static inline pte_t pte_mkspecial(pte_t pte)
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
        *ptep = pte;
+
+       /*
+        * Only if the new pte is valid and kernel, otherwise TLB maintenance
+        * or update_mmu_cache() have the necessary barriers.
+        */
+       if (pte_valid_not_user(pte)) {
+               dsb(ishst);
+               isb();
+       }
 }
 
 extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
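
The predicate gating the new barriers can be restated standalone (illustration only; the bit positions are assumptions for the demo, not definitions from this patch):

    #include <stdbool.h>

    #define DEMO_PTE_VALID  (1UL << 0)      /* illustrative bit positions */
    #define DEMO_PTE_USER   (1UL << 6)

    /* True only for valid kernel (non-user) ptes: invalid ptes are followed
     * by TLB maintenance, and user ptes get their dsb in update_mmu_cache(). */
    static bool demo_pte_valid_not_user(unsigned long pteval)
    {
            return (pteval & (DEMO_PTE_VALID | DEMO_PTE_USER)) == DEMO_PTE_VALID;
    }
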
@@ -246,7 +265,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd)  (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
+#define pmd_mknotpresent(pmd)  (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
@@ -292,7 +311,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_SECT)
 
-#ifdef ARM64_64K_PAGES
+#ifdef CONFIG_ARM64_64K_PAGES
 #define pud_sect(pud)          (0)
 #else
 #define pud_sect(pud)          ((pud_val(pud) & PUD_TYPE_MASK) == \
@@ -303,6 +322,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
        *pmdp = pmd;
        dsb(ishst);
+       isb();
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
@@ -323,7 +343,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
  */
 #define mk_pte(page,prot)      pfn_pte(page_to_pfn(page),prot)
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+
+#define pmd_ERROR(pmd)         __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
 
 #define pud_none(pud)          (!pud_val(pud))
 #define pud_bad(pud)           (!(pud_val(pud) & 2))
@@ -333,6 +355,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
 {
        *pudp = pud;
        dsb(ishst);
+       isb();
 }
 
 static inline void pud_clear(pud_t *pudp)
@@ -345,7 +368,51 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
        return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
 }
 
-#endif /* CONFIG_ARM64_64K_PAGES */
+/* Find an entry in the second-level page table. */
+#define pmd_index(addr)                (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+
+static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
+{
+       return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
+}
+
+#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
+
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+
+#define pud_ERROR(pud)         __pud_error(__FILE__, __LINE__, pud_val(pud))
+
+#define pgd_none(pgd)          (!pgd_val(pgd))
+#define pgd_bad(pgd)           (!(pgd_val(pgd) & 2))
+#define pgd_present(pgd)       (pgd_val(pgd))
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+       *pgdp = pgd;
+       dsb(ishst);
+}
+
+static inline void pgd_clear(pgd_t *pgdp)
+{
+       set_pgd(pgdp, __pgd(0));
+}
+
+static inline pud_t *pgd_page_vaddr(pgd_t pgd)
+{
+       return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK);
+}
+
+/* Find an entry in the first-level page table. */
+#define pud_index(addr)                (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
+{
+       return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
+}
+
+#endif  /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
+
+#define pgd_ERROR(pgd)         __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
 
 /* to find an entry in a page-table-directory */
 #define pgd_index(addr)                (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
@@ -355,18 +422,6 @@ static inline pmd_t *pud_page_vaddr(pud_t pud)
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(addr)     pgd_offset(&init_mm, addr)
 
-/* Find an entry in the second-level page table.. */
-#ifndef CONFIG_ARM64_64K_PAGES
-#define pmd_index(addr)                (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
-static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
-{
-       return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
-}
-#endif
-
-/* Find an entry in the third-level page table.. */
-#define pte_index(addr)                (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
@@ -383,9 +438,6 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
 
-#define SWAPPER_DIR_SIZE       (3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE         (2 * PAGE_SIZE)
-
 /*
  * Encode and decode a swap entry:
  *     bits 0-1:       present (must be zero)
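
With the per-level helpers now grouped by CONFIG_ARM64_PGTABLE_LEVELS, a full software walk reads as below (a sketch assuming a 4-level configuration; presence and validity checks omitted):

    static pte_t *demo_walk(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd = pgd_offset(mm, addr);      /* top level         */
            pud_t *pud = pud_offset(pgd, addr);     /* levels > 3 only   */
            pmd_t *pmd = pmd_offset(pud, addr);     /* levels > 2 only   */

            return pte_offset_kernel(pmd, addr);    /* third-level entry */
    }
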
index 34de2a8..3df21fe 100644 (file)
@@ -129,6 +129,7 @@ extern void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()                    barrier()
+#define cpu_relax_lowlatency()                cpu_relax()
 
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
@@ -137,8 +138,8 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
-#define KSTK_EIP(tsk)  task_pt_regs(tsk)->pc
-#define KSTK_ESP(tsk)  task_pt_regs(tsk)->sp
+#define KSTK_EIP(tsk)  ((unsigned long)task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)  ((unsigned long)task_pt_regs(tsk)->sp)
 
 /*
  * Prefetching support
index a429b59..501000f 100644 (file)
 
 #include <uapi/asm/ptrace.h>
 
+/* Current Exception Level values, as contained in CurrentEL */
+#define CurrentEL_EL1          (1 << 2)
+#define CurrentEL_EL2          (2 << 2)
+
 /* AArch32-specific ptrace requests */
 #define COMPAT_PTRACE_GETREGS          12
 #define COMPAT_PTRACE_SETREGS          13
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
new file mode 100644 (file)
index 0000000..fe5e287
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * GCC stack protector support.
+ *
+ * The stack protector works by putting a predefined pattern at the start
+ * of the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function.  The pattern is called the stack canary,
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on ARM.  This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef __ASM_STACKPROTECTOR_H
+#define __ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+       unsigned long canary;
+
+       /* Try to get a semi random initial value. */
+       get_random_bytes(&canary, sizeof(canary));
+       canary ^= LINUX_VERSION_CODE;
+
+       current->stack_canary = canary;
+       __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* __ASM_STACKPROTECTOR_H */
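
Roughly what -fstack-protector makes the compiler emit around a protected function (an illustration of the mechanism, not code from this patch):

    extern unsigned long __stack_chk_guard;
    extern void __stack_chk_fail(void);

    void demo_protected(void)
    {
            unsigned long canary = __stack_chk_guard;       /* saved on entry  */
            char buf[64];

            /* ... function body that might overflow buf ... */
            (void)buf;
            if (canary != __stack_chk_guard)                /* checked on exit */
                    __stack_chk_fail();
    }
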
index 383771e..709a574 100644 (file)
@@ -16,6 +16,8 @@
 #ifndef __ASM_SYSCALL_H
 #define __ASM_SYSCALL_H
 
+#include <uapi/linux/audit.h>
+#include <linux/compat.h>
 #include <linux/err.h>
 
 extern const void *sys_call_table[];
@@ -105,4 +107,16 @@ static inline void syscall_set_arguments(struct task_struct *task,
        memcpy(&regs->regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * AArch64 has the same system calls on both little- and big-endian.
+ */
+static inline int syscall_get_arch(void)
+{
+       if (is_compat_task())
+               return AUDIT_ARCH_ARM;
+
+       return AUDIT_ARCH_AARCH64;
+}
+
 #endif /* __ASM_SYSCALL_H */
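
An equivalent standalone restatement of the selection logic (the AUDIT_ARCH_* values are reproduced here as an assumption for the demo):

    #include <stdio.h>

    #define DEMO_AUDIT_ARCH_ARM      0x40000028u    /* EM_ARM | __AUDIT_ARCH_LE */
    #define DEMO_AUDIT_ARCH_AARCH64  0xc00000b7u    /* EM_AARCH64 | 64BIT | LE  */

    static unsigned int demo_syscall_get_arch(int is_compat_task)
    {
            return is_compat_task ? DEMO_AUDIT_ARCH_ARM : DEMO_AUDIT_ARCH_AARCH64;
    }

    int main(void)
    {
            printf("compat=%#x native=%#x\n",
                   demo_syscall_get_arch(1), demo_syscall_get_arch(0));
            return 0;
    }
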
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
new file mode 100644 (file)
index 0000000..5c89df0
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * Macros for accessing system registers with older binutils.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_SYSREG_H
+#define __ASM_SYSREG_H
+
+#define sys_reg(op0, op1, crn, crm, op2) \
+       ((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))
+
+#ifdef __ASSEMBLY__
+
+       .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
+       .equ    __reg_num_x\num, \num
+       .endr
+       .equ    __reg_num_xzr, 31
+
+       .macro  mrs_s, rt, sreg
+       .inst   0xd5300000|(\sreg)|(__reg_num_\rt)
+       .endm
+
+       .macro  msr_s, sreg, rt
+       .inst   0xd5100000|(\sreg)|(__reg_num_\rt)
+       .endm
+
+#else
+
+asm(
+"      .irp    num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n"
+"      .equ    __reg_num_x\\num, \\num\n"
+"      .endr\n"
+"      .equ    __reg_num_xzr, 31\n"
+"\n"
+"      .macro  mrs_s, rt, sreg\n"
+"      .inst   0xd5300000|(\\sreg)|(__reg_num_\\rt)\n"
+"      .endm\n"
+"\n"
+"      .macro  msr_s, sreg, rt\n"
+"      .inst   0xd5100000|(\\sreg)|(__reg_num_\\rt)\n"
+"      .endm\n"
+);
+
+#endif
+
+#endif /* __ASM_SYSREG_H */
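
A quick check of the sys_reg() field packing with arbitrary operands (illustration only; the operands below do not name a real register):

    #include <stdio.h>

    #define sys_reg(op0, op1, crn, crm, op2) \
            ((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5))

    int main(void)
    {
            /* ((3-2)<<19) | (4<<12) | (4<<8) = 0x84400, which slots into the
             * 0xd5300000/0xd5100000 .inst templates above with bits 0-4 left
             * free for the register number. */
            printf("sys_reg(3,0,4,4,0) = %#x\n", sys_reg(3, 0, 4, 4, 0));
            return 0;
    }
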
index e40b6d0..45108d8 100644 (file)
@@ -103,6 +103,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_NEED_RESCHED       1
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_FOREIGN_FPSTATE    3       /* CPU's FP state is not current's */
+#define TIF_NOHZ               7
 #define TIF_SYSCALL_TRACE      8
 #define TIF_SYSCALL_AUDIT      9
 #define TIF_SYSCALL_TRACEPOINT 10
@@ -118,6 +119,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_FOREIGN_FPSTATE   (1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_NOHZ              (1 << TIF_NOHZ)
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SYSCALL_TRACEPOINT        (1 << TIF_SYSCALL_TRACEPOINT)
@@ -128,7 +130,8 @@ static inline struct thread_info *current_thread_info(void)
                                 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
 
 #define _TIF_SYSCALL_WORK      (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
-                                _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+                                _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+                                _TIF_NOHZ)
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_THREAD_INFO_H */
index 80e2c08..62731ef 100644 (file)
@@ -91,7 +91,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        tlb_remove_page(tlb, pte);
 }
 
-#ifndef CONFIG_ARM64_64K_PAGES
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
                                  unsigned long addr)
 {
@@ -100,6 +100,15 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 }
 #endif
 
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
+                                 unsigned long addr)
+{
+       tlb_add_flush(tlb, addr);
+       tlb_remove_page(tlb, virt_to_page(pudp));
+}
+#endif
+
 static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp,
                                                unsigned long address)
 {
index b9349c4..73f0ce5 100644 (file)
@@ -98,8 +98,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
        dsb(ish);
 }
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-                                       unsigned long start, unsigned long end)
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+                                    unsigned long start, unsigned long end)
 {
        unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
        unsigned long addr;
@@ -112,7 +112,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
        dsb(ish);
 }
 
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        unsigned long addr;
        start >>= 12;
@@ -122,6 +122,30 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
        for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
                asm("tlbi vaae1is, %0" : : "r"(addr));
        dsb(ish);
+       isb();
+}
+
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges; it is
+ * not necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE  (1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       if ((end - start) <= MAX_TLB_RANGE)
+               __flush_tlb_range(vma, start, end);
+       else
+               flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       if ((end - start) <= MAX_TLB_RANGE)
+               __flush_tlb_kernel_range(start, end);
+       else
+               flush_tlb_all();
 }
 
 /*
@@ -131,8 +155,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep)
 {
        /*
-        * set_pte() does not have a DSB, so make sure that the page table
-        * write is visible.
+        * set_pte() does not have a DSB for user mappings, so make sure that
+        * the page table write is visible.
         */
        dsb(ishst);
 }
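
The range-vs-full decision introduced above can be restated standalone (not patch code): past 1024 pages, looping per-page TLBI operations risks soft lockups, so the code falls back to flushing everything:

    static int demo_use_full_flush(unsigned long start, unsigned long end,
                                   unsigned long page_shift)
    {
            unsigned long max_range = 1024UL << page_shift; /* MAX_TLB_RANGE */

            return (end - start) > max_range; /* -> flush_tlb_mm()/flush_tlb_all() */
    }
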
index e5f47df..4bc95d2 100644 (file)
 #define __ARCH_WANT_COMPAT_SYS_SENDFILE
 #define __ARCH_WANT_SYS_FORK
 #define __ARCH_WANT_SYS_VFORK
+
+/*
+ * Compat syscall numbers used by the AArch64 kernel.
+ */
+#define __NR_compat_restart_syscall    0
+#define __NR_compat_sigreturn          119
+#define __NR_compat_rt_sigreturn       173
+
+/*
+ * The following SVCs are ARM private.
+ */
+#define __ARM_NR_COMPAT_BASE           0x0f0000
+#define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
+#define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
+
+#define __NR_compat_syscalls           383
 #endif
+
 #define __ARCH_WANT_SYS_CLONE
 #include <uapi/asm/unistd.h>
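
The rewritten unistd32.h below pairs each __NR_* definition with a __SYSCALL() entry, so the same header can both export the numbers and build the dispatch table. A sketch of the table-building side (an assumption mirroring the usual kernel pattern, not code from this patch):

    /* Redefine __SYSCALL as a designated initializer, then include the list. */
    #undef __SYSCALL
    #define __SYSCALL(nr, sym)      [nr] = sym,

    const void *compat_sys_call_table[__NR_compat_syscalls] = {
            [0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,
    #include <asm/unistd32.h>
    };
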
 
index c8d8fc1..e242600 100644 (file)
 #define __SYSCALL(x, y)
 #endif
 
-__SYSCALL(0,   sys_restart_syscall)
-__SYSCALL(1,   sys_exit)
-__SYSCALL(2,   sys_fork)
-__SYSCALL(3,   sys_read)
-__SYSCALL(4,   sys_write)
-__SYSCALL(5,   compat_sys_open)
-__SYSCALL(6,   sys_close)
-__SYSCALL(7,   sys_ni_syscall)                 /* 7 was sys_waitpid */
-__SYSCALL(8,   sys_creat)
-__SYSCALL(9,   sys_link)
-__SYSCALL(10,  sys_unlink)
-__SYSCALL(11,  compat_sys_execve)
-__SYSCALL(12,  sys_chdir)
-__SYSCALL(13,  sys_ni_syscall)                 /* 13 was sys_time */
-__SYSCALL(14,  sys_mknod)
-__SYSCALL(15,  sys_chmod)
-__SYSCALL(16,  sys_lchown16)
-__SYSCALL(17,  sys_ni_syscall)                 /* 17 was sys_break */
-__SYSCALL(18,  sys_ni_syscall)                 /* 18 was sys_stat */
-__SYSCALL(19,  compat_sys_lseek)
-__SYSCALL(20,  sys_getpid)
-__SYSCALL(21,  compat_sys_mount)
-__SYSCALL(22,  sys_ni_syscall)                 /* 22 was sys_umount */
-__SYSCALL(23,  sys_setuid16)
-__SYSCALL(24,  sys_getuid16)
-__SYSCALL(25,  sys_ni_syscall)                 /* 25 was sys_stime */
-__SYSCALL(26,  compat_sys_ptrace)
-__SYSCALL(27,  sys_ni_syscall)                 /* 27 was sys_alarm */
-__SYSCALL(28,  sys_ni_syscall)                 /* 28 was sys_fstat */
-__SYSCALL(29,  sys_pause)
-__SYSCALL(30,  sys_ni_syscall)                 /* 30 was sys_utime */
-__SYSCALL(31,  sys_ni_syscall)                 /* 31 was sys_stty */
-__SYSCALL(32,  sys_ni_syscall)                 /* 32 was sys_gtty */
-__SYSCALL(33,  sys_access)
-__SYSCALL(34,  sys_nice)
-__SYSCALL(35,  sys_ni_syscall)                 /* 35 was sys_ftime */
-__SYSCALL(36,  sys_sync)
-__SYSCALL(37,  sys_kill)
-__SYSCALL(38,  sys_rename)
-__SYSCALL(39,  sys_mkdir)
-__SYSCALL(40,  sys_rmdir)
-__SYSCALL(41,  sys_dup)
-__SYSCALL(42,  sys_pipe)
-__SYSCALL(43,  compat_sys_times)
-__SYSCALL(44,  sys_ni_syscall)                 /* 44 was sys_prof */
-__SYSCALL(45,  sys_brk)
-__SYSCALL(46,  sys_setgid16)
-__SYSCALL(47,  sys_getgid16)
-__SYSCALL(48,  sys_ni_syscall)                 /* 48 was sys_signal */
-__SYSCALL(49,  sys_geteuid16)
-__SYSCALL(50,  sys_getegid16)
-__SYSCALL(51,  sys_acct)
-__SYSCALL(52,  sys_umount)
-__SYSCALL(53,  sys_ni_syscall)                 /* 53 was sys_lock */
-__SYSCALL(54,  compat_sys_ioctl)
-__SYSCALL(55,  compat_sys_fcntl)
-__SYSCALL(56,  sys_ni_syscall)                 /* 56 was sys_mpx */
-__SYSCALL(57,  sys_setpgid)
-__SYSCALL(58,  sys_ni_syscall)                 /* 58 was sys_ulimit */
-__SYSCALL(59,  sys_ni_syscall)                 /* 59 was sys_olduname */
-__SYSCALL(60,  sys_umask)
-__SYSCALL(61,  sys_chroot)
-__SYSCALL(62,  compat_sys_ustat)
-__SYSCALL(63,  sys_dup2)
-__SYSCALL(64,  sys_getppid)
-__SYSCALL(65,  sys_getpgrp)
-__SYSCALL(66,  sys_setsid)
-__SYSCALL(67,  compat_sys_sigaction)
-__SYSCALL(68,  sys_ni_syscall)                 /* 68 was sys_sgetmask */
-__SYSCALL(69,  sys_ni_syscall)                 /* 69 was sys_ssetmask */
-__SYSCALL(70,  sys_setreuid16)
-__SYSCALL(71,  sys_setregid16)
-__SYSCALL(72,  sys_sigsuspend)
-__SYSCALL(73,  compat_sys_sigpending)
-__SYSCALL(74,  sys_sethostname)
-__SYSCALL(75,  compat_sys_setrlimit)
-__SYSCALL(76,  sys_ni_syscall)                 /* 76 was compat_sys_getrlimit */
-__SYSCALL(77,  compat_sys_getrusage)
-__SYSCALL(78,  compat_sys_gettimeofday)
-__SYSCALL(79,  compat_sys_settimeofday)
-__SYSCALL(80,  sys_getgroups16)
-__SYSCALL(81,  sys_setgroups16)
-__SYSCALL(82,  sys_ni_syscall)                 /* 82 was compat_sys_select */
-__SYSCALL(83,  sys_symlink)
-__SYSCALL(84,  sys_ni_syscall)                 /* 84 was sys_lstat */
-__SYSCALL(85,  sys_readlink)
-__SYSCALL(86,  sys_uselib)
-__SYSCALL(87,  sys_swapon)
-__SYSCALL(88,  sys_reboot)
-__SYSCALL(89,  sys_ni_syscall)                 /* 89 was sys_readdir */
-__SYSCALL(90,  sys_ni_syscall)                 /* 90 was sys_mmap */
-__SYSCALL(91,  sys_munmap)
-__SYSCALL(92,  compat_sys_truncate)
-__SYSCALL(93,  compat_sys_ftruncate)
-__SYSCALL(94,  sys_fchmod)
-__SYSCALL(95,  sys_fchown16)
-__SYSCALL(96,  sys_getpriority)
-__SYSCALL(97,  sys_setpriority)
-__SYSCALL(98,  sys_ni_syscall)                 /* 98 was sys_profil */
-__SYSCALL(99,  compat_sys_statfs)
-__SYSCALL(100, compat_sys_fstatfs)
-__SYSCALL(101, sys_ni_syscall)                 /* 101 was sys_ioperm */
-__SYSCALL(102, sys_ni_syscall)                 /* 102 was sys_socketcall */
-__SYSCALL(103, sys_syslog)
-__SYSCALL(104, compat_sys_setitimer)
-__SYSCALL(105, compat_sys_getitimer)
-__SYSCALL(106, compat_sys_newstat)
-__SYSCALL(107, compat_sys_newlstat)
-__SYSCALL(108, compat_sys_newfstat)
-__SYSCALL(109, sys_ni_syscall)                 /* 109 was sys_uname */
-__SYSCALL(110, sys_ni_syscall)                 /* 110 was sys_iopl */
-__SYSCALL(111, sys_vhangup)
-__SYSCALL(112, sys_ni_syscall)                 /* 112 was sys_idle */
-__SYSCALL(113, sys_ni_syscall)                 /* 113 was sys_syscall */
-__SYSCALL(114, compat_sys_wait4)
-__SYSCALL(115, sys_swapoff)
-__SYSCALL(116, compat_sys_sysinfo)
-__SYSCALL(117, sys_ni_syscall)                 /* 117 was sys_ipc */
-__SYSCALL(118, sys_fsync)
-__SYSCALL(119, compat_sys_sigreturn_wrapper)
-__SYSCALL(120, sys_clone)
-__SYSCALL(121, sys_setdomainname)
-__SYSCALL(122, sys_newuname)
-__SYSCALL(123, sys_ni_syscall)                 /* 123 was sys_modify_ldt */
-__SYSCALL(124, compat_sys_adjtimex)
-__SYSCALL(125, sys_mprotect)
-__SYSCALL(126, compat_sys_sigprocmask)
-__SYSCALL(127, sys_ni_syscall)                 /* 127 was sys_create_module */
-__SYSCALL(128, sys_init_module)
-__SYSCALL(129, sys_delete_module)
-__SYSCALL(130, sys_ni_syscall)                 /* 130 was sys_get_kernel_syms */
-__SYSCALL(131, sys_quotactl)
-__SYSCALL(132, sys_getpgid)
-__SYSCALL(133, sys_fchdir)
-__SYSCALL(134, sys_bdflush)
-__SYSCALL(135, sys_sysfs)
-__SYSCALL(136, sys_personality)
-__SYSCALL(137, sys_ni_syscall)                 /* 137 was sys_afs_syscall */
-__SYSCALL(138, sys_setfsuid16)
-__SYSCALL(139, sys_setfsgid16)
-__SYSCALL(140, sys_llseek)
-__SYSCALL(141, compat_sys_getdents)
-__SYSCALL(142, compat_sys_select)
-__SYSCALL(143, sys_flock)
-__SYSCALL(144, sys_msync)
-__SYSCALL(145, compat_sys_readv)
-__SYSCALL(146, compat_sys_writev)
-__SYSCALL(147, sys_getsid)
-__SYSCALL(148, sys_fdatasync)
-__SYSCALL(149, compat_sys_sysctl)
-__SYSCALL(150, sys_mlock)
-__SYSCALL(151, sys_munlock)
-__SYSCALL(152, sys_mlockall)
-__SYSCALL(153, sys_munlockall)
-__SYSCALL(154, sys_sched_setparam)
-__SYSCALL(155, sys_sched_getparam)
-__SYSCALL(156, sys_sched_setscheduler)
-__SYSCALL(157, sys_sched_getscheduler)
-__SYSCALL(158, sys_sched_yield)
-__SYSCALL(159, sys_sched_get_priority_max)
-__SYSCALL(160, sys_sched_get_priority_min)
-__SYSCALL(161, compat_sys_sched_rr_get_interval)
-__SYSCALL(162, compat_sys_nanosleep)
-__SYSCALL(163, sys_mremap)
-__SYSCALL(164, sys_setresuid16)
-__SYSCALL(165, sys_getresuid16)
-__SYSCALL(166, sys_ni_syscall)                 /* 166 was sys_vm86 */
-__SYSCALL(167, sys_ni_syscall)                 /* 167 was sys_query_module */
-__SYSCALL(168, sys_poll)
-__SYSCALL(169, sys_ni_syscall)
-__SYSCALL(170, sys_setresgid16)
-__SYSCALL(171, sys_getresgid16)
-__SYSCALL(172, sys_prctl)
-__SYSCALL(173, compat_sys_rt_sigreturn_wrapper)
-__SYSCALL(174, compat_sys_rt_sigaction)
-__SYSCALL(175, compat_sys_rt_sigprocmask)
-__SYSCALL(176, compat_sys_rt_sigpending)
-__SYSCALL(177, compat_sys_rt_sigtimedwait)
-__SYSCALL(178, compat_sys_rt_sigqueueinfo)
-__SYSCALL(179, compat_sys_rt_sigsuspend)
-__SYSCALL(180, compat_sys_pread64_wrapper)
-__SYSCALL(181, compat_sys_pwrite64_wrapper)
-__SYSCALL(182, sys_chown16)
-__SYSCALL(183, sys_getcwd)
-__SYSCALL(184, sys_capget)
-__SYSCALL(185, sys_capset)
-__SYSCALL(186, compat_sys_sigaltstack)
-__SYSCALL(187, compat_sys_sendfile)
-__SYSCALL(188, sys_ni_syscall)                 /* 188 reserved */
-__SYSCALL(189, sys_ni_syscall)                 /* 189 reserved */
-__SYSCALL(190, sys_vfork)
-__SYSCALL(191, compat_sys_getrlimit)           /* SuS compliant getrlimit */
-__SYSCALL(192, sys_mmap_pgoff)
-__SYSCALL(193, compat_sys_truncate64_wrapper)
-__SYSCALL(194, compat_sys_ftruncate64_wrapper)
-__SYSCALL(195, sys_stat64)
-__SYSCALL(196, sys_lstat64)
-__SYSCALL(197, sys_fstat64)
-__SYSCALL(198, sys_lchown)
-__SYSCALL(199, sys_getuid)
-__SYSCALL(200, sys_getgid)
-__SYSCALL(201, sys_geteuid)
-__SYSCALL(202, sys_getegid)
-__SYSCALL(203, sys_setreuid)
-__SYSCALL(204, sys_setregid)
-__SYSCALL(205, sys_getgroups)
-__SYSCALL(206, sys_setgroups)
-__SYSCALL(207, sys_fchown)
-__SYSCALL(208, sys_setresuid)
-__SYSCALL(209, sys_getresuid)
-__SYSCALL(210, sys_setresgid)
-__SYSCALL(211, sys_getresgid)
-__SYSCALL(212, sys_chown)
-__SYSCALL(213, sys_setuid)
-__SYSCALL(214, sys_setgid)
-__SYSCALL(215, sys_setfsuid)
-__SYSCALL(216, sys_setfsgid)
-__SYSCALL(217, compat_sys_getdents64)
-__SYSCALL(218, sys_pivot_root)
-__SYSCALL(219, sys_mincore)
-__SYSCALL(220, sys_madvise)
-__SYSCALL(221, compat_sys_fcntl64)
-__SYSCALL(222, sys_ni_syscall)                 /* 222 for tux */
-__SYSCALL(223, sys_ni_syscall)                 /* 223 is unused */
-__SYSCALL(224, sys_gettid)
-__SYSCALL(225, compat_sys_readahead_wrapper)
-__SYSCALL(226, sys_setxattr)
-__SYSCALL(227, sys_lsetxattr)
-__SYSCALL(228, sys_fsetxattr)
-__SYSCALL(229, sys_getxattr)
-__SYSCALL(230, sys_lgetxattr)
-__SYSCALL(231, sys_fgetxattr)
-__SYSCALL(232, sys_listxattr)
-__SYSCALL(233, sys_llistxattr)
-__SYSCALL(234, sys_flistxattr)
-__SYSCALL(235, sys_removexattr)
-__SYSCALL(236, sys_lremovexattr)
-__SYSCALL(237, sys_fremovexattr)
-__SYSCALL(238, sys_tkill)
-__SYSCALL(239, sys_sendfile64)
-__SYSCALL(240, compat_sys_futex)
-__SYSCALL(241, compat_sys_sched_setaffinity)
-__SYSCALL(242, compat_sys_sched_getaffinity)
-__SYSCALL(243, compat_sys_io_setup)
-__SYSCALL(244, sys_io_destroy)
-__SYSCALL(245, compat_sys_io_getevents)
-__SYSCALL(246, compat_sys_io_submit)
-__SYSCALL(247, sys_io_cancel)
-__SYSCALL(248, sys_exit_group)
-__SYSCALL(249, compat_sys_lookup_dcookie)
-__SYSCALL(250, sys_epoll_create)
-__SYSCALL(251, sys_epoll_ctl)
-__SYSCALL(252, sys_epoll_wait)
-__SYSCALL(253, sys_remap_file_pages)
-__SYSCALL(254, sys_ni_syscall)                 /* 254 for set_thread_area */
-__SYSCALL(255, sys_ni_syscall)                 /* 255 for get_thread_area */
-__SYSCALL(256, sys_set_tid_address)
-__SYSCALL(257, compat_sys_timer_create)
-__SYSCALL(258, compat_sys_timer_settime)
-__SYSCALL(259, compat_sys_timer_gettime)
-__SYSCALL(260, sys_timer_getoverrun)
-__SYSCALL(261, sys_timer_delete)
-__SYSCALL(262, compat_sys_clock_settime)
-__SYSCALL(263, compat_sys_clock_gettime)
-__SYSCALL(264, compat_sys_clock_getres)
-__SYSCALL(265, compat_sys_clock_nanosleep)
-__SYSCALL(266, compat_sys_statfs64_wrapper)
-__SYSCALL(267, compat_sys_fstatfs64_wrapper)
-__SYSCALL(268, sys_tgkill)
-__SYSCALL(269, compat_sys_utimes)
-__SYSCALL(270, compat_sys_fadvise64_64_wrapper)
-__SYSCALL(271, sys_pciconfig_iobase)
-__SYSCALL(272, sys_pciconfig_read)
-__SYSCALL(273, sys_pciconfig_write)
-__SYSCALL(274, compat_sys_mq_open)
-__SYSCALL(275, sys_mq_unlink)
-__SYSCALL(276, compat_sys_mq_timedsend)
-__SYSCALL(277, compat_sys_mq_timedreceive)
-__SYSCALL(278, compat_sys_mq_notify)
-__SYSCALL(279, compat_sys_mq_getsetattr)
-__SYSCALL(280, compat_sys_waitid)
-__SYSCALL(281, sys_socket)
-__SYSCALL(282, sys_bind)
-__SYSCALL(283, sys_connect)
-__SYSCALL(284, sys_listen)
-__SYSCALL(285, sys_accept)
-__SYSCALL(286, sys_getsockname)
-__SYSCALL(287, sys_getpeername)
-__SYSCALL(288, sys_socketpair)
-__SYSCALL(289, sys_send)
-__SYSCALL(290, sys_sendto)
-__SYSCALL(291, compat_sys_recv)
-__SYSCALL(292, compat_sys_recvfrom)
-__SYSCALL(293, sys_shutdown)
-__SYSCALL(294, compat_sys_setsockopt)
-__SYSCALL(295, compat_sys_getsockopt)
-__SYSCALL(296, compat_sys_sendmsg)
-__SYSCALL(297, compat_sys_recvmsg)
-__SYSCALL(298, sys_semop)
-__SYSCALL(299, sys_semget)
-__SYSCALL(300, compat_sys_semctl)
-__SYSCALL(301, compat_sys_msgsnd)
-__SYSCALL(302, compat_sys_msgrcv)
-__SYSCALL(303, sys_msgget)
-__SYSCALL(304, compat_sys_msgctl)
-__SYSCALL(305, compat_sys_shmat)
-__SYSCALL(306, sys_shmdt)
-__SYSCALL(307, sys_shmget)
-__SYSCALL(308, compat_sys_shmctl)
-__SYSCALL(309, sys_add_key)
-__SYSCALL(310, sys_request_key)
-__SYSCALL(311, compat_sys_keyctl)
-__SYSCALL(312, compat_sys_semtimedop)
-__SYSCALL(313, sys_ni_syscall)
-__SYSCALL(314, sys_ioprio_set)
-__SYSCALL(315, sys_ioprio_get)
-__SYSCALL(316, sys_inotify_init)
-__SYSCALL(317, sys_inotify_add_watch)
-__SYSCALL(318, sys_inotify_rm_watch)
-__SYSCALL(319, compat_sys_mbind)
-__SYSCALL(320, compat_sys_get_mempolicy)
-__SYSCALL(321, compat_sys_set_mempolicy)
-__SYSCALL(322, compat_sys_openat)
-__SYSCALL(323, sys_mkdirat)
-__SYSCALL(324, sys_mknodat)
-__SYSCALL(325, sys_fchownat)
-__SYSCALL(326, compat_sys_futimesat)
-__SYSCALL(327, sys_fstatat64)
-__SYSCALL(328, sys_unlinkat)
-__SYSCALL(329, sys_renameat)
-__SYSCALL(330, sys_linkat)
-__SYSCALL(331, sys_symlinkat)
-__SYSCALL(332, sys_readlinkat)
-__SYSCALL(333, sys_fchmodat)
-__SYSCALL(334, sys_faccessat)
-__SYSCALL(335, compat_sys_pselect6)
-__SYSCALL(336, compat_sys_ppoll)
-__SYSCALL(337, sys_unshare)
-__SYSCALL(338, compat_sys_set_robust_list)
-__SYSCALL(339, compat_sys_get_robust_list)
-__SYSCALL(340, sys_splice)
-__SYSCALL(341, compat_sys_sync_file_range2_wrapper)
-__SYSCALL(342, sys_tee)
-__SYSCALL(343, compat_sys_vmsplice)
-__SYSCALL(344, compat_sys_move_pages)
-__SYSCALL(345, sys_getcpu)
-__SYSCALL(346, compat_sys_epoll_pwait)
-__SYSCALL(347, compat_sys_kexec_load)
-__SYSCALL(348, compat_sys_utimensat)
-__SYSCALL(349, compat_sys_signalfd)
-__SYSCALL(350, sys_timerfd_create)
-__SYSCALL(351, sys_eventfd)
-__SYSCALL(352, compat_sys_fallocate_wrapper)
-__SYSCALL(353, compat_sys_timerfd_settime)
-__SYSCALL(354, compat_sys_timerfd_gettime)
-__SYSCALL(355, compat_sys_signalfd4)
-__SYSCALL(356, sys_eventfd2)
-__SYSCALL(357, sys_epoll_create1)
-__SYSCALL(358, sys_dup3)
-__SYSCALL(359, sys_pipe2)
-__SYSCALL(360, sys_inotify_init1)
-__SYSCALL(361, compat_sys_preadv)
-__SYSCALL(362, compat_sys_pwritev)
-__SYSCALL(363, compat_sys_rt_tgsigqueueinfo)
-__SYSCALL(364, sys_perf_event_open)
-__SYSCALL(365, compat_sys_recvmmsg)
-__SYSCALL(366, sys_accept4)
-__SYSCALL(367, sys_fanotify_init)
-__SYSCALL(368, compat_sys_fanotify_mark)
-__SYSCALL(369, sys_prlimit64)
-__SYSCALL(370, sys_name_to_handle_at)
-__SYSCALL(371, compat_sys_open_by_handle_at)
-__SYSCALL(372, compat_sys_clock_adjtime)
-__SYSCALL(373, sys_syncfs)
-__SYSCALL(374, compat_sys_sendmmsg)
-__SYSCALL(375, sys_setns)
-__SYSCALL(376, compat_sys_process_vm_readv)
-__SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_kcmp)
-__SYSCALL(379, sys_finit_module)
-__SYSCALL(380, sys_sched_setattr)
-__SYSCALL(381, sys_sched_getattr)
-__SYSCALL(382, sys_renameat2)
-
-#define __NR_compat_syscalls           383
-
-/*
- * Compat syscall numbers used by the AArch64 kernel.
- */
-#define __NR_compat_restart_syscall    0
-#define __NR_compat_sigreturn          119
-#define __NR_compat_rt_sigreturn       173
-
-
-/*
- * The following SVCs are ARM private.
- */
-#define __ARM_NR_COMPAT_BASE           0x0f0000
-#define __ARM_NR_compat_cacheflush     (__ARM_NR_COMPAT_BASE+2)
-#define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE+5)
+#define __NR_restart_syscall 0
+__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
+#define __NR_exit 1
+__SYSCALL(__NR_exit, sys_exit)
+#define __NR_fork 2
+__SYSCALL(__NR_fork, sys_fork)
+#define __NR_read 3
+__SYSCALL(__NR_read, sys_read)
+#define __NR_write 4
+__SYSCALL(__NR_write, sys_write)
+#define __NR_open 5
+__SYSCALL(__NR_open, compat_sys_open)
+#define __NR_close 6
+__SYSCALL(__NR_close, sys_close)
+                       /* 7 was sys_waitpid */
+__SYSCALL(7, sys_ni_syscall)
+#define __NR_creat 8
+__SYSCALL(__NR_creat, sys_creat)
+#define __NR_link 9
+__SYSCALL(__NR_link, sys_link)
+#define __NR_unlink 10
+__SYSCALL(__NR_unlink, sys_unlink)
+#define __NR_execve 11
+__SYSCALL(__NR_execve, compat_sys_execve)
+#define __NR_chdir 12
+__SYSCALL(__NR_chdir, sys_chdir)
+                       /* 13 was sys_time */
+__SYSCALL(13, sys_ni_syscall)
+#define __NR_mknod 14
+__SYSCALL(__NR_mknod, sys_mknod)
+#define __NR_chmod 15
+__SYSCALL(__NR_chmod, sys_chmod)
+#define __NR_lchown 16
+__SYSCALL(__NR_lchown, sys_lchown16)
+                       /* 17 was sys_break */
+__SYSCALL(17, sys_ni_syscall)
+                       /* 18 was sys_stat */
+__SYSCALL(18, sys_ni_syscall)
+#define __NR_lseek 19
+__SYSCALL(__NR_lseek, compat_sys_lseek)
+#define __NR_getpid 20
+__SYSCALL(__NR_getpid, sys_getpid)
+#define __NR_mount 21
+__SYSCALL(__NR_mount, compat_sys_mount)
+                       /* 22 was sys_umount */
+__SYSCALL(22, sys_ni_syscall)
+#define __NR_setuid 23
+__SYSCALL(__NR_setuid, sys_setuid16)
+#define __NR_getuid 24
+__SYSCALL(__NR_getuid, sys_getuid16)
+                       /* 25 was sys_stime */
+__SYSCALL(25, sys_ni_syscall)
+#define __NR_ptrace 26
+__SYSCALL(__NR_ptrace, compat_sys_ptrace)
+                       /* 27 was sys_alarm */
+__SYSCALL(27, sys_ni_syscall)
+                       /* 28 was sys_fstat */
+__SYSCALL(28, sys_ni_syscall)
+#define __NR_pause 29
+__SYSCALL(__NR_pause, sys_pause)
+                       /* 30 was sys_utime */
+__SYSCALL(30, sys_ni_syscall)
+                       /* 31 was sys_stty */
+__SYSCALL(31, sys_ni_syscall)
+                       /* 32 was sys_gtty */
+__SYSCALL(32, sys_ni_syscall)
+#define __NR_access 33
+__SYSCALL(__NR_access, sys_access)
+#define __NR_nice 34
+__SYSCALL(__NR_nice, sys_nice)
+                       /* 35 was sys_ftime */
+__SYSCALL(35, sys_ni_syscall)
+#define __NR_sync 36
+__SYSCALL(__NR_sync, sys_sync)
+#define __NR_kill 37
+__SYSCALL(__NR_kill, sys_kill)
+#define __NR_rename 38
+__SYSCALL(__NR_rename, sys_rename)
+#define __NR_mkdir 39
+__SYSCALL(__NR_mkdir, sys_mkdir)
+#define __NR_rmdir 40
+__SYSCALL(__NR_rmdir, sys_rmdir)
+#define __NR_dup 41
+__SYSCALL(__NR_dup, sys_dup)
+#define __NR_pipe 42
+__SYSCALL(__NR_pipe, sys_pipe)
+#define __NR_times 43
+__SYSCALL(__NR_times, compat_sys_times)
+                       /* 44 was sys_prof */
+__SYSCALL(44, sys_ni_syscall)
+#define __NR_brk 45
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_setgid 46
+__SYSCALL(__NR_setgid, sys_setgid16)
+#define __NR_getgid 47
+__SYSCALL(__NR_getgid, sys_getgid16)
+                       /* 48 was sys_signal */
+__SYSCALL(48, sys_ni_syscall)
+#define __NR_geteuid 49
+__SYSCALL(__NR_geteuid, sys_geteuid16)
+#define __NR_getegid 50
+__SYSCALL(__NR_getegid, sys_getegid16)
+#define __NR_acct 51
+__SYSCALL(__NR_acct, sys_acct)
+#define __NR_umount2 52
+__SYSCALL(__NR_umount2, sys_umount)
+                       /* 53 was sys_lock */
+__SYSCALL(53, sys_ni_syscall)
+#define __NR_ioctl 54
+__SYSCALL(__NR_ioctl, compat_sys_ioctl)
+#define __NR_fcntl 55
+__SYSCALL(__NR_fcntl, compat_sys_fcntl)
+                       /* 56 was sys_mpx */
+__SYSCALL(56, sys_ni_syscall)
+#define __NR_setpgid 57
+__SYSCALL(__NR_setpgid, sys_setpgid)
+                       /* 58 was sys_ulimit */
+__SYSCALL(58, sys_ni_syscall)
+                       /* 59 was sys_olduname */
+__SYSCALL(59, sys_ni_syscall)
+#define __NR_umask 60
+__SYSCALL(__NR_umask, sys_umask)
+#define __NR_chroot 61
+__SYSCALL(__NR_chroot, sys_chroot)
+#define __NR_ustat 62
+__SYSCALL(__NR_ustat, compat_sys_ustat)
+#define __NR_dup2 63
+__SYSCALL(__NR_dup2, sys_dup2)
+#define __NR_getppid 64
+__SYSCALL(__NR_getppid, sys_getppid)
+#define __NR_getpgrp 65
+__SYSCALL(__NR_getpgrp, sys_getpgrp)
+#define __NR_setsid 66
+__SYSCALL(__NR_setsid, sys_setsid)
+#define __NR_sigaction 67
+__SYSCALL(__NR_sigaction, compat_sys_sigaction)
+                       /* 68 was sys_sgetmask */
+__SYSCALL(68, sys_ni_syscall)
+                       /* 69 was sys_ssetmask */
+__SYSCALL(69, sys_ni_syscall)
+#define __NR_setreuid 70
+__SYSCALL(__NR_setreuid, sys_setreuid16)
+#define __NR_setregid 71
+__SYSCALL(__NR_setregid, sys_setregid16)
+#define __NR_sigsuspend 72
+__SYSCALL(__NR_sigsuspend, sys_sigsuspend)
+#define __NR_sigpending 73
+__SYSCALL(__NR_sigpending, compat_sys_sigpending)
+#define __NR_sethostname 74
+__SYSCALL(__NR_sethostname, sys_sethostname)
+#define __NR_setrlimit 75
+__SYSCALL(__NR_setrlimit, compat_sys_setrlimit)
+                       /* 76 was compat_sys_getrlimit */
+__SYSCALL(76, sys_ni_syscall)
+#define __NR_getrusage 77
+__SYSCALL(__NR_getrusage, compat_sys_getrusage)
+#define __NR_gettimeofday 78
+__SYSCALL(__NR_gettimeofday, compat_sys_gettimeofday)
+#define __NR_settimeofday 79
+__SYSCALL(__NR_settimeofday, compat_sys_settimeofday)
+#define __NR_getgroups 80
+__SYSCALL(__NR_getgroups, sys_getgroups16)
+#define __NR_setgroups 81
+__SYSCALL(__NR_setgroups, sys_setgroups16)
+                       /* 82 was compat_sys_select */
+__SYSCALL(82, sys_ni_syscall)
+#define __NR_symlink 83
+__SYSCALL(__NR_symlink, sys_symlink)
+                       /* 84 was sys_lstat */
+__SYSCALL(84, sys_ni_syscall)
+#define __NR_readlink 85
+__SYSCALL(__NR_readlink, sys_readlink)
+#define __NR_uselib 86
+__SYSCALL(__NR_uselib, sys_uselib)
+#define __NR_swapon 87
+__SYSCALL(__NR_swapon, sys_swapon)
+#define __NR_reboot 88
+__SYSCALL(__NR_reboot, sys_reboot)
+                       /* 89 was sys_readdir */
+__SYSCALL(89, sys_ni_syscall)
+                       /* 90 was sys_mmap */
+__SYSCALL(90, sys_ni_syscall)
+#define __NR_munmap 91
+__SYSCALL(__NR_munmap, sys_munmap)
+#define __NR_truncate 92
+__SYSCALL(__NR_truncate, compat_sys_truncate)
+#define __NR_ftruncate 93
+__SYSCALL(__NR_ftruncate, compat_sys_ftruncate)
+#define __NR_fchmod 94
+__SYSCALL(__NR_fchmod, sys_fchmod)
+#define __NR_fchown 95
+__SYSCALL(__NR_fchown, sys_fchown16)
+#define __NR_getpriority 96
+__SYSCALL(__NR_getpriority, sys_getpriority)
+#define __NR_setpriority 97
+__SYSCALL(__NR_setpriority, sys_setpriority)
+                       /* 98 was sys_profil */
+__SYSCALL(98, sys_ni_syscall)
+#define __NR_statfs 99
+__SYSCALL(__NR_statfs, compat_sys_statfs)
+#define __NR_fstatfs 100
+__SYSCALL(__NR_fstatfs, compat_sys_fstatfs)
+                       /* 101 was sys_ioperm */
+__SYSCALL(101, sys_ni_syscall)
+                       /* 102 was sys_socketcall */
+__SYSCALL(102, sys_ni_syscall)
+#define __NR_syslog 103
+__SYSCALL(__NR_syslog, sys_syslog)
+#define __NR_setitimer 104
+__SYSCALL(__NR_setitimer, compat_sys_setitimer)
+#define __NR_getitimer 105
+__SYSCALL(__NR_getitimer, compat_sys_getitimer)
+#define __NR_stat 106
+__SYSCALL(__NR_stat, compat_sys_newstat)
+#define __NR_lstat 107
+__SYSCALL(__NR_lstat, compat_sys_newlstat)
+#define __NR_fstat 108
+__SYSCALL(__NR_fstat, compat_sys_newfstat)
+                       /* 109 was sys_uname */
+__SYSCALL(109, sys_ni_syscall)
+                       /* 110 was sys_iopl */
+__SYSCALL(110, sys_ni_syscall)
+#define __NR_vhangup 111
+__SYSCALL(__NR_vhangup, sys_vhangup)
+                       /* 112 was sys_idle */
+__SYSCALL(112, sys_ni_syscall)
+                       /* 113 was sys_syscall */
+__SYSCALL(113, sys_ni_syscall)
+#define __NR_wait4 114
+__SYSCALL(__NR_wait4, compat_sys_wait4)
+#define __NR_swapoff 115
+__SYSCALL(__NR_swapoff, sys_swapoff)
+#define __NR_sysinfo 116
+__SYSCALL(__NR_sysinfo, compat_sys_sysinfo)
+                       /* 117 was sys_ipc */
+__SYSCALL(117, sys_ni_syscall)
+#define __NR_fsync 118
+__SYSCALL(__NR_fsync, sys_fsync)
+#define __NR_sigreturn 119
+__SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper)
+#define __NR_clone 120
+__SYSCALL(__NR_clone, sys_clone)
+#define __NR_setdomainname 121
+__SYSCALL(__NR_setdomainname, sys_setdomainname)
+#define __NR_uname 122
+__SYSCALL(__NR_uname, sys_newuname)
+                       /* 123 was sys_modify_ldt */
+__SYSCALL(123, sys_ni_syscall)
+#define __NR_adjtimex 124
+__SYSCALL(__NR_adjtimex, compat_sys_adjtimex)
+#define __NR_mprotect 125
+__SYSCALL(__NR_mprotect, sys_mprotect)
+#define __NR_sigprocmask 126
+__SYSCALL(__NR_sigprocmask, compat_sys_sigprocmask)
+                       /* 127 was sys_create_module */
+__SYSCALL(127, sys_ni_syscall)
+#define __NR_init_module 128
+__SYSCALL(__NR_init_module, sys_init_module)
+#define __NR_delete_module 129
+__SYSCALL(__NR_delete_module, sys_delete_module)
+                       /* 130 was sys_get_kernel_syms */
+__SYSCALL(130, sys_ni_syscall)
+#define __NR_quotactl 131
+__SYSCALL(__NR_quotactl, sys_quotactl)
+#define __NR_getpgid 132
+__SYSCALL(__NR_getpgid, sys_getpgid)
+#define __NR_fchdir 133
+__SYSCALL(__NR_fchdir, sys_fchdir)
+#define __NR_bdflush 134
+__SYSCALL(__NR_bdflush, sys_bdflush)
+#define __NR_sysfs 135
+__SYSCALL(__NR_sysfs, sys_sysfs)
+#define __NR_personality 136
+__SYSCALL(__NR_personality, sys_personality)
+                       /* 137 was sys_afs_syscall */
+__SYSCALL(137, sys_ni_syscall)
+#define __NR_setfsuid 138
+__SYSCALL(__NR_setfsuid, sys_setfsuid16)
+#define __NR_setfsgid 139
+__SYSCALL(__NR_setfsgid, sys_setfsgid16)
+#define __NR__llseek 140
+__SYSCALL(__NR__llseek, sys_llseek)
+#define __NR_getdents 141
+__SYSCALL(__NR_getdents, compat_sys_getdents)
+#define __NR__newselect 142
+__SYSCALL(__NR__newselect, compat_sys_select)
+#define __NR_flock 143
+__SYSCALL(__NR_flock, sys_flock)
+#define __NR_msync 144
+__SYSCALL(__NR_msync, sys_msync)
+#define __NR_readv 145
+__SYSCALL(__NR_readv, compat_sys_readv)
+#define __NR_writev 146
+__SYSCALL(__NR_writev, compat_sys_writev)
+#define __NR_getsid 147
+__SYSCALL(__NR_getsid, sys_getsid)
+#define __NR_fdatasync 148
+__SYSCALL(__NR_fdatasync, sys_fdatasync)
+#define __NR__sysctl 149
+__SYSCALL(__NR__sysctl, compat_sys_sysctl)
+#define __NR_mlock 150
+__SYSCALL(__NR_mlock, sys_mlock)
+#define __NR_munlock 151
+__SYSCALL(__NR_munlock, sys_munlock)
+#define __NR_mlockall 152
+__SYSCALL(__NR_mlockall, sys_mlockall)
+#define __NR_munlockall 153
+__SYSCALL(__NR_munlockall, sys_munlockall)
+#define __NR_sched_setparam 154
+__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
+#define __NR_sched_getparam 155
+__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
+#define __NR_sched_setscheduler 156
+__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
+#define __NR_sched_getscheduler 157
+__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
+#define __NR_sched_yield 158
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_sched_get_priority_max 159
+__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
+#define __NR_sched_get_priority_min 160
+__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#define __NR_sched_rr_get_interval 161
+__SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval)
+#define __NR_nanosleep 162
+__SYSCALL(__NR_nanosleep, compat_sys_nanosleep)
+#define __NR_mremap 163
+__SYSCALL(__NR_mremap, sys_mremap)
+#define __NR_setresuid 164
+__SYSCALL(__NR_setresuid, sys_setresuid16)
+#define __NR_getresuid 165
+__SYSCALL(__NR_getresuid, sys_getresuid16)
+                       /* 166 was sys_vm86 */
+__SYSCALL(166, sys_ni_syscall)
+                       /* 167 was sys_query_module */
+__SYSCALL(167, sys_ni_syscall)
+#define __NR_poll 168
+__SYSCALL(__NR_poll, sys_poll)
+#define __NR_nfsservctl 169
+__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
+#define __NR_setresgid 170
+__SYSCALL(__NR_setresgid, sys_setresgid16)
+#define __NR_getresgid 171
+__SYSCALL(__NR_getresgid, sys_getresgid16)
+#define __NR_prctl 172
+__SYSCALL(__NR_prctl, sys_prctl)
+#define __NR_rt_sigreturn 173
+__SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper)
+#define __NR_rt_sigaction 174
+__SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction)
+#define __NR_rt_sigprocmask 175
+__SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask)
+#define __NR_rt_sigpending 176
+__SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending)
+#define __NR_rt_sigtimedwait 177
+__SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait)
+#define __NR_rt_sigqueueinfo 178
+__SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo)
+#define __NR_rt_sigsuspend 179
+__SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend)
+#define __NR_pread64 180
+__SYSCALL(__NR_pread64, compat_sys_pread64_wrapper)
+#define __NR_pwrite64 181
+__SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper)
+#define __NR_chown 182
+__SYSCALL(__NR_chown, sys_chown16)
+#define __NR_getcwd 183
+__SYSCALL(__NR_getcwd, sys_getcwd)
+#define __NR_capget 184
+__SYSCALL(__NR_capget, sys_capget)
+#define __NR_capset 185
+__SYSCALL(__NR_capset, sys_capset)
+#define __NR_sigaltstack 186
+__SYSCALL(__NR_sigaltstack, compat_sys_sigaltstack)
+#define __NR_sendfile 187
+__SYSCALL(__NR_sendfile, compat_sys_sendfile)
+                       /* 188 reserved */
+__SYSCALL(188, sys_ni_syscall)
+                       /* 189 reserved */
+__SYSCALL(189, sys_ni_syscall)
+#define __NR_vfork 190
+__SYSCALL(__NR_vfork, sys_vfork)
+#define __NR_ugetrlimit 191    /* SuS compliant getrlimit */
+__SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit)
+#define __NR_mmap2 192
+__SYSCALL(__NR_mmap2, sys_mmap_pgoff)
+#define __NR_truncate64 193
+__SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper)
+#define __NR_ftruncate64 194
+__SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper)
+#define __NR_stat64 195
+__SYSCALL(__NR_stat64, sys_stat64)
+#define __NR_lstat64 196
+__SYSCALL(__NR_lstat64, sys_lstat64)
+#define __NR_fstat64 197
+__SYSCALL(__NR_fstat64, sys_fstat64)
+#define __NR_lchown32 198
+__SYSCALL(__NR_lchown32, sys_lchown)
+#define __NR_getuid32 199
+__SYSCALL(__NR_getuid32, sys_getuid)
+#define __NR_getgid32 200
+__SYSCALL(__NR_getgid32, sys_getgid)
+#define __NR_geteuid32 201
+__SYSCALL(__NR_geteuid32, sys_geteuid)
+#define __NR_getegid32 202
+__SYSCALL(__NR_getegid32, sys_getegid)
+#define __NR_setreuid32 203
+__SYSCALL(__NR_setreuid32, sys_setreuid)
+#define __NR_setregid32 204
+__SYSCALL(__NR_setregid32, sys_setregid)
+#define __NR_getgroups32 205
+__SYSCALL(__NR_getgroups32, sys_getgroups)
+#define __NR_setgroups32 206
+__SYSCALL(__NR_setgroups32, sys_setgroups)
+#define __NR_fchown32 207
+__SYSCALL(__NR_fchown32, sys_fchown)
+#define __NR_setresuid32 208
+__SYSCALL(__NR_setresuid32, sys_setresuid)
+#define __NR_getresuid32 209
+__SYSCALL(__NR_getresuid32, sys_getresuid)
+#define __NR_setresgid32 210
+__SYSCALL(__NR_setresgid32, sys_setresgid)
+#define __NR_getresgid32 211
+__SYSCALL(__NR_getresgid32, sys_getresgid)
+#define __NR_chown32 212
+__SYSCALL(__NR_chown32, sys_chown)
+#define __NR_setuid32 213
+__SYSCALL(__NR_setuid32, sys_setuid)
+#define __NR_setgid32 214
+__SYSCALL(__NR_setgid32, sys_setgid)
+#define __NR_setfsuid32 215
+__SYSCALL(__NR_setfsuid32, sys_setfsuid)
+#define __NR_setfsgid32 216
+__SYSCALL(__NR_setfsgid32, sys_setfsgid)
+#define __NR_getdents64 217
+__SYSCALL(__NR_getdents64, compat_sys_getdents64)
+#define __NR_pivot_root 218
+__SYSCALL(__NR_pivot_root, sys_pivot_root)
+#define __NR_mincore 219
+__SYSCALL(__NR_mincore, sys_mincore)
+#define __NR_madvise 220
+__SYSCALL(__NR_madvise, sys_madvise)
+#define __NR_fcntl64 221
+__SYSCALL(__NR_fcntl64, compat_sys_fcntl64)
+                       /* 222 for tux */
+__SYSCALL(222, sys_ni_syscall)
+                       /* 223 is unused */
+__SYSCALL(223, sys_ni_syscall)
+#define __NR_gettid 224
+__SYSCALL(__NR_gettid, sys_gettid)
+#define __NR_readahead 225
+__SYSCALL(__NR_readahead, compat_sys_readahead_wrapper)
+#define __NR_setxattr 226
+__SYSCALL(__NR_setxattr, sys_setxattr)
+#define __NR_lsetxattr 227
+__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
+#define __NR_fsetxattr 228
+__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
+#define __NR_getxattr 229
+__SYSCALL(__NR_getxattr, sys_getxattr)
+#define __NR_lgetxattr 230
+__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
+#define __NR_fgetxattr 231
+__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
+#define __NR_listxattr 232
+__SYSCALL(__NR_listxattr, sys_listxattr)
+#define __NR_llistxattr 233
+__SYSCALL(__NR_llistxattr, sys_llistxattr)
+#define __NR_flistxattr 234
+__SYSCALL(__NR_flistxattr, sys_flistxattr)
+#define __NR_removexattr 235
+__SYSCALL(__NR_removexattr, sys_removexattr)
+#define __NR_lremovexattr 236
+__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
+#define __NR_fremovexattr 237
+__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
+#define __NR_tkill 238
+__SYSCALL(__NR_tkill, sys_tkill)
+#define __NR_sendfile64 239
+__SYSCALL(__NR_sendfile64, sys_sendfile64)
+#define __NR_futex 240
+__SYSCALL(__NR_futex, compat_sys_futex)
+#define __NR_sched_setaffinity 241
+__SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity)
+#define __NR_sched_getaffinity 242
+__SYSCALL(__NR_sched_getaffinity, compat_sys_sched_getaffinity)
+#define __NR_io_setup 243
+__SYSCALL(__NR_io_setup, compat_sys_io_setup)
+#define __NR_io_destroy 244
+__SYSCALL(__NR_io_destroy, sys_io_destroy)
+#define __NR_io_getevents 245
+__SYSCALL(__NR_io_getevents, compat_sys_io_getevents)
+#define __NR_io_submit 246
+__SYSCALL(__NR_io_submit, compat_sys_io_submit)
+#define __NR_io_cancel 247
+__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#define __NR_exit_group 248
+__SYSCALL(__NR_exit_group, sys_exit_group)
+#define __NR_lookup_dcookie 249
+__SYSCALL(__NR_lookup_dcookie, compat_sys_lookup_dcookie)
+#define __NR_epoll_create 250
+__SYSCALL(__NR_epoll_create, sys_epoll_create)
+#define __NR_epoll_ctl 251
+__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
+#define __NR_epoll_wait 252
+__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
+#define __NR_remap_file_pages 253
+__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
+                       /* 254 for set_thread_area */
+__SYSCALL(254, sys_ni_syscall)
+                       /* 255 for get_thread_area */
+__SYSCALL(255, sys_ni_syscall)
+#define __NR_set_tid_address 256
+__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
+#define __NR_timer_create 257
+__SYSCALL(__NR_timer_create, compat_sys_timer_create)
+#define __NR_timer_settime 258
+__SYSCALL(__NR_timer_settime, compat_sys_timer_settime)
+#define __NR_timer_gettime 259
+__SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime)
+#define __NR_timer_getoverrun 260
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_delete 261
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 262
+__SYSCALL(__NR_clock_settime, compat_sys_clock_settime)
+#define __NR_clock_gettime 263
+__SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime)
+#define __NR_clock_getres 264
+__SYSCALL(__NR_clock_getres, compat_sys_clock_getres)
+#define __NR_clock_nanosleep 265
+__SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep)
+#define __NR_statfs64 266
+__SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper)
+#define __NR_fstatfs64 267
+__SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper)
+#define __NR_tgkill 268
+__SYSCALL(__NR_tgkill, sys_tgkill)
+#define __NR_utimes 269
+__SYSCALL(__NR_utimes, compat_sys_utimes)
+#define __NR_arm_fadvise64_64 270
+__SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper)
+#define __NR_pciconfig_iobase 271
+__SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase)
+#define __NR_pciconfig_read 272
+__SYSCALL(__NR_pciconfig_read, sys_pciconfig_read)
+#define __NR_pciconfig_write 273
+__SYSCALL(__NR_pciconfig_write, sys_pciconfig_write)
+#define __NR_mq_open 274
+__SYSCALL(__NR_mq_open, compat_sys_mq_open)
+#define __NR_mq_unlink 275
+__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#define __NR_mq_timedsend 276
+__SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend)
+#define __NR_mq_timedreceive 277
+__SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive)
+#define __NR_mq_notify 278
+__SYSCALL(__NR_mq_notify, compat_sys_mq_notify)
+#define __NR_mq_getsetattr 279
+__SYSCALL(__NR_mq_getsetattr, compat_sys_mq_getsetattr)
+#define __NR_waitid 280
+__SYSCALL(__NR_waitid, compat_sys_waitid)
+#define __NR_socket 281
+__SYSCALL(__NR_socket, sys_socket)
+#define __NR_bind 282
+__SYSCALL(__NR_bind, sys_bind)
+#define __NR_connect 283
+__SYSCALL(__NR_connect, sys_connect)
+#define __NR_listen 284
+__SYSCALL(__NR_listen, sys_listen)
+#define __NR_accept 285
+__SYSCALL(__NR_accept, sys_accept)
+#define __NR_getsockname 286
+__SYSCALL(__NR_getsockname, sys_getsockname)
+#define __NR_getpeername 287
+__SYSCALL(__NR_getpeername, sys_getpeername)
+#define __NR_socketpair 288
+__SYSCALL(__NR_socketpair, sys_socketpair)
+#define __NR_send 289
+__SYSCALL(__NR_send, sys_send)
+#define __NR_sendto 290
+__SYSCALL(__NR_sendto, sys_sendto)
+#define __NR_recv 291
+__SYSCALL(__NR_recv, compat_sys_recv)
+#define __NR_recvfrom 292
+__SYSCALL(__NR_recvfrom, compat_sys_recvfrom)
+#define __NR_shutdown 293
+__SYSCALL(__NR_shutdown, sys_shutdown)
+#define __NR_setsockopt 294
+__SYSCALL(__NR_setsockopt, compat_sys_setsockopt)
+#define __NR_getsockopt 295
+__SYSCALL(__NR_getsockopt, compat_sys_getsockopt)
+#define __NR_sendmsg 296
+__SYSCALL(__NR_sendmsg, compat_sys_sendmsg)
+#define __NR_recvmsg 297
+__SYSCALL(__NR_recvmsg, compat_sys_recvmsg)
+#define __NR_semop 298
+__SYSCALL(__NR_semop, sys_semop)
+#define __NR_semget 299
+__SYSCALL(__NR_semget, sys_semget)
+#define __NR_semctl 300
+__SYSCALL(__NR_semctl, compat_sys_semctl)
+#define __NR_msgsnd 301
+__SYSCALL(__NR_msgsnd, compat_sys_msgsnd)
+#define __NR_msgrcv 302
+__SYSCALL(__NR_msgrcv, compat_sys_msgrcv)
+#define __NR_msgget 303
+__SYSCALL(__NR_msgget, sys_msgget)
+#define __NR_msgctl 304
+__SYSCALL(__NR_msgctl, compat_sys_msgctl)
+#define __NR_shmat 305
+__SYSCALL(__NR_shmat, compat_sys_shmat)
+#define __NR_shmdt 306
+__SYSCALL(__NR_shmdt, sys_shmdt)
+#define __NR_shmget 307
+__SYSCALL(__NR_shmget, sys_shmget)
+#define __NR_shmctl 308
+__SYSCALL(__NR_shmctl, compat_sys_shmctl)
+#define __NR_add_key 309
+__SYSCALL(__NR_add_key, sys_add_key)
+#define __NR_request_key 310
+__SYSCALL(__NR_request_key, sys_request_key)
+#define __NR_keyctl 311
+__SYSCALL(__NR_keyctl, compat_sys_keyctl)
+#define __NR_semtimedop 312
+__SYSCALL(__NR_semtimedop, compat_sys_semtimedop)
+#define __NR_vserver 313
+__SYSCALL(__NR_vserver, sys_ni_syscall)
+#define __NR_ioprio_set 314
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 315
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
+#define __NR_inotify_init 316
+__SYSCALL(__NR_inotify_init, sys_inotify_init)
+#define __NR_inotify_add_watch 317
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch 318
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+#define __NR_mbind 319
+__SYSCALL(__NR_mbind, compat_sys_mbind)
+#define __NR_get_mempolicy 320
+__SYSCALL(__NR_get_mempolicy, compat_sys_get_mempolicy)
+#define __NR_set_mempolicy 321
+__SYSCALL(__NR_set_mempolicy, compat_sys_set_mempolicy)
+#define __NR_openat 322
+__SYSCALL(__NR_openat, compat_sys_openat)
+#define __NR_mkdirat 323
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_mknodat 324
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_fchownat 325
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_futimesat 326
+__SYSCALL(__NR_futimesat, compat_sys_futimesat)
+#define __NR_fstatat64 327
+__SYSCALL(__NR_fstatat64, sys_fstatat64)
+#define __NR_unlinkat 328
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_renameat 329
+__SYSCALL(__NR_renameat, sys_renameat)
+#define __NR_linkat 330
+__SYSCALL(__NR_linkat, sys_linkat)
+#define __NR_symlinkat 331
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_readlinkat 332
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR_fchmodat 333
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_faccessat 334
+__SYSCALL(__NR_faccessat, sys_faccessat)
+#define __NR_pselect6 335
+__SYSCALL(__NR_pselect6, compat_sys_pselect6)
+#define __NR_ppoll 336
+__SYSCALL(__NR_ppoll, compat_sys_ppoll)
+#define __NR_unshare 337
+__SYSCALL(__NR_unshare, sys_unshare)
+#define __NR_set_robust_list 338
+__SYSCALL(__NR_set_robust_list, compat_sys_set_robust_list)
+#define __NR_get_robust_list 339
+__SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list)
+#define __NR_splice 340
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_sync_file_range2 341
+__SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper)
+#define __NR_tee 342
+__SYSCALL(__NR_tee, sys_tee)
+#define __NR_vmsplice 343
+__SYSCALL(__NR_vmsplice, compat_sys_vmsplice)
+#define __NR_move_pages 344
+__SYSCALL(__NR_move_pages, compat_sys_move_pages)
+#define __NR_getcpu 345
+__SYSCALL(__NR_getcpu, sys_getcpu)
+#define __NR_epoll_pwait 346
+__SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait)
+#define __NR_kexec_load 347
+__SYSCALL(__NR_kexec_load, compat_sys_kexec_load)
+#define __NR_utimensat 348
+__SYSCALL(__NR_utimensat, compat_sys_utimensat)
+#define __NR_signalfd 349
+__SYSCALL(__NR_signalfd, compat_sys_signalfd)
+#define __NR_timerfd_create 350
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#define __NR_eventfd 351
+__SYSCALL(__NR_eventfd, sys_eventfd)
+#define __NR_fallocate 352
+__SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper)
+#define __NR_timerfd_settime 353
+__SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime)
+#define __NR_timerfd_gettime 354
+__SYSCALL(__NR_timerfd_gettime, compat_sys_timerfd_gettime)
+#define __NR_signalfd4 355
+__SYSCALL(__NR_signalfd4, compat_sys_signalfd4)
+#define __NR_eventfd2 356
+__SYSCALL(__NR_eventfd2, sys_eventfd2)
+#define __NR_epoll_create1 357
+__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
+#define __NR_dup3 358
+__SYSCALL(__NR_dup3, sys_dup3)
+#define __NR_pipe2 359
+__SYSCALL(__NR_pipe2, sys_pipe2)
+#define __NR_inotify_init1 360
+__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_preadv 361
+__SYSCALL(__NR_preadv, compat_sys_preadv)
+#define __NR_pwritev 362
+__SYSCALL(__NR_pwritev, compat_sys_pwritev)
+#define __NR_rt_tgsigqueueinfo 363
+__SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo)
+#define __NR_perf_event_open 364
+__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
+#define __NR_recvmmsg 365
+__SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg)
+#define __NR_accept4 366
+__SYSCALL(__NR_accept4, sys_accept4)
+#define __NR_fanotify_init 367
+__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
+#define __NR_fanotify_mark 368
+__SYSCALL(__NR_fanotify_mark, compat_sys_fanotify_mark)
+#define __NR_prlimit64 369
+__SYSCALL(__NR_prlimit64, sys_prlimit64)
+#define __NR_name_to_handle_at 370
+__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
+#define __NR_open_by_handle_at 371
+__SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at)
+#define __NR_clock_adjtime 372
+__SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime)
+#define __NR_syncfs 373
+__SYSCALL(__NR_syncfs, sys_syncfs)
+#define __NR_sendmmsg 374
+__SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg)
+#define __NR_setns 375
+__SYSCALL(__NR_setns, sys_setns)
+#define __NR_process_vm_readv 376
+__SYSCALL(__NR_process_vm_readv, compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 377
+__SYSCALL(__NR_process_vm_writev, compat_sys_process_vm_writev)
+#define __NR_kcmp 378
+__SYSCALL(__NR_kcmp, sys_kcmp)
+#define __NR_finit_module 379
+__SYSCALL(__NR_finit_module, sys_finit_module)
+#define __NR_sched_setattr 380
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+#define __NR_sched_getattr 381
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
+#define __NR_renameat2 382
+__SYSCALL(__NR_renameat2, sys_renameat2)
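
The table above is written to be expanded twice: a first pass picks up only the __NR_* definitions, and a second pass, with __SYSCALL redefined, emits the actual compat syscall table. A minimal sketch of the second pass follows; the typedef and table name are illustrative assumptions, not necessarily the kernel's own symbols:

	/* Illustrative second pass; names here are assumptions. */
	typedef long (*compat_syscall_fn_t)(void);

	#undef __SYSCALL
	#define __SYSCALL(nr, sym)	[nr] = (compat_syscall_fn_t)sym,

	static const compat_syscall_fn_t compat_sys_call_table[] = {
		[0 ... __NR_renameat2] = (compat_syscall_fn_t)sys_ni_syscall,
	#include <asm/unistd32.h>
	};
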
diff --git a/arch/arm64/include/uapi/asm/posix_types.h b/arch/arm64/include/uapi/asm/posix_types.h
new file mode 100644 (file)
index 0000000..7985ff6
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef __ASM_POSIX_TYPES_H
+#define __ASM_POSIX_TYPES_H
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
+
+#include <asm-generic/posix_types.h>
+
+#endif /*  __ASM_POSIX_TYPES_H */
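
The self-referential #define above is the usual asm-generic override idiom: defining __kernel_old_uid_t as a macro tells <asm-generic/posix_types.h> that this architecture already supplied the old 16-bit uid/gid types, so its generic fallback — sketched from the common pattern below, not quoted verbatim — is compiled out:

	/* Sketch of the asm-generic fallback this override suppresses. */
	#ifndef __kernel_old_uid_t
	typedef __kernel_uid_t	__kernel_old_uid_t;
	typedef __kernel_gid_t	__kernel_old_gid_t;
	#endif
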
index b72cf40..ee469be 100644 (file)
@@ -58,7 +58,7 @@ struct fpsimd_context {
 
 struct esr_context {
        struct _aarch64_ctx head;
-       u64 esr;
+       __u64 esr;
 };
 
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
index cdaedad..27c72ef 100644 (file)
@@ -15,7 +15,8 @@ CFLAGS_REMOVE_return_address.o = -pg
 arm64-obj-y            := cputable.o debug-monitors.o entry.o irq.o fpsimd.o   \
                           entry-fpsimd.o process.o ptrace.o setup.o signal.o   \
                           sys.o stacktrace.o time.o traps.o io.o vdso.o        \
-                          hyp-stub.o psci.o cpu_ops.o insn.o return_address.o
+                          hyp-stub.o psci.o cpu_ops.o insn.o return_address.o  \
+                          cpuinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)             += sys32.o kuser32.o signal32.o         \
                                           sys_compat.o
index d62d12f..cce9524 100644 (file)
@@ -30,8 +30,8 @@ const struct cpu_operations *cpu_ops[NR_CPUS];
 static const struct cpu_operations *supported_cpu_ops[] __initconst = {
 #ifdef CONFIG_SMP
        &smp_spin_table_ops,
-       &cpu_psci_ops,
 #endif
+       &cpu_psci_ops,
        NULL,
 };
 
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
new file mode 100644 (file)
index 0000000..f798f66
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ * Record and handle CPU attributes.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <asm/arch_timer.h>
+#include <asm/cachetype.h>
+#include <asm/cpu.h>
+#include <asm/cputype.h>
+
+#include <linux/bitops.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/smp.h>
+
+/*
+ * In case the boot CPU is hotpluggable, we record its initial state and
+ * current state separately. Certain system registers may contain different
+ * values depending on configuration at or after reset.
+ */
+DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+static struct cpuinfo_arm64 boot_cpu_data;
+
+static char *icache_policy_str[] = {
+       [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
+       [ICACHE_POLICY_AIVIVT] = "AIVIVT",
+       [ICACHE_POLICY_VIPT] = "VIPT",
+       [ICACHE_POLICY_PIPT] = "PIPT",
+};
+
+unsigned long __icache_flags;
+
+static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
+{
+       unsigned int cpu = smp_processor_id();
+       u32 l1ip = CTR_L1IP(info->reg_ctr);
+
+       if (l1ip != ICACHE_POLICY_PIPT)
+               set_bit(ICACHEF_ALIASING, &__icache_flags);
+       if (l1ip == ICACHE_POLICY_AIVIVT)
+               set_bit(ICACHEF_AIVIVT, &__icache_flags);
+
+       pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+}
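
CTR_L1IP() is assumed here to extract the L1 instruction-cache policy field of CTR_EL0, bits [15:14]; a plausible definition, shown for illustration only:

	/* Assumed helper: L1 I-cache policy field, CTR_EL0[15:14]. */
	#define CTR_L1IP(ctr)	(((ctr) >> 14) & 3UL)
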
+
+static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
+{
+       if ((boot & mask) == (cur & mask))
+               return 0;
+
+       pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n",
+               name, (unsigned long)boot, cpu, (unsigned long)cur);
+
+       return 1;
+}
+
+#define CHECK_MASK(field, mask, boot, cur, cpu) \
+       check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)
+
+#define CHECK(field, boot, cur, cpu) \
+       CHECK_MASK(field, ~0ULL, boot, cur, cpu)
+
+/*
+ * Verify that CPUs don't have unexpected differences that will cause problems.
+ */
+static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
+{
+       unsigned int cpu = smp_processor_id();
+       struct cpuinfo_arm64 *boot = &boot_cpu_data;
+       unsigned int diff = 0;
+
+       /*
+        * The kernel can handle differing I-cache policies, but otherwise
+        * caches should look identical. Userspace JITs will make use of
+        * *minLine.
+        */
+       diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu);
+
+       /*
+        * Userspace may perform DC ZVA instructions. Mismatched block sizes
+        * could result in too much or too little memory being zeroed if a
+        * process is preempted and migrated between CPUs.
+        */
+       diff |= CHECK(dczid, boot, cur, cpu);
+
+       /* If different, timekeeping will be broken (especially with KVM) */
+       diff |= CHECK(cntfrq, boot, cur, cpu);
+
+       /*
+        * Even in big.LITTLE, processors should be identical
+        * instruction-set-wise.
+        */
+       diff |= CHECK(id_aa64isar0, boot, cur, cpu);
+       diff |= CHECK(id_aa64isar1, boot, cur, cpu);
+
+       /*
+        * Differing PARange support is fine as long as all peripherals and
+        * memory are mapped within the minimum PARange of all CPUs.
+        * Linux should not care about secure memory.
+        * ID_AA64MMFR1 is currently RES0.
+        */
+       diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu);
+       diff |= CHECK(id_aa64mmfr1, boot, cur, cpu);
+
+       /*
+        * EL3 is not our concern.
+        * ID_AA64PFR1 is currently RES0.
+        */
+       diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu);
+       diff |= CHECK(id_aa64pfr1, boot, cur, cpu);
+
+       /*
+        * If we have AArch32, we care about 32-bit features for compat. These
+        * registers should be RES0 otherwise.
+        */
+       diff |= CHECK(id_isar0, boot, cur, cpu);
+       diff |= CHECK(id_isar1, boot, cur, cpu);
+       diff |= CHECK(id_isar2, boot, cur, cpu);
+       diff |= CHECK(id_isar3, boot, cur, cpu);
+       diff |= CHECK(id_isar4, boot, cur, cpu);
+       diff |= CHECK(id_isar5, boot, cur, cpu);
+       diff |= CHECK(id_mmfr0, boot, cur, cpu);
+       diff |= CHECK(id_mmfr1, boot, cur, cpu);
+       diff |= CHECK(id_mmfr2, boot, cur, cpu);
+       diff |= CHECK(id_mmfr3, boot, cur, cpu);
+       diff |= CHECK(id_pfr0, boot, cur, cpu);
+       diff |= CHECK(id_pfr1, boot, cur, cpu);
+
+       /*
+        * Mismatched CPU features are a recipe for disaster. Don't even
+        * pretend to support them.
+        */
+       WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
+                       "Unsupported CPU feature variation.");
+}
+
+static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
+{
+       info->reg_cntfrq = arch_timer_get_cntfrq();
+       info->reg_ctr = read_cpuid_cachetype();
+       info->reg_dczid = read_cpuid(DCZID_EL0);
+       info->reg_midr = read_cpuid_id();
+
+       info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
+       info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
+       info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
+       info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+       info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
+       info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+
+       info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
+       info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
+       info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
+       info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
+       info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
+       info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
+       info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
+       info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
+       info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
+       info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
+       info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
+       info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+
+       cpuinfo_detect_icache_policy(info);
+}
+
+void cpuinfo_store_cpu(void)
+{
+       struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
+       __cpuinfo_store_cpu(info);
+       cpuinfo_sanity_check(info);
+}
+
+void __init cpuinfo_store_boot_cpu(void)
+{
+       struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
+       __cpuinfo_store_cpu(info);
+
+       boot_cpu_data = *info;
+}
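
To make the CHECK plumbing concrete: a line such as CHECK(dczid, boot, cur, cpu) in cpuinfo_sanity_check() expands, by hand, to roughly the following call:

	/* Hand expansion of CHECK(dczid, boot, cur, cpu), for illustration. */
	diff |= check_reg_mask("dczid", ~0ULL,
			       boot->reg_dczid, cur->reg_dczid, cpu);
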
index a7fb874..fe5b940 100644 (file)
@@ -315,20 +315,20 @@ static int brk_handler(unsigned long addr, unsigned int esr,
 {
        siginfo_t info;
 
-       if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
-               return 0;
+       if (user_mode(regs)) {
+               info = (siginfo_t) {
+                       .si_signo = SIGTRAP,
+                       .si_errno = 0,
+                       .si_code  = TRAP_BRKPT,
+                       .si_addr  = (void __user *)instruction_pointer(regs),
+               };
 
-       if (!user_mode(regs))
+               force_sig_info(SIGTRAP, &info, current);
+       } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+               pr_warning("Unexpected kernel BRK exception at EL1\n");
                return -EFAULT;
+       }
 
-       info = (siginfo_t) {
-               .si_signo = SIGTRAP,
-               .si_errno = 0,
-               .si_code  = TRAP_BRKPT,
-               .si_addr  = (void __user *)instruction_pointer(regs),
-       };
-
-       force_sig_info(SIGTRAP, &info, current);
        return 0;
 }
 
index 66716c9..619b1dd 100644 (file)
@@ -78,8 +78,7 @@ ENTRY(efi_stub_entry)
 
        /* Turn off Dcache and MMU */
        mrs     x0, CurrentEL
-       cmp     x0, #PSR_MODE_EL2t
-       ccmp    x0, #PSR_MODE_EL2h, #0x4, ne
+       cmp     x0, #CurrentEL_EL2
        b.ne    1f
        mrs     x0, sctlr_el2
        bic     x0, x0, #1 << 0 // clear SCTLR.M
index 60e98a6..e786e6c 100644 (file)
@@ -12,8 +12,6 @@
 #include <linux/efi.h>
 #include <linux/libfdt.h>
 #include <asm/sections.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
 
 /*
  * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
index d358cca..c44a82f 100644 (file)
@@ -52,7 +52,7 @@ ENDPROC(fpsimd_load_state)
 ENTRY(fpsimd_save_partial_state)
        fpsimd_save_partial x0, 1, 8, 9
        ret
-ENDPROC(fpsimd_load_partial_state)
+ENDPROC(fpsimd_save_partial_state)
 
 /*
  * Load the bottom n FP registers.
index b051871..38e704e 100644 (file)
  *     - ftrace_graph_caller to set up an exit hook
  */
 ENTRY(_mcount)
-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       ldr     x0, =ftrace_trace_stop
-       ldr     x0, [x0]                // if ftrace_trace_stop
-       ret                             //   return;
-#endif
        mcount_enter
 
        ldr     x0, =ftrace_trace_function
@@ -205,7 +200,7 @@ ENDPROC(ftrace_graph_caller)
  *
  * Run ftrace_return_to_handler() before going back to parent.
  * @fp is checked against the value passed by ftrace_graph_caller()
- * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
+ * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
  */
 ENTRY(return_to_handler)
        str     x0, [sp, #-16]!
index bf017f4..f0b5e51 100644 (file)
 #include <asm/esr.h>
 #include <asm/thread_info.h>
 #include <asm/unistd.h>
-#include <asm/unistd32.h>
+
+/*
+ * Context tracking subsystem.  Used to instrument transitions
+ * between user and kernel mode.
+ */
+       .macro ct_user_exit, syscall = 0
+#ifdef CONFIG_CONTEXT_TRACKING
+       bl      context_tracking_user_exit
+       .if \syscall == 1
+       /*
+        * Save/restore needed during syscalls.  Restore syscall arguments from
+        * the values already saved on stack during kernel_entry.
+        */
+       ldp     x0, x1, [sp]
+       ldp     x2, x3, [sp, #S_X2]
+       ldp     x4, x5, [sp, #S_X4]
+       ldp     x6, x7, [sp, #S_X6]
+       .endif
+#endif
+       .endm
+
+       .macro ct_user_enter
+#ifdef CONFIG_CONTEXT_TRACKING
+       bl      context_tracking_user_enter
+#endif
+       .endm
 
 /*
  * Bad Abort numbers
        .macro  kernel_exit, el, ret = 0
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
+       ct_user_enter
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        .endif
        .if     \ret
@@ -279,7 +305,6 @@ el1_sp_pc:
         */
        mrs     x0, far_el1
        enable_dbg
-       mov     x1, x25
        mov     x2, sp
        b       do_sp_pc_abort
 el1_undef:
@@ -354,7 +379,6 @@ el0_sync:
        lsr     x24, x25, #ESR_EL1_EC_SHIFT     // exception class
        cmp     x24, #ESR_EL1_EC_SVC64          // SVC in 64-bit state
        b.eq    el0_svc
-       adr     lr, ret_to_user
        cmp     x24, #ESR_EL1_EC_DABT_EL0       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_EL1_EC_IABT_EL0       // instruction abort in EL0
@@ -383,7 +407,6 @@ el0_sync_compat:
        lsr     x24, x25, #ESR_EL1_EC_SHIFT     // exception class
        cmp     x24, #ESR_EL1_EC_SVC32          // SVC in 32-bit state
        b.eq    el0_svc_compat
-       adr     lr, ret_to_user
        cmp     x24, #ESR_EL1_EC_DABT_EL0       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_EL1_EC_IABT_EL0       // instruction abort in EL0
@@ -426,48 +449,59 @@ el0_da:
        /*
         * Data abort handling
         */
-       mrs     x0, far_el1
-       bic     x0, x0, #(0xff << 56)
+       mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
+       ct_user_exit
+       bic     x0, x26, #(0xff << 56)
        mov     x1, x25
        mov     x2, sp
+       adr     lr, ret_to_user
        b       do_mem_abort
 el0_ia:
        /*
         * Instruction abort handling
         */
-       mrs     x0, far_el1
+       mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
+       ct_user_exit
+       mov     x0, x26
        orr     x1, x25, #1 << 24               // use reserved ISS bit for instruction aborts
        mov     x2, sp
+       adr     lr, ret_to_user
        b       do_mem_abort
 el0_fpsimd_acc:
        /*
         * Floating Point or Advanced SIMD access
         */
        enable_dbg
+       ct_user_exit
        mov     x0, x25
        mov     x1, sp
+       adr     lr, ret_to_user
        b       do_fpsimd_acc
 el0_fpsimd_exc:
        /*
         * Floating Point or Advanced SIMD exception
         */
        enable_dbg
+       ct_user_exit
        mov     x0, x25
        mov     x1, sp
+       adr     lr, ret_to_user
        b       do_fpsimd_exc
 el0_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
-       mrs     x0, far_el1
+       mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
+       mov     x0, x26
        mov     x1, x25
        mov     x2, sp
+       adr     lr, ret_to_user
        b       do_sp_pc_abort
 el0_undef:
        /*
@@ -475,7 +509,9 @@ el0_undef:
         */
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
+       ct_user_exit
        mov     x0, sp
+       adr     lr, ret_to_user
        b       do_undefinstr
 el0_dbg:
        /*
@@ -487,12 +523,15 @@ el0_dbg:
        mov     x2, sp
        bl      do_debug_exception
        enable_dbg
+       ct_user_exit
        b       ret_to_user
 el0_inv:
        enable_dbg
+       ct_user_exit
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mrs     x2, esr_el1
+       adr     lr, ret_to_user
        b       bad_mode
 ENDPROC(el0_sync)
 
@@ -505,6 +544,7 @@ el0_irq_naked:
        bl      trace_hardirqs_off
 #endif
 
+       ct_user_exit
        irq_handler
 
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -609,6 +649,7 @@ el0_svc:
 el0_svc_naked:                                 // compat entry point
        stp     x0, scno, [sp, #S_ORIG_X0]      // save the original x0 and syscall number
        enable_dbg_and_irq
+       ct_user_exit 1
 
        ldr     x16, [tsk, #TI_FLAGS]           // check for syscall hooks
        tst     x16, #_TIF_SYSCALL_WORK
index a96d3a6..144f105 100644 (file)
@@ -22,6 +22,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
 #include <asm/page.h>
 #include <asm/virt.h>
 
-/*
- * swapper_pg_dir is the virtual address of the initial page table. We place
- * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
- * 2 pages and is placed below swapper_pg_dir.
- */
 #define KERNEL_RAM_VADDR       (PAGE_OFFSET + TEXT_OFFSET)
 
-#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
-#error KERNEL_RAM_VADDR must start at 0xXXX80000
+#if (TEXT_OFFSET & 0xf) != 0
+#error TEXT_OFFSET must be at least 16B aligned
+#elif (PAGE_OFFSET & 0xfffff) != 0
+#error PAGE_OFFSET must be at least 2MB aligned
+#elif TEXT_OFFSET > 0xfffff
+#error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define SWAPPER_DIR_SIZE       (3 * PAGE_SIZE)
-#define IDMAP_DIR_SIZE         (2 * PAGE_SIZE)
-
-       .globl  swapper_pg_dir
-       .equ    swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
-
-       .globl  idmap_pg_dir
-       .equ    idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
-
-       .macro  pgtbl, ttb0, ttb1, phys
-       add     \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
-       sub     \ttb0, \ttb1, #IDMAP_DIR_SIZE
+       .macro  pgtbl, ttb0, ttb1, virt_to_phys
+       ldr     \ttb1, =swapper_pg_dir
+       ldr     \ttb0, =idmap_pg_dir
+       add     \ttb1, \ttb1, \virt_to_phys
+       add     \ttb0, \ttb0, \virt_to_phys
        .endm
 
 #ifdef CONFIG_ARM64_64K_PAGES
 #define BLOCK_SHIFT    PAGE_SHIFT
 #define BLOCK_SIZE     PAGE_SIZE
+#define TABLE_SHIFT    PMD_SHIFT
 #else
 #define BLOCK_SHIFT    SECTION_SHIFT
 #define BLOCK_SIZE     SECTION_SIZE
+#define TABLE_SHIFT    PUD_SHIFT
 #endif
 
 #define KERNEL_START   KERNEL_RAM_VADDR
@@ -120,9 +115,9 @@ efi_head:
        b       stext                           // branch to kernel start, magic
        .long   0                               // reserved
 #endif
-       .quad   TEXT_OFFSET                     // Image load offset from start of RAM
-       .quad   0                               // reserved
-       .quad   0                               // reserved
+       .quad   _kernel_offset_le               // Image load offset from start of RAM, little-endian
+       .quad   _kernel_size_le                 // Effective size of kernel image, little-endian
+       .quad   _kernel_flags_le                // Informative flags, little-endian
        .quad   0                               // reserved
        .quad   0                               // reserved
        .quad   0                               // reserved
@@ -270,8 +265,7 @@ ENDPROC(stext)
  */
 ENTRY(el2_setup)
        mrs     x0, CurrentEL
-       cmp     x0, #PSR_MODE_EL2t
-       ccmp    x0, #PSR_MODE_EL2h, #0x4, ne
+       cmp     x0, #CurrentEL_EL2
        b.ne    1f
        mrs     x0, sctlr_el2
 CPU_BE(        orr     x0, x0, #(1 << 25)      )       // Set the EE bit for EL2
@@ -296,6 +290,23 @@ CPU_LE(    bic     x0, x0, #(3 << 24)      )       // Clear the EE and E0E bits for EL1
        msr     cnthctl_el2, x0
        msr     cntvoff_el2, xzr                // Clear virtual offset
 
+#ifdef CONFIG_ARM_GIC_V3
+       /* GICv3 system register access */
+       mrs     x0, id_aa64pfr0_el1
+       ubfx    x0, x0, #24, #4
+       cmp     x0, #1
+       b.ne    3f
+
+       mrs_s   x0, ICC_SRE_EL2
+       orr     x0, x0, #ICC_SRE_EL2_SRE        // Set ICC_SRE_EL2.SRE==1
+       orr     x0, x0, #ICC_SRE_EL2_ENABLE     // Set ICC_SRE_EL2.Enable==1
+       msr_s   ICC_SRE_EL2, x0
+       isb                                     // Make sure SRE is now set
+       msr_s   ICH_HCR_EL2, xzr                // Reset ICC_HCR_EL2 to defaults
+
+3:
+#endif
+
        /* Populate ID registers. */
        mrs     x0, midr_el1
        mrs     x1, mpidr_el1
@@ -414,7 +425,7 @@ ENTRY(secondary_startup)
        mov     x23, x0                         // x23=current cpu_table
        cbz     x23, __error_p                  // invalid processor (x23=0)?
 
-       pgtbl   x25, x26, x24                   // x25=TTBR0, x26=TTBR1
+       pgtbl   x25, x26, x28                   // x25=TTBR0, x26=TTBR1
        ldr     x12, [x23, #CPU_INFO_SETUP]
        add     x12, x12, x28                   // __virt_to_phys
        blr     x12                             // initialise processor
@@ -456,8 +467,13 @@ ENDPROC(__enable_mmu)
  *  x27 = *virtual* address to jump to upon completion
  *
  * other registers depend on the function called upon completion
+ *
+ * We align the entire function to the smallest power of two larger than it to
+ * ensure it fits within a single block map entry. Otherwise, were PHYS_OFFSET
+ * close to the end of a 512MB or 1GB block, we might require an additional
+ * table to map the entire function.
  */
-       .align  6
+       .align  4
 __turn_mmu_on:
        msr     sctlr_el1, x0
        isb
@@ -480,17 +496,38 @@ ENDPROC(__calc_phys_offset)
        .quad   PAGE_OFFSET
 
 /*
- * Macro to populate the PGD for the corresponding block entry in the next
- * level (tbl) for the given virtual address.
+ * Macro to create a table entry to the next page.
+ *
+ *     tbl:    page table address
+ *     virt:   virtual address
+ *     shift:  #imm page table shift
+ *     ptrs:   #imm pointers per table page
+ *
+ * Preserves:  virt
+ * Corrupts:   tmp1, tmp2
+ * Returns:    tbl -> next level table page address
+ */
+       .macro  create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
+       lsr     \tmp1, \virt, #\shift
+       and     \tmp1, \tmp1, #\ptrs - 1        // table index
+       add     \tmp2, \tbl, #PAGE_SIZE
+       orr     \tmp2, \tmp2, #PMD_TYPE_TABLE   // address of next table and entry type
+       str     \tmp2, [\tbl, \tmp1, lsl #3]
+       add     \tbl, \tbl, #PAGE_SIZE          // next level table page
+       .endm
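
A C model of one create_table_entry expansion may help; it assumes 8-byte descriptors and glosses over the physical/virtual distinction. Per the comment above, the next-level table is simply the page that follows tbl:

	/* Illustrative C model of create_table_entry; not kernel code. */
	static u64 *create_table_entry_model(u64 *tbl, u64 virt,
					     unsigned int shift, unsigned int ptrs)
	{
		u64 idx = (virt >> shift) & (ptrs - 1);		/* table index */
		u64 *next = (u64 *)((char *)tbl + PAGE_SIZE);	/* next table page */

		tbl[idx] = (u64)next | PMD_TYPE_TABLE;		/* table descriptor */
		return next;					/* descend a level */
	}
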
+
+/*
+ * Macro to populate the PGD (and possibly PUD) for the corresponding
+ * block entry in the next level (tbl) for the given virtual address.
  *
- * Preserves:  pgd, tbl, virt
+ * Preserves:  tbl, next, virt
  * Corrupts:   tmp1, tmp2
  */
-       .macro  create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
-       lsr     \tmp1, \virt, #PGDIR_SHIFT
-       and     \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index
-       orr     \tmp2, \tbl, #3                 // PGD entry table type
-       str     \tmp2, [\pgd, \tmp1, lsl #3]
+       .macro  create_pgd_entry, tbl, virt, tmp1, tmp2
+       create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
+#if SWAPPER_PGTABLE_LEVELS == 3
+       create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
+#endif
        .endm
 
 /*
@@ -523,7 +560,7 @@ ENDPROC(__calc_phys_offset)
  *   - pgd entry for fixed mappings (TTBR1)
  */
 __create_page_tables:
-       pgtbl   x25, x26, x24                   // idmap_pg_dir and swapper_pg_dir addresses
+       pgtbl   x25, x26, x28                   // idmap_pg_dir and swapper_pg_dir addresses
        mov     x27, lr
 
        /*
@@ -551,10 +588,10 @@ __create_page_tables:
        /*
         * Create the identity mapping.
         */
-       add     x0, x25, #PAGE_SIZE             // section table address
+       mov     x0, x25                         // idmap_pg_dir
        ldr     x3, =KERNEL_START
        add     x3, x3, x28                     // __pa(KERNEL_START)
-       create_pgd_entry x25, x0, x3, x5, x6
+       create_pgd_entry x0, x3, x5, x6
        ldr     x6, =KERNEL_END
        mov     x5, x3                          // __pa(KERNEL_START)
        add     x6, x6, x28                     // __pa(KERNEL_END)
@@ -563,9 +600,9 @@ __create_page_tables:
        /*
         * Map the kernel image (starting with PHYS_OFFSET).
         */
-       add     x0, x26, #PAGE_SIZE             // section table address
+       mov     x0, x26                         // swapper_pg_dir
        mov     x5, #PAGE_OFFSET
-       create_pgd_entry x26, x0, x5, x3, x6
+       create_pgd_entry x0, x5, x3, x6
        ldr     x6, =KERNEL_END
        mov     x3, x24                         // phys offset
        create_block_map x0, x7, x3, x5, x6
@@ -586,13 +623,6 @@ __create_page_tables:
        sub     x6, x6, #1                      // inclusive range
        create_block_map x0, x7, x3, x5, x6
 1:
-       /*
-        * Create the pgd entry for the fixed mappings.
-        */
-       ldr     x5, =FIXADDR_TOP                // Fixed mapping virtual address
-       add     x0, x26, #2 * PAGE_SIZE         // section table address
-       create_pgd_entry x26, x0, x5, x6, x7
-
        /*
         * Since the page tables have been populated with non-cacheable
         * accesses (MMU disabled), invalidate the idmap and swapper page
@@ -612,7 +642,7 @@ ENDPROC(__create_page_tables)
 __switch_data:
        .quad   __mmap_switched
        .quad   __bss_start                     // x6
-       .quad   _end                            // x7
+       .quad   __bss_stop                      // x7
        .quad   processor_id                    // x4
        .quad   __fdt_pointer                   // x5
        .quad   memstart_addr                   // x6
index 0959611..a272f33 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <linux/init.h>
 #include <linux/linkage.h>
+#include <linux/irqchip/arm-gic-v3.h>
 
 #include <asm/assembler.h>
 #include <asm/ptrace.h>
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
new file mode 100644 (file)
index 0000000..8fae075
--- /dev/null
@@ -0,0 +1,62 @@
+/*
+ * Linker script macros to generate Image header fields.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_IMAGE_H
+#define __ASM_IMAGE_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+/*
+ * There aren't any ELF relocations we can use to endian-swap values known only
+ * at link time (e.g. the subtraction of two symbol addresses), so we must get
+ * the linker to endian-swap certain values before emitting them.
+ */
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define DATA_LE64(data)                                        \
+       ((((data) & 0x00000000000000ff) << 56) |        \
+        (((data) & 0x000000000000ff00) << 40) |        \
+        (((data) & 0x0000000000ff0000) << 24) |        \
+        (((data) & 0x00000000ff000000) << 8)  |        \
+        (((data) & 0x000000ff00000000) >> 8)  |        \
+        (((data) & 0x0000ff0000000000) >> 24) |        \
+        (((data) & 0x00ff000000000000) >> 40) |        \
+        (((data) & 0xff00000000000000) >> 56))
+#else
+#define DATA_LE64(data) ((data) & 0xffffffffffffffff)
+#endif
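
A worked example of the big-endian branch, using an assumed sample value, confirms it performs a full byte reversal:

	/*
	 * DATA_LE64(0x0001020304050607) == 0x0706050403020100, so a big-endian
	 * link emits the same byte sequence a little-endian link would have.
	 */
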
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#define __HEAD_FLAG_BE 1
+#else
+#define __HEAD_FLAG_BE 0
+#endif
+
+#define __HEAD_FLAGS   (__HEAD_FLAG_BE << 0)
+
+/*
+ * These will be output as part of the Image header, which should be
+ * little-endian regardless of the endianness of the kernel. While constant
+ * values could be endian-swapped in head.S, all are done here for consistency.
+ */
+#define HEAD_SYMBOLS                                           \
+       _kernel_size_le         = DATA_LE64(_end - _text);      \
+       _kernel_offset_le       = DATA_LE64(TEXT_OFFSET);       \
+       _kernel_flags_le        = DATA_LE64(__HEAD_FLAGS);
+
+#endif /* __ASM_IMAGE_H */
index 7787208..997e6b2 100644 (file)
@@ -28,7 +28,7 @@
  * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
  */
 
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
        .align  5
        .globl  __kuser_helper_start
index 43b7c34..1309d64 100644 (file)
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
 
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 static void setup_restart(void)
 {
        /*
index 9e9798f..5539547 100644 (file)
@@ -235,7 +235,7 @@ static void psci_sys_poweroff(void)
  * PSCI Function IDs for v0.2+ are well defined so use
  * standard values.
  */
-static int psci_0_2_init(struct device_node *np)
+static int __init psci_0_2_init(struct device_node *np)
 {
        int err, ver;
 
@@ -296,7 +296,7 @@ out_put_node:
 /*
  * PSCI < v0.2 gets PSCI Function IDs via DT.
  */
-static int psci_0_1_init(struct device_node *np)
+static int __init psci_0_1_init(struct device_node *np)
 {
        u32 id;
        int err;
@@ -434,9 +434,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
        return 0;
 }
 #endif
+#endif
 
 const struct cpu_operations cpu_psci_ops = {
        .name           = "psci",
+#ifdef CONFIG_SMP
        .cpu_init       = cpu_psci_cpu_init,
        .cpu_prepare    = cpu_psci_cpu_prepare,
        .cpu_boot       = cpu_psci_cpu_boot,
@@ -445,6 +447,6 @@ const struct cpu_operations cpu_psci_ops = {
        .cpu_die        = cpu_psci_cpu_die,
        .cpu_kill       = cpu_psci_cpu_kill,
 #endif
+#endif
 };
 
-#endif
index 3e926b9..0310811 100644 (file)
@@ -19,6 +19,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/audit.h>
 #include <linux/compat.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -39,6 +40,7 @@
 #include <asm/compat.h>
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
+#include <asm/syscall.h>
 #include <asm/traps.h>
 #include <asm/system_misc.h>
 
@@ -655,11 +657,16 @@ static int compat_gpr_get(struct task_struct *target,
                        reg = task_pt_regs(target)->regs[idx];
                }
 
-               ret = copy_to_user(ubuf, &reg, sizeof(reg));
-               if (ret)
-                       break;
+               if (kbuf) {
+                       memcpy(kbuf, &reg, sizeof(reg));
+                       kbuf += sizeof(reg);
+               } else {
+                       ret = copy_to_user(ubuf, &reg, sizeof(reg));
+                       if (ret)
+                               break;
 
-               ubuf += sizeof(reg);
+                       ubuf += sizeof(reg);
+               }
        }
 
        return ret;
@@ -689,11 +696,16 @@ static int compat_gpr_set(struct task_struct *target,
                unsigned int idx = start + i;
                compat_ulong_t reg;
 
-               ret = copy_from_user(&reg, ubuf, sizeof(reg));
-               if (ret)
-                       return ret;
+               if (kbuf) {
+                       memcpy(&reg, kbuf, sizeof(reg));
+                       kbuf += sizeof(reg);
+               } else {
+                       ret = copy_from_user(&reg, ubuf, sizeof(reg));
+                       if (ret)
+                               return ret;
 
-               ubuf += sizeof(reg);
+                       ubuf += sizeof(reg);
+               }
 
                switch (idx) {
                case 15:
@@ -827,6 +839,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
                                    compat_ulong_t val)
 {
        int ret;
+       mm_segment_t old_fs = get_fs();
 
        if (off & 3 || off >= COMPAT_USER_SZ)
                return -EIO;
@@ -834,10 +847,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
        if (off >= sizeof(compat_elf_gregset_t))
                return 0;
 
+       set_fs(KERNEL_DS);
        ret = copy_regset_from_user(tsk, &user_aarch32_view,
                                    REGSET_COMPAT_GPR, off,
                                    sizeof(compat_ulong_t),
                                    &val);
+       set_fs(old_fs);
+
        return ret;
 }
 
@@ -1099,11 +1115,20 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, regs->syscallno);
 
+#ifdef CONFIG_AUDITSYSCALL
+       audit_syscall_entry(syscall_get_arch(), regs->syscallno,
+               regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]);
+#endif
+
        return regs->syscallno;
 }
 
 asmlinkage void syscall_trace_exit(struct pt_regs *regs)
 {
+#ifdef CONFIG_AUDITSYSCALL
+       audit_syscall_exit(regs);
+#endif
+
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_exit(regs, regs_return_value(regs));
 
index 46d1125..f6f0ccf 100644 (file)
@@ -45,6 +45,7 @@
 #include <linux/efi.h>
 
 #include <asm/fixmap.h>
+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
 #include <asm/cputable.h>
@@ -77,7 +78,6 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 static const char *cpu_name;
-static const char *machine_name;
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -219,6 +219,8 @@ static void __init setup_processor(void)
        sprintf(init_utsname()->machine, ELF_PLATFORM);
        elf_hwcap = 0;
 
+       cpuinfo_store_boot_cpu();
+
        /*
         * Check for sane CTR_EL0.CWG value.
         */
@@ -307,8 +309,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
                while (true)
                        cpu_relax();
        }
-
-       machine_name = of_flat_dt_get_machine_name();
 }
 
 /*
@@ -417,14 +417,12 @@ static int __init arm64_device_init(void)
 }
 arch_initcall_sync(arm64_device_init);
 
-static DEFINE_PER_CPU(struct cpu, cpu_data);
-
 static int __init topology_init(void)
 {
        int i;
 
        for_each_possible_cpu(i) {
-               struct cpu *cpu = &per_cpu(cpu_data, i);
+               struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
                cpu->hotpluggable = 1;
                register_cpu(cpu, i);
        }
@@ -449,10 +447,21 @@ static int c_show(struct seq_file *m, void *v)
 {
        int i;
 
-       seq_printf(m, "Processor\t: %s rev %d (%s)\n",
-                  cpu_name, read_cpuid_id() & 15, ELF_PLATFORM);
+       /*
+        * Dump out the common processor features in a single line. Userspace
+        * should read the hwcaps with getauxval(AT_HWCAP) rather than
+        * attempting to parse this.
+        */
+       seq_puts(m, "features\t:");
+       for (i = 0; hwcap_str[i]; i++)
+               if (elf_hwcap & (1 << i))
+                       seq_printf(m, " %s", hwcap_str[i]);
+       seq_puts(m, "\n\n");
 
        for_each_online_cpu(i) {
+               struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
+               u32 midr = cpuinfo->reg_midr;
+
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
@@ -461,25 +470,13 @@ static int c_show(struct seq_file *m, void *v)
 #ifdef CONFIG_SMP
                seq_printf(m, "processor\t: %d\n", i);
 #endif
+               seq_printf(m, "implementer\t: 0x%02x\n",
+                          MIDR_IMPLEMENTOR(midr));
+               seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr));
+               seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr));
+               seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr));
        }
 
-       /* dump out the processor features */
-       seq_puts(m, "Features\t: ");
-
-       for (i = 0; hwcap_str[i]; i++)
-               if (elf_hwcap & (1 << i))
-                       seq_printf(m, "%s ", hwcap_str[i]);
-
-       seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
-       seq_printf(m, "CPU architecture: AArch64\n");
-       seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15);
-       seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff);
-       seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
-
-       seq_puts(m, "\n");
-
-       seq_printf(m, "Hardware\t: %s\n", machine_name);
-
        return 0;
 }
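
Editor's note: the rewritten c_show() prints the MIDR fields per CPU instead of decoding one global read_cpuid_id(). The bit positions are visible in the removed lines: implementer [31:24], variant [23:20], part number [15:4], revision [3:0]. A self-contained userspace decoder using the same layout, plus the getauxval(AT_HWCAP) lookup the new comment recommends (the sample MIDR value is illustrative):

#include <stdio.h>
#include <stdint.h>
#include <sys/auxv.h>

static void decode_midr(uint32_t midr)
{
        printf("implementer\t: 0x%02x\n", (midr >> 24) & 0xff);
        printf("variant\t\t: 0x%x\n",    (midr >> 20) & 0xf);
        printf("partnum\t\t: 0x%03x\n",  (midr >> 4) & 0xfff);
        printf("revision\t: 0x%x\n",     midr & 0xf);
}

int main(void)
{
        decode_midr(0x410fd030);        /* e.g. an ARM Ltd. part, r0p0 */
        printf("AT_HWCAP\t: %#lx\n", (unsigned long)getauxval(AT_HWCAP));
        return 0;
}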
 
index 3491c63..c5ee208 100644 (file)
@@ -27,7 +27,7 @@
 #include <asm/fpsimd.h>
 #include <asm/signal32.h>
 #include <asm/uaccess.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 struct compat_sigcontext {
        /* We always set these two fields to 0 */
index 40f38f4..3e2f5eb 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
+#include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/mmu_context.h>
@@ -154,6 +155,11 @@ asmlinkage void secondary_start_kernel(void)
        if (cpu_ops[cpu]->cpu_postboot)
                cpu_ops[cpu]->cpu_postboot();
 
+       /*
+        * Log the CPU info before it is marked online and might get read.
+        */
+       cpuinfo_store_cpu();
+
        /*
         * Enable GIC and timers.
         */
index 1fa9ce4..55a99b9 100644 (file)
@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg)
 extern struct sleep_save_sp sleep_save_sp;
 extern phys_addr_t sleep_idmap_phys;
 
-static int cpu_suspend_init(void)
+static int __init cpu_suspend_init(void)
 {
        void *ctx_ptr;
 
index 26e9c4e..de2b022 100644 (file)
@@ -26,7 +26,7 @@
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
-#include <asm/unistd32.h>
+#include <asm/unistd.h>
 
 static inline void
 do_compat_cache_op(unsigned long start, unsigned long end, int flags)
index 43514f9..b6ee26b 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/sched.h>
 
+#include <asm/cputype.h>
 #include <asm/topology.h>
 
 static int __init get_cpu_for_node(struct device_node *node)
@@ -188,13 +189,9 @@ static int __init parse_dt_topology(void)
         * Check that all cores are in the topology; the SMP code will
         * only mark cores described in the DT as possible.
         */
-       for_each_possible_cpu(cpu) {
-               if (cpu_topology[cpu].cluster_id == -1) {
-                       pr_err("CPU%d: No topology information specified\n",
-                              cpu);
+       for_each_possible_cpu(cpu)
+               if (cpu_topology[cpu].cluster_id == -1)
                        ret = -EINVAL;
-               }
-       }
 
 out_map:
        of_node_put(map);
@@ -219,14 +216,6 @@ static void update_siblings_masks(unsigned int cpuid)
        struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
        int cpu;
 
-       if (cpuid_topo->cluster_id == -1) {
-               /*
-                * DT does not contain topology information for this cpu.
-                */
-               pr_debug("CPU%u: No topology information configured\n", cpuid);
-               return;
-       }
-
        /* update core and thread sibling masks */
        for_each_possible_cpu(cpu) {
                cpu_topo = &cpu_topology[cpu];
@@ -249,6 +238,36 @@ static void update_siblings_masks(unsigned int cpuid)
 
 void store_cpu_topology(unsigned int cpuid)
 {
+       struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
+       u64 mpidr;
+
+       if (cpuid_topo->cluster_id != -1)
+               goto topology_populated;
+
+       mpidr = read_cpuid_mpidr();
+
+       /* Uniprocessor systems can rely on default topology values */
+       if (mpidr & MPIDR_UP_BITMASK)
+               return;
+
+       /* Create cpu topology mapping based on MPIDR. */
+       if (mpidr & MPIDR_MT_BITMASK) {
+               /* Multiprocessor system : Multi-threads per core */
+               cpuid_topo->thread_id  = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+               cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+               cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2);
+       } else {
+               /* Multiprocessor system : Single-thread per core */
+               cpuid_topo->thread_id  = -1;
+               cpuid_topo->core_id    = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+               cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+       }
+
+       pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
+                cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id,
+                cpuid_topo->thread_id, mpidr);
+
+topology_populated:
        update_siblings_masks(cpuid);
 }
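
Editor's note: store_cpu_topology() above derives the topology from MPIDR_EL1, whose architectural layout is Aff0 [7:0], Aff1 [15:8], Aff2 [23:16], with the MT bit at [24] and the uniprocessor bit at [30]. A userspace sketch of the same decision tree (simplified: affinity level 3 at bits [39:32] is ignored, as in the kernel code for levels 0-2):

#include <stdio.h>
#include <stdint.h>

#define MPIDR_AFF(m, level)  (((m) >> ((level) * 8)) & 0xff)
#define MPIDR_MT_BITMASK     (1ULL << 24)
#define MPIDR_UP_BITMASK     (1ULL << 30)

static void parse_mpidr(uint64_t mpidr)
{
        long cluster, core, thread;

        if (mpidr & MPIDR_UP_BITMASK) {         /* uniprocessor: keep defaults */
                printf("%#llx: uniprocessor\n", (unsigned long long)mpidr);
                return;
        }
        if (mpidr & MPIDR_MT_BITMASK) {         /* SMT: shift one level up */
                thread  = MPIDR_AFF(mpidr, 0);
                core    = MPIDR_AFF(mpidr, 1);
                cluster = MPIDR_AFF(mpidr, 2);
        } else {
                thread  = -1;
                core    = MPIDR_AFF(mpidr, 0);
                cluster = MPIDR_AFF(mpidr, 1);
        }
        printf("%#llx: cluster %ld core %ld thread %ld\n",
               (unsigned long long)mpidr, cluster, core, thread);
}

int main(void)
{
        parse_mpidr(0x101);             /* cluster 1, core 1, no SMT */
        parse_mpidr(0x1000001);         /* MT set: cluster 0, core 0, thread 1 */
        return 0;
}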
 
index c43cfa9..02cd3f0 100644 (file)
@@ -156,7 +156,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
                frame.pc = thread_saved_pc(tsk);
        }
 
-       printk("Call trace:\n");
+       pr_emerg("Call trace:\n");
        while (1) {
                unsigned long where = frame.pc;
                int ret;
@@ -331,17 +331,22 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 
 void __pte_error(const char *file, int line, unsigned long val)
 {
-       printk("%s:%d: bad pte %016lx.\n", file, line, val);
+       pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
 }
 
 void __pmd_error(const char *file, int line, unsigned long val)
 {
-       printk("%s:%d: bad pmd %016lx.\n", file, line, val);
+       pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
+}
+
+void __pud_error(const char *file, int line, unsigned long val)
+{
+       pr_crit("%s:%d: bad pud %016lx.\n", file, line, val);
 }
 
 void __pgd_error(const char *file, int line, unsigned long val)
 {
-       printk("%s:%d: bad pgd %016lx.\n", file, line, val);
+       pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
 void __init trap_init(void)
index 50384fe..24f2e8c 100644 (file)
@@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
 {
        struct mm_struct *mm = current->mm;
        unsigned long addr = AARCH32_VECTORS_BASE;
-       int ret;
+       static struct vm_special_mapping spec = {
+               .name   = "[vectors]",
+               .pages  = vectors_page,
+
+       };
+       void *ret;
 
        down_write(&mm->mmap_sem);
        current->mm->context.vdso = (void *)addr;
 
        /* Map vectors page at the high address. */
-       ret = install_special_mapping(mm, addr, PAGE_SIZE,
-                                     VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
-                                     vectors_page);
+       ret = _install_special_mapping(mm, addr, PAGE_SIZE,
+                                      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+                                      &spec);
 
        up_write(&mm->mmap_sem);
 
-       return ret;
+       return PTR_ERR_OR_ZERO(ret);
 }
 #endif /* CONFIG_COMPAT */
 
+static struct vm_special_mapping vdso_spec[2];
+
 static int __init vdso_init(void)
 {
        int i;
@@ -114,8 +121,8 @@ static int __init vdso_init(void)
        }
 
        vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
-       pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
-               vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+       pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n",
+               vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data);
 
        /* Allocate the vDSO pagelist, plus a page for the data. */
        vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
@@ -123,12 +130,23 @@ static int __init vdso_init(void)
        if (vdso_pagelist == NULL)
                return -ENOMEM;
 
+       /* Grab the vDSO data page. */
+       vdso_pagelist[0] = virt_to_page(vdso_data);
+
        /* Grab the vDSO code pages. */
        for (i = 0; i < vdso_pages; i++)
-               vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
+               vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE);
 
-       /* Grab the vDSO data page. */
-       vdso_pagelist[i] = virt_to_page(vdso_data);
+       /* Populate the special mapping structures */
+       vdso_spec[0] = (struct vm_special_mapping) {
+               .name   = "[vvar]",
+               .pages  = vdso_pagelist,
+       };
+
+       vdso_spec[1] = (struct vm_special_mapping) {
+               .name   = "[vdso]",
+               .pages  = &vdso_pagelist[1],
+       };
 
        return 0;
 }
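
Editor's note: the data page moves from the end of vdso_pagelist to slot 0, so the two vm_special_mapping entries can share one array: [vvar] covers pagelist[0] and [vdso] starts at &pagelist[1]. A sketch of the indexing, with strings standing in for struct page pointers:

#include <stdio.h>

int main(void)
{
        /* Illustrative: one data page plus a 2-page vDSO image. */
        const char *pagelist[] = { "vdso_data", "code page 0", "code page 1" };
        const char **vvar_pages = &pagelist[0];   /* vdso_spec[0].pages */
        const char **vdso_pages = &pagelist[1];   /* vdso_spec[1].pages */

        printf("[vvar] -> %s\n", vvar_pages[0]);
        for (int i = 0; i < 2; i++)
                printf("[vdso] %d -> %s\n", i, vdso_pages[i]);
        return 0;
}

The matching hunk below then maps a single VM_READ page for [vvar] at vdso_base and the text pages at vdso_base + PAGE_SIZE, which is the address stored in mm->context.vdso.
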
@@ -138,52 +156,42 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
                                int uses_interp)
 {
        struct mm_struct *mm = current->mm;
-       unsigned long vdso_base, vdso_mapping_len;
-       int ret;
+       unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
+       void *ret;
 
+       vdso_text_len = vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
-       vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+       vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
        down_write(&mm->mmap_sem);
        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
-               ret = vdso_base;
+               ret = ERR_PTR(vdso_base);
                goto up_fail;
        }
-       mm->context.vdso = (void *)vdso_base;
-
-       ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
-                                     VM_READ|VM_EXEC|
-                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                     vdso_pagelist);
-       if (ret) {
-               mm->context.vdso = NULL;
+       ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
+                                      VM_READ|VM_MAYREAD,
+                                      &vdso_spec[0]);
+       if (IS_ERR(ret))
                goto up_fail;
-       }
 
-up_fail:
-       up_write(&mm->mmap_sem);
+       vdso_base += PAGE_SIZE;
+       mm->context.vdso = (void *)vdso_base;
+       ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                      &vdso_spec[1]);
+       if (IS_ERR(ret))
+               goto up_fail;
 
-       return ret;
-}
 
-const char *arch_vma_name(struct vm_area_struct *vma)
-{
-       /*
-        * We can re-use the vdso pointer in mm_context_t for identifying
-        * the vectors page for compat applications. The vDSO will always
-        * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
-        * it conflicting with the vectors base.
-        */
-       if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
-#ifdef CONFIG_COMPAT
-               if (vma->vm_start == AARCH32_VECTORS_BASE)
-                       return "[vectors]";
-#endif
-               return "[vdso]";
-       }
+       up_write(&mm->mmap_sem);
+       return 0;
 
-       return NULL;
+up_fail:
+       mm->context.vdso = NULL;
+       up_write(&mm->mmap_sem);
+       return PTR_ERR(ret);
 }
 
 /*
index 6d20b7d..ff3bdde 100644 (file)
@@ -43,13 +43,13 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
        $(call if_changed,vdsosym)
 
 # Assembly rules for the .S files
-$(obj-vdso): %.o: %.S
+$(obj-vdso): %.o: %.S FORCE
        $(call if_changed_dep,vdsoas)
 
 # Actual build commands
-quiet_cmd_vdsold = VDSOL $@
+quiet_cmd_vdsold = VDSOL   $@
       cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
-quiet_cmd_vdsoas = VDSOA $@
+quiet_cmd_vdsoas = VDSOA   $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
 
 # Install commands for the unstripped file
index 8154b8d..beca249 100644 (file)
@@ -28,6 +28,7 @@ OUTPUT_ARCH(aarch64)
 
 SECTIONS
 {
+       PROVIDE(_vdso_data = . - PAGE_SIZE);
        . = VDSO_LBASE + SIZEOF_HEADERS;
 
        .hash           : { *(.hash) }                  :text
@@ -57,9 +58,6 @@ SECTIONS
        _end = .;
        PROVIDE(end = .);
 
-       . = ALIGN(PAGE_SIZE);
-       PROVIDE(_vdso_data = .);
-
        /DISCARD/       : {
                *(.note.GNU-stack)
                *(.data .data.* .gnu.linkonce.d.* .sdata*)
index f1e6d5c..97f0c04 100644 (file)
@@ -9,6 +9,8 @@
 #include <asm/memory.h>
 #include <asm/page.h>
 
+#include "image.h"
+
 #define ARM_EXIT_KEEP(x)
 #define ARM_EXIT_DISCARD(x)    x
 
@@ -104,9 +106,18 @@ SECTIONS
        _edata = .;
 
        BSS_SECTION(0, 0, 0)
+
+       . = ALIGN(PAGE_SIZE);
+       idmap_pg_dir = .;
+       . += IDMAP_DIR_SIZE;
+       swapper_pg_dir = .;
+       . += SWAPPER_DIR_SIZE;
+
        _end = .;
 
        STABS_DEBUG
+
+       HEAD_SYMBOLS
 }
 
 /*
@@ -114,3 +125,8 @@ SECTIONS
  */
 ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end),
        "HYP init code too big")
+
+/*
+ * If padding is applied before .head.text, virt<->phys conversions will fail.
+ */
+ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned")
index 9aecbac..13bbc3b 100644 (file)
@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
        copy_page(kto, kfrom);
        __flush_dcache_area(kto, PAGE_SIZE);
 }
+EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
 
 void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
 {
        clear_page(kaddr);
 }
+EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
index bcc965e..41cb6d3 100644 (file)
@@ -62,6 +62,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
                        break;
 
                pud = pud_offset(pgd, addr);
+               printk(", *pud=%016llx", pud_val(*pud));
                if (pud_none(*pud) || pud_bad(*pud))
                        break;
 
index e4193e3..0d64089 100644 (file)
@@ -79,7 +79,8 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
                return;
 
        if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
-               __flush_dcache_area(page_address(page), PAGE_SIZE);
+               __flush_dcache_area(page_address(page),
+                               PAGE_SIZE << compound_order(page));
                __flush_icache_all();
        } else if (icache_is_aivivt()) {
                __flush_icache_all();
index 091d428..5b4526e 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 
+#include <asm/fixmap.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
 #include <asm/sizes.h>
@@ -60,6 +61,17 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+       phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+       return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
        struct memblock_region *reg;
@@ -70,9 +82,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-               unsigned long max_dma_phys =
-                       (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
-               max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+               max_dma = PFN_DOWN(max_zone_dma_phys());
                zone_size[ZONE_DMA] = max_dma - min;
        }
        zone_size[ZONE_NORMAL] = max - max_dma;
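
Editor's note: max_zone_dma_phys() above keeps the top 32 address bits of the start of DRAM as a presumed DMA offset, then caps ZONE_DMA at that offset plus 4GB, or the end of DRAM if that comes first. A worked example with illustrative addresses:

#include <stdio.h>
#include <stdint.h>

static uint64_t max_zone_dma_phys(uint64_t dram_start, uint64_t dram_end)
{
        uint64_t offset = dram_start & ~0xffffffffULL;  /* GENMASK_ULL(63, 32) */
        uint64_t limit = offset + (1ULL << 32);

        return limit < dram_end ? limit : dram_end;
}

int main(void)
{
        /* DRAM starting above 4GB (values are illustrative only). */
        printf("%#llx\n", (unsigned long long)
               max_zone_dma_phys(0x8080000000ULL, 0x9000000000ULL));
        /* prints 0x8100000000: a 4GB window above the 4GB-aligned base */
        return 0;
}
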
@@ -126,22 +136,24 @@ static void arm64_memory_present(void)
 
 void __init arm64_memblock_init(void)
 {
-       /* Register the kernel text, kernel data and initrd with memblock */
+       phys_addr_t dma_phys_limit = 0;
+
+       /*
+        * Register the kernel text, kernel data, initrd, and initial
+        * pagetables with memblock.
+        */
        memblock_reserve(__pa(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start)
                memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start);
 #endif
 
-       /*
-        * Reserve the page tables.  These are already in use,
-        * and can only be in node 0.
-        */
-       memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
-       memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
-
        early_init_fdt_scan_reserved_mem();
-       dma_contiguous_reserve(0);
+
+       /* 4GB maximum for 32-bit only capable devices */
+       if (IS_ENABLED(CONFIG_ZONE_DMA))
+               dma_phys_limit = max_zone_dma_phys();
+       dma_contiguous_reserve(dma_phys_limit);
 
        memblock_allow_resize();
        memblock_dump_all();
@@ -254,26 +266,33 @@ void __init mem_init(void)
 
 #define MLK(b, t) b, t, ((t) - (b)) >> 10
 #define MLM(b, t) b, t, ((t) - (b)) >> 20
+#define MLG(b, t) b, t, ((t) - (b)) >> 30
 #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
 
        pr_notice("Virtual kernel memory layout:\n"
-                 "    vmalloc : 0x%16lx - 0x%16lx   (%6ld MB)\n"
+                 "    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n"
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-                 "    vmemmap : 0x%16lx - 0x%16lx   (%6ld MB)\n"
+                 "    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n"
+                 "              0x%16lx - 0x%16lx   (%6ld MB actual)\n"
 #endif
+                 "    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n"
+                 "    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n"
                  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
                  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
-                 "      .init : 0x%p" " - 0x%p" "   (%6ld kB)\n"
-                 "      .text : 0x%p" " - 0x%p" "   (%6ld kB)\n"
-                 "      .data : 0x%p" " - 0x%p" "   (%6ld kB)\n",
-                 MLM(VMALLOC_START, VMALLOC_END),
+                 "      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+                 "      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n"
+                 "      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
+                 MLG(VMALLOC_START, VMALLOC_END),
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
+                 MLG((unsigned long)vmemmap,
+                     (unsigned long)vmemmap + VMEMMAP_SIZE),
                  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
                      (unsigned long)virt_to_page(high_memory)),
 #endif
+                 MLM((unsigned long)PCI_IOBASE, (unsigned long)PCI_IOBASE + SZ_16M),
+                 MLK(FIXADDR_START, FIXADDR_TOP),
                  MLM(MODULES_VADDR, MODULES_END),
                  MLM(PAGE_OFFSET, (unsigned long)high_memory),
-
                  MLK_ROUNDUP(__init_begin, __init_end),
                  MLK_ROUNDUP(_text, _etext),
                  MLK_ROUNDUP(_sdata, _edata));
index 7ec3283..fa324bd 100644 (file)
@@ -103,19 +103,28 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
-#ifndef CONFIG_ARM64_64K_PAGES
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#if CONFIG_ARM64_PGTABLE_LEVELS > 2
+static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+#if CONFIG_ARM64_PGTABLE_LEVELS > 3
+static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
 #endif
 
-static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+static inline pud_t * __init early_ioremap_pud(unsigned long addr)
 {
        pgd_t *pgd;
-       pud_t *pud;
 
        pgd = pgd_offset_k(addr);
        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
 
-       pud = pud_offset(pgd, addr);
+       return pud_offset(pgd, addr);
+}
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+       pud_t *pud = early_ioremap_pud(addr);
+
        BUG_ON(pud_none(*pud) || pud_bad(*pud));
 
        return pmd_offset(pud, addr);
@@ -132,13 +141,18 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
 
 void __init early_ioremap_init(void)
 {
+       pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
+       unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
 
-       pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-#ifndef CONFIG_ARM64_64K_PAGES
-       /* need to populate pmd for 4k pagesize only */
+       pgd = pgd_offset_k(addr);
+       pgd_populate(&init_mm, pgd, bm_pud);
+       pud = pud_offset(pgd, addr);
+       pud_populate(&init_mm, pud, bm_pmd);
+       pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);
-#endif
+
        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
index c43f1dd..c555672 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/memblock.h>
 #include <asm/mmu_context.h>
 
 #include "mm.h"
@@ -204,9 +205,16 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
                                  unsigned long end, unsigned long phys,
                                  int map_io)
 {
-       pud_t *pud = pud_offset(pgd, addr);
+       pud_t *pud;
        unsigned long next;
 
+       if (pgd_none(*pgd)) {
+               pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t));
+               pgd_populate(&init_mm, pgd, pud);
+       }
+       BUG_ON(pgd_bad(*pgd));
+
+       pud = pud_offset(pgd, addr);
        do {
                next = pud_addr_end(addr, end);
 
@@ -290,10 +298,10 @@ static void __init map_mem(void)
         * memory addressable from the initial direct kernel mapping.
         *
         * The initial direct kernel mapping, located at swapper_pg_dir,
-        * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+        * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be
         * aligned to 2MB as per Documentation/arm64/booting.txt).
         */
-       limit = PHYS_OFFSET + PGDIR_SIZE;
+       limit = PHYS_OFFSET + PUD_SIZE;
        memblock_set_current_limit(limit);
 
        /* map all the memory banks */
index 972adcc..941593c 100644 (file)
@@ -92,6 +92,7 @@ extern struct avr32_cpuinfo boot_cpu_data;
 #define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 3))
 
 #define cpu_relax()            barrier()
+#define cpu_relax_lowlatency()        cpu_relax()
 #define cpu_sync_pipeline()    asm volatile("sub pc, -2" : : : "memory")
 
 struct cpu_context {
index f81e7b9..ed30699 100644 (file)
@@ -18,7 +18,6 @@ config BLACKFIN
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_IDE
        select HAVE_KERNEL_GZIP if RAMKERNEL
        select HAVE_KERNEL_BZIP2 if RAMKERNEL
index a7e9bfd..fcec5ce 100644 (file)
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index d0e72e9..7acd466 100644 (file)
@@ -99,7 +99,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define        KSTK_ESP(tsk)   ((tsk) == current ? rdusp() : (tsk)->thread.usp)
 
 #define cpu_relax()            smp_mb()
-
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Get the Silicon Revision of the chip */
 static inline uint32_t __pure bfin_revid(void)
index 7eed00b..28d0595 100644 (file)
@@ -33,15 +33,6 @@ ENDPROC(__mcount)
  * function will be waiting there.  mmmm pie.
  */
 ENTRY(_ftrace_caller)
-# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       /* optional micro optimization: return if stopped */
-       p1.l = _function_trace_stop;
-       p1.h = _function_trace_stop;
-       r3 = [p1];
-       cc = r3 == 0;
-       if ! cc jump _ftrace_stub (bp);
-# endif
-
        /* save first/second/third function arg and the return register */
        [--sp] = r2;
        [--sp] = r0;
@@ -83,15 +74,6 @@ ENDPROC(_ftrace_caller)
 
 /* See documentation for _ftrace_caller */
 ENTRY(__mcount)
-# ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       /* optional micro optimization: return if stopped */
-       p1.l = _function_trace_stop;
-       p1.h = _function_trace_stop;
-       r3 = [p1];
-       cc = r3 == 0;
-       if ! cc jump _ftrace_stub (bp);
-# endif
-
        /* save third function arg early so we can do testing below */
        [--sp] = r2;
 
index 974e554..ea20320 100644 (file)
@@ -389,14 +389,6 @@ static int bfin_pmu_event_init(struct perf_event *event)
        if (attr->exclude_hv || attr->exclude_idle)
                return -EPERM;
 
-       /*
-        * All of the on-chip counters are "limited", in that they have
-        * no interrupts, and are therefore unable to do sampling without
-        * further work and timer assistance.
-        */
-       if (hwc->sample_period)
-               return -EINVAL;
-
        ret = 0;
        switch (attr->type) {
        case PERF_TYPE_RAW:
@@ -490,6 +482,13 @@ static int __init bfin_pmu_init(void)
 {
        int ret;
 
+       /*
+        * All of the on-chip counters are "limited", in that they have
+        * no interrupts, and are therefore unable to do sampling without
+        * further work and timer assistance.
+        */
+       pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
        ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
        if (!ret)
                perf_cpu_notifier(bfin_pmu_notifier);
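
Editor's note: this driver, like the Metag change further below, replaces a per-event sample_period check with a one-time PERF_PMU_CAP_NO_INTERRUPT capability, letting the perf core refuse sampling events for PMUs that lack an overflow interrupt. A hedged sketch of what the core-side check amounts to (struct and flag value are simplified stand-ins, not the kernel definitions):

#include <errno.h>
#include <stdio.h>

#define PMU_CAP_NO_INTERRUPT 0x1        /* illustrative flag value */

struct pmu { unsigned int capabilities; };

static int event_init(const struct pmu *pmu, long sample_period)
{
        /* no overflow IRQ -> no way to drive sampling */
        if (sample_period && (pmu->capabilities & PMU_CAP_NO_INTERRUPT))
                return -EINVAL;
        return 0;
}

int main(void)
{
        struct pmu pmu = { .capabilities = PMU_CAP_NO_INTERRUPT };

        printf("counting: %d\n", event_init(&pmu, 0));    /* 0: allowed */
        printf("sampling: %d\n", event_init(&pmu, 4096)); /* -EINVAL */
        return 0;
}
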
index ba35864..c9eec84 100644 (file)
@@ -145,7 +145,7 @@ SECTIONS
 
        .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 #else
-       .init.data : AT(__data_lma + __data_len)
+       .init.data : AT(__data_lma + __data_len + 32)
        {
                __sinitdata = .;
                INIT_DATA
index 63b0e4f..0ccf0cf 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index c65c6db..1e7290e 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index af58454..c7495dc 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index a021122..6b988ad 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 90138e6..1fe7ff2 100644 (file)
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1",  "pinctrl-adi2.0", NULL, "can1"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0",  "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi_alter"),
 #endif
        PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0",  "pinctrl-adi2.0", NULL, "nfc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", NULL, "keys_4x4"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "4bit",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "8bit",  "pinctrl-adi2.0", "keys_8x8grp", "keys"),
 };
 
 static int __init ezkit_init(void)
index 430b16d..6ab9515 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/delay.h>
index 9f777df..e862f78 100644 (file)
@@ -18,6 +18,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 88dee43..2de71e8 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/delay.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index 1ba4600..e2c0b02 100644 (file)
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 {
 #define CONFIG_SMC_GCTL_VAL     0x00000010
 
-       if (!devm_pinctrl_get_select_default(&pdev->dev))
-               return -EBUSY;
        bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
        bfin_write32(SMC_B0CTL, 0x01002011);
        bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 
 void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-       devm_pinctrl_put(pdev->dev.pins->p);
        bfin_write32(SMC_GCTL, 0);
 }
 
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0",  "pinctrl-adi2.0", NULL, "smc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", NULL, "ppi2_16b"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "8bit",  "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "16bit",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit",  "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
index 3ca0fb9..a1efd93 100644 (file)
@@ -10,6 +10,7 @@
 #define __MACH_BF609_PM_H__
 
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 extern int bfin609_pm_enter(suspend_state_t state);
 extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
 void bfin_sec_raise_irq(unsigned int sid);
 void coreb_enable(void);
 
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
 #endif
index 0cdd695..b1bfcf4 100644 (file)
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static int smc_pm_syscore_suspend(void)
 {
-       bf609_nor_flash_exit();
+       bf609_nor_flash_exit(NULL);
        return 0;
 }
 
 static void smc_pm_syscore_resume(void)
 {
-       bf609_nor_flash_init();
+       bf609_nor_flash_init(NULL);
 }
 
 static struct syscore_ops smc_pm_syscore_ops = {
index 867b7ce..1f94784 100644 (file)
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
 
        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
 
-       bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
        /* Enable interrupts IVG7-15 */
        bfin_irq_flags |= IMASK_IVG15 |
            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
index b9eb3da..f2ef31b 100644 (file)
@@ -121,6 +121,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(task) (task_pt_regs(task)->sp)
 
 #define cpu_relax()            do { } while (0)
+#define cpu_relax_lowlatency()        cpu_relax()
 
 extern const struct seq_operations cpuinfo_op;
 
index 15b815d..862126b 100644 (file)
@@ -63,6 +63,7 @@ static inline void release_thread(struct task_struct *dead_task)
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 void default_idle(void);
 
index 45a8254..d850113 100644 (file)
@@ -56,6 +56,7 @@ struct thread_struct {
 }
 
 #define cpu_relax() __vmyield()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /*
  * Decides where the kernel will search for a free chunk of vm space during
index 1a871b7..344387a 100644 (file)
@@ -242,7 +242,7 @@ struct ioc {
        struct pci_dev  *sac_only_dev;
 };
 
-static struct ioc *ioc_list;
+static struct ioc *ioc_list, *ioc_found;
 static int reserve_sba_gart = 1;
 
 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
@@ -1809,20 +1809,13 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
        { SX2000_IOC_ID, "sx2000", NULL },
 };
 
-static struct ioc *
-ioc_init(unsigned long hpa, void *handle)
+static void ioc_init(unsigned long hpa, struct ioc *ioc)
 {
-       struct ioc *ioc;
        struct ioc_iommu *info;
 
-       ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
-       if (!ioc)
-               return NULL;
-
        ioc->next = ioc_list;
        ioc_list = ioc;
 
-       ioc->handle = handle;
        ioc->ioc_hpa = ioremap(hpa, 0x1000);
 
        ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
@@ -1863,8 +1856,6 @@ ioc_init(unsigned long hpa, void *handle)
                "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
                ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
                hpa, ioc->iov_size >> 20, ioc->ibase);
-
-       return ioc;
 }
 
 
@@ -2031,22 +2022,21 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
 #endif
 }
 
-static int
-acpi_sba_ioc_add(struct acpi_device *device,
-                const struct acpi_device_id *not_used)
+static void acpi_sba_ioc_add(struct ioc *ioc)
 {
-       struct ioc *ioc;
+       acpi_handle handle = ioc->handle;
        acpi_status status;
        u64 hpa, length;
        struct acpi_device_info *adi;
 
-       status = hp_acpi_csr_space(device->handle, &hpa, &length);
+       ioc_found = ioc->next;
+       status = hp_acpi_csr_space(handle, &hpa, &length);
        if (ACPI_FAILURE(status))
-               return 1;
+               goto err;
 
-       status = acpi_get_object_info(device->handle, &adi);
+       status = acpi_get_object_info(handle, &adi);
        if (ACPI_FAILURE(status))
-               return 1;
+               goto err;
 
        /*
         * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
@@ -2067,13 +2057,13 @@ acpi_sba_ioc_add(struct acpi_device *device,
        if (!iovp_shift)
                iovp_shift = 12;
 
-       ioc = ioc_init(hpa, device->handle);
-       if (!ioc)
-               return 1;
-
+       ioc_init(hpa, ioc);
        /* setup NUMA node association */
-       sba_map_ioc_to_node(ioc, device->handle);
-       return 0;
+       sba_map_ioc_to_node(ioc, handle);
+       return;
+
+ err:
+       kfree(ioc);
 }
 
 static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
@@ -2081,9 +2071,26 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
        {"HWP0004", 0},
        {"", 0},
 };
+
+static int acpi_sba_ioc_attach(struct acpi_device *device,
+                              const struct acpi_device_id *not_used)
+{
+       struct ioc *ioc;
+
+       ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
+       if (!ioc)
+               return -ENOMEM;
+
+       ioc->next = ioc_found;
+       ioc_found = ioc;
+       ioc->handle = device->handle;
+       return 1;
+}
+
+
 static struct acpi_scan_handler acpi_sba_ioc_handler = {
        .ids    = hp_ioc_iommu_device_ids,
-       .attach = acpi_sba_ioc_add,
+       .attach = acpi_sba_ioc_attach,
 };
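
Editor's note: the rework above splits IOC bring-up into two phases: .attach() now only allocates and queues each device on ioc_found, and, as the sba_init() hunk just below shows, the pending list is drained once the rest of the SBA setup is ready. A self-contained sketch of this deferred-init queue pattern (names mirror the diff; the init work itself is elided):

#include <stdio.h>
#include <stdlib.h>

struct ioc {
        struct ioc *next;
        int handle;
};

static struct ioc *ioc_found;   /* queued at attach time */
static struct ioc *ioc_list;    /* fully initialised IOCs */

static int attach(int handle)
{
        struct ioc *ioc = calloc(1, sizeof(*ioc));

        if (!ioc)
                return -1;
        ioc->handle = handle;
        ioc->next = ioc_found;  /* push onto the pending list */
        ioc_found = ioc;
        return 0;
}

static void ioc_add(struct ioc *ioc)
{
        ioc_found = ioc->next;  /* pop from pending... */
        ioc->next = ioc_list;   /* ...push onto live */
        ioc_list = ioc;         /* (real init work would happen here) */
}

int main(void)
{
        attach(1);
        attach(2);
        while (ioc_found)       /* mirrors the drain loop in sba_init() */
                ioc_add(ioc_found);
        for (struct ioc *i = ioc_list; i; i = i->next)
                printf("ioc %d initialised\n", i->handle);
        return 0;
}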
 
 static int __init acpi_sba_ioc_init_acpi(void)
@@ -2118,9 +2125,12 @@ sba_init(void)
 #endif
 
        /*
-        * ioc_list should be populated by the acpi_sba_ioc_handler's .attach()
+        * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
         * routine, but that only happens if acpi_scan_init() has already run.
         */
+       while (ioc_found)
+               acpi_sba_ioc_add(ioc_found);
+
        if (!ioc_list) {
 #ifdef CONFIG_IA64_GENERIC
                /*
index efd1b92..c736713 100644 (file)
@@ -548,6 +548,7 @@ ia64_eoi (void)
 }
 
 #define cpu_relax()    ia64_hint(ia64_hint_pause)
+#define cpu_relax_lowlatency() cpu_relax()
 
 static inline int
 ia64_get_irr(unsigned int vector)
index 1dd275d..7b48587 100644 (file)
@@ -8,6 +8,7 @@
 #define force_o_largefile()    \
                (personality(current->personality) != PER_LINUX32)
 
+#include <linux/personality.h>
 #include <asm-generic/fcntl.h>
 
 #endif /* _ASM_IA64_FCNTL_H */
index 1fe9aa5..ec73b2c 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/vgaarb.h>
+#include <linux/screen_info.h>
 
 #include <asm/machvec.h>
 
@@ -37,6 +38,27 @@ static void pci_fixup_video(struct pci_dev *pdev)
                return;
        /* Maybe, this machine supports legacy memory map. */
 
+       if (!vga_default_device()) {
+               resource_size_t start, end;
+               int i;
+
+               /* Does firmware framebuffer belong to us? */
+               for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+                       if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+                               continue;
+
+                       start = pci_resource_start(pdev, i);
+                       end  = pci_resource_end(pdev, i);
+
+                       if (!start || !end)
+                               continue;
+
+                       if (screen_info.lfb_base >= start &&
+                           (screen_info.lfb_base + screen_info.lfb_size) < end)
+                               vga_set_default_device(pdev);
+               }
+       }
+
        /* Is VGA routed to us? */
        bus = pdev->bus;
        while (bus) {
index cad775a..b2eb484 100644 (file)
@@ -114,7 +114,7 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
                if (mode & BTE_USE_ANY) {
                        nasid_to_try[1] = my_nasid;
                } else {
-                       nasid_to_try[1] = (int)NULL;
+                       nasid_to_try[1] = 0;
                }
        } else {
                /* try local then remote */
@@ -122,7 +122,7 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification)
                if (mode & BTE_USE_ANY) {
                        nasid_to_try[1] = NASID_GET(dest);
                } else {
-                       nasid_to_try[1] = (int)NULL;
+                       nasid_to_try[1] = 0;
                }
        }
 
index 53b01b8..36182c8 100644 (file)
@@ -579,7 +579,7 @@ void sn_cpu_init(void)
                       (sn_prom_type == 1) ? "real" : "fake");
        }
 
-       memset(pda, 0, sizeof(pda));
+       memset(pda, 0, sizeof(*pda));
        if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2,
                                &sn_hub_info->nasid_bitmask,
                                &sn_hub_info->nasid_shift,
index 5767367..9f8fd9b 100644 (file)
@@ -133,5 +133,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)  ((tsk)->thread.sp)
 
 #define cpu_relax()    barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #endif /* _ASM_M32R_PROCESSOR_H */
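
Editor's note: many hunks in this section add cpu_relax_lowlatency() as a plain alias for cpu_relax(), presumably so spin-wait callers (such as the locking code's optimistic spinning) can ask for the low-latency variant where an architecture distinguishes the two. A compilable sketch of a typical caller (the barrier stand-in is illustrative, not an architecture's real definition):

#include <stdatomic.h>
#include <stdio.h>

#define cpu_relax()             __asm__ __volatile__("" ::: "memory")
#define cpu_relax_lowlatency()  cpu_relax()     /* the alias added above */

static _Atomic int ready = 1;   /* pre-set so the demo loop exits at once */

int main(void)
{
        while (!atomic_load_explicit(&ready, memory_order_acquire))
                cpu_relax_lowlatency();         /* spin politely until ready */
        puts("done spinning");
        return 0;
}
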
index b0768a6..20dda1d 100644 (file)
@@ -176,5 +176,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define task_pt_regs(tsk)      ((struct pt_regs *) ((tsk)->thread.esp0))
 
 #define cpu_relax()    barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #endif
index f868506..0931388 100644 (file)
 
 #include <asm/tlb.h>
 
-/* FIXME - when we get this compiling */
-/* erm, now that it's compiling, what do we do with it? */
-#define _KERNPG_TABLE 0
-
 extern const char bad_pmd_string[];
 
 #define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
index dbb118e..a547884 100644 (file)
@@ -921,7 +921,8 @@ L(nocon):
        jls     1f
        lsrl    #1,%d1
 1:
-       movel   %d1,m68k_init_mapped_size
+       lea     %pc@(m68k_init_mapped_size),%a0
+       movel   %d1,%a0@
        mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\
                %pc@(m68k_supervisor_cachemode)
 
index 958f1ad..3857737 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/export.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -30,6 +31,7 @@
 
 
 unsigned long (*mach_random_get_entropy)(void);
+EXPORT_SYMBOL_GPL(mach_random_get_entropy);
 
 
 /*
index 499b761..0b389a8 100644 (file)
@@ -13,7 +13,6 @@ config METAG
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZO
index a8a3747..881071c 100644 (file)
@@ -155,6 +155,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define user_stack_pointer(regs)        ((regs)->ctx.AX[0].U0)
 
 #define cpu_relax()     barrier()
+#define cpu_relax_lowlatency()  cpu_relax()
 
 extern void setup_priv(void);
 
index e70bff7..3acc288 100644 (file)
@@ -16,13 +16,6 @@ _mcount_wrapper:
        .global _ftrace_caller
        .type   _ftrace_caller,function
 _ftrace_caller:
-       MOVT    D0Re0,#HI(_function_trace_stop)
-       ADD     D0Re0,D0Re0,#LO(_function_trace_stop)
-       GETD    D0Re0,[D0Re0]
-       CMP     D0Re0,#0
-       BEQ     $Lcall_stub
-       MOV     PC,D0.4
-$Lcall_stub:
        MSETL   [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
        MOV     D1Ar1, D0.4
        MOV     D0Ar2, D1RtP
@@ -42,13 +35,6 @@ _ftrace_call:
        .global _mcount_wrapper
        .type   _mcount_wrapper,function
 _mcount_wrapper:
-       MOVT    D0Re0,#HI(_function_trace_stop)
-       ADD     D0Re0,D0Re0,#LO(_function_trace_stop)
-       GETD    D0Re0,[D0Re0]
-       CMP     D0Re0,#0
-       BEQ     $Lcall_mcount
-       MOV     PC,D0.4
-$Lcall_mcount:
        MSETL   [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4
        MOV     D1Ar1, D0.4
        MOV     D0Ar2, D1RtP
index 5cc4d4d..02c0873 100644 (file)
@@ -567,16 +567,6 @@ static int _hw_perf_event_init(struct perf_event *event)
        if (mapping == -1)
                return -EINVAL;
 
-       /*
-        * Early cores have "limited" counters - they have no overflow
-        * interrupts - and so are unable to do sampling without extra work
-        * and timer assistance.
-        */
-       if (metag_pmu->max_period == 0) {
-               if (hwc->sample_period)
-                       return -EINVAL;
-       }
-
        /*
         * Don't assign an index until the event is placed into the hardware.
         * -1 signifies that we're still deciding where to put it. On SMP
@@ -866,6 +856,15 @@ static int __init init_hw_perf_events(void)
        pr_info("enabled with %s PMU driver, %d counters available\n",
                        metag_pmu->name, metag_pmu->max_events);
 
+       /*
+        * Early cores have "limited" counters - they have no overflow
+        * interrupts - and so are unable to do sampling without extra work
+        * and timer assistance.
+        */
+       if (metag_pmu->max_period == 0) {
+               metag_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+       }
+
        /* Initialise the active events and reservation mutex */
        atomic_set(&metag_pmu->active_events, 0);
        mutex_init(&metag_pmu->reserve_mutex);
index 9ae0854..40e1c1d 100644 (file)
@@ -22,7 +22,6 @@ config MICROBLAZE
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_FUNCTION_TRACER
        select HAVE_MEMBLOCK
        select HAVE_MEMBLOCK_NODE_MAP
index 9d31b05..497a988 100644 (file)
@@ -22,6 +22,7 @@
 extern const struct seq_operations cpuinfo_op;
 
 # define cpu_relax()           barrier()
+# define cpu_relax_lowlatency()        cpu_relax()
 
 #define task_pt_regs(tsk) \
                (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1)
index bbcd253..fc7b48a 100644 (file)
@@ -27,6 +27,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;
 
+       if (unlikely(ftrace_graph_is_dead()))
+               return;
+
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
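
Editor's note: this hunk, like the MIPS one further below, adds an ftrace_graph_is_dead() bail-out ahead of the existing pause check in the graph-tracer entry path. A trivial userspace analogue of the guard ordering (both flags are stand-ins for the kernel state):

#include <stdio.h>
#include <stdbool.h>

static bool graph_dead;          /* ftrace_graph_is_dead() stand-in */
static int  graph_pause;         /* current->tracing_graph_pause stand-in */

static void prepare_return_hook(void)
{
        if (graph_dead)          /* new check: tracer shut down for good */
                return;
        if (graph_pause)         /* existing check: temporarily paused */
                return;
        puts("return hook installed");
}

int main(void)
{
        prepare_return_hook();   /* installs the hook */
        graph_dead = true;
        prepare_return_hook();   /* silently bails out */
        return 0;
}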
 
index fc1e132..fed9da5 100644 (file)
@@ -91,11 +91,6 @@ ENTRY(ftrace_caller)
 #endif /* CONFIG_DYNAMIC_FTRACE */
        SAVE_REGS
        swi     r15, r1, 0;
-       /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */
-       lwi     r5, r0, function_trace_stop;
-       bneid   r5, end;
-       nop;
-       /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 #ifndef CONFIG_DYNAMIC_FTRACE
        lwi     r5, r0, ftrace_graph_return;
index 7a469ac..10f270b 100644 (file)
@@ -15,7 +15,6 @@ config MIPS
        select HAVE_BPF_JIT if !CPU_MICROMIPS
        select ARCH_HAVE_CUSTOM_GPIO_H
        select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_C_RECORDMCOUNT
@@ -269,6 +268,7 @@ config LANTIQ
 config LASAT
        bool "LASAT Networks platforms"
        select CEVT_R4K
+       select CRC32
        select CSRC_R4K
        select DMA_NONCOHERENT
        select SYS_HAS_EARLY_PRINTK
index b0aa955..7a3fc67 100644 (file)
@@ -359,13 +359,17 @@ enum emulation_result {
 #define MIPS3_PG_FRAME         0x3fffffc0
 
 #define VPN2_MASK              0xffffe000
-#define TLB_IS_GLOBAL(x)       (((x).tlb_lo0 & MIPS3_PG_G) &&  \
+#define TLB_IS_GLOBAL(x)       (((x).tlb_lo0 & MIPS3_PG_G) &&          \
                                 ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x)            ((x).tlb_hi & VPN2_MASK)
 #define TLB_ASID(x)            ((x).tlb_hi & ASID_MASK)
-#define TLB_IS_VALID(x, va)    (((va) & (1 << PAGE_SHIFT))     \
-                                ? ((x).tlb_lo1 & MIPS3_PG_V)   \
+#define TLB_IS_VALID(x, va)    (((va) & (1 << PAGE_SHIFT))             \
+                                ? ((x).tlb_lo1 & MIPS3_PG_V)           \
                                 : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_HI_VPN2_HIT(x, y)  ((TLB_VPN2(x) & ~(x).tlb_mask) ==       \
+                                ((y) & VPN2_MASK & ~(x).tlb_mask))
+#define TLB_HI_ASID_HIT(x, y)  (TLB_IS_GLOBAL(x) ||                    \
+                                TLB_ASID(x) == ((y) & ASID_MASK))
 
 struct kvm_mips_tlb {
        long tlb_mask;
@@ -760,7 +764,7 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
                               struct kvm_vcpu *vcpu);
 
 /* Misc */
-extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
+extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
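
Editor's note: the new TLB_HI_VPN2_HIT()/TLB_HI_ASID_HIT() macros above factor out the match test: a virtual address hits an entry when the VPN2 bits agree under the entry's page mask and the ASID matches (or the entry is global). A userspace sketch with the same arithmetic (the 8-bit ASID mask is an assumption for illustration):

#include <stdio.h>

#define VPN2_MASK  0xffffe000UL
#define ASID_MASK  0xffUL              /* assumption: 8-bit ASIDs */

struct tlb_entry {
        unsigned long mask;            /* page-mask register, 0 for 4K pages */
        unsigned long hi;              /* EntryHi: VPN2 | ASID */
        int global;
};

static int tlb_hit(const struct tlb_entry *e, unsigned long entryhi)
{
        int vpn2_hit = ((e->hi & VPN2_MASK) & ~e->mask) ==
                       (entryhi & VPN2_MASK & ~e->mask);
        int asid_hit = e->global ||
                       (e->hi & ASID_MASK) == (entryhi & ASID_MASK);

        return vpn2_hit && asid_hit;
}

int main(void)
{
        struct tlb_entry e = { .mask = 0, .hi = 0x00400000UL | 0x5, .global = 0 };

        printf("%d\n", tlb_hit(&e, 0x00400000UL | 0x5)); /* 1: page+ASID match */
        printf("%d\n", tlb_hit(&e, 0x00400000UL | 0x6)); /* 0: ASID differs */
        return 0;
}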
 
 
index ad70cba..d5098bc 100644 (file)
@@ -367,6 +367,7 @@ unsigned long get_wchan(struct task_struct *p);
 #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
 
 #define cpu_relax()    barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /*
  * Return_address is a replacement for __builtin_return_address(count)
index 0b8bd28..4520adc 100644 (file)
@@ -19,6 +19,9 @@
 #include <asm/mipsmtregs.h>
 #include <asm/uaccess.h> /* for segment_eq() */
 
+extern void (*r4k_blast_dcache)(void);
+extern void (*r4k_blast_icache)(void);
+
 /*
  * This macro return a properly sign-extended address suitable as base address
  * for indexed cache operations.  Two issues here:
index f54bdbe..eeeb0f4 100644 (file)
@@ -32,8 +32,6 @@ struct sigcontext32 {
        __u32           sc_lo2;
        __u32           sc_hi3;
        __u32           sc_lo3;
-       __u64           sc_msaregs[32]; /* Most significant 64 bits */
-       __u32           sc_msa_csr;
 };
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
 #endif /* _ASM_SIGCONTEXT_H */
index f8d63b3..708c5d4 100644 (file)
@@ -67,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 #define Ip_u2s3u1(op)                                                  \
 void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
+#define Ip_s3s1s2(op)                                                  \
+void ISAOPC(op)(u32 **buf, int a, int b, int c)
+
 #define Ip_u2u1s3(op)                                                  \
 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
@@ -147,6 +150,7 @@ Ip_u2s3u1(_scd);
 Ip_u2s3u1(_sd);
 Ip_u2u1u3(_sll);
 Ip_u3u2u1(_sllv);
+Ip_s3s1s2(_slt);
 Ip_u2u1s3(_sltiu);
 Ip_u3u1u2(_sltu);
 Ip_u2u1u3(_sra);
index 4b71602..4bfdb9d 100644 (file)
@@ -273,6 +273,7 @@ enum mm_32a_minor_op {
        mm_and_op = 0x250,
        mm_or32_op = 0x290,
        mm_xor32_op = 0x310,
+       mm_slt_op = 0x350,
        mm_sltu_op = 0x390,
 };
 
index 681c176..6c9906f 100644 (file)
 #include <linux/types.h>
 #include <asm/sgidefs.h>
 
-/* Bits which may be set in sc_used_math */
-#define USEDMATH_FP    (1 << 0)
-#define USEDMATH_MSA   (1 << 1)
-
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
@@ -41,8 +37,6 @@ struct sigcontext {
        unsigned long           sc_lo2;
        unsigned long           sc_hi3;
        unsigned long           sc_lo3;
-       unsigned long long      sc_msaregs[32]; /* Most significant 64 bits */
-       unsigned long           sc_msa_csr;
 };
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -76,8 +70,6 @@ struct sigcontext {
        __u32   sc_used_math;
        __u32   sc_dsp;
        __u32   sc_reserved;
-       __u64   sc_msaregs[32];
-       __u32   sc_msa_csr;
 };
 
 
index 02f075d..4bb5107 100644 (file)
@@ -293,7 +293,6 @@ void output_sc_defines(void)
        OFFSET(SC_LO2, sigcontext, sc_lo2);
        OFFSET(SC_HI3, sigcontext, sc_hi3);
        OFFSET(SC_LO3, sigcontext, sc_lo3);
-       OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
        BLANK();
 }
 #endif
@@ -308,7 +307,6 @@ void output_sc_defines(void)
        OFFSET(SC_MDLO, sigcontext, sc_mdlo);
        OFFSET(SC_PC, sigcontext, sc_pc);
        OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
-       OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
        BLANK();
 }
 #endif
@@ -320,7 +318,6 @@ void output_sc32_defines(void)
        OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
        OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
        OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
-       OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
        BLANK();
 }
 #endif
index 60e7e5e..8b65387 100644 (file)
@@ -302,6 +302,9 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
            &return_to_handler;
        int faulted, insns;
 
+       if (unlikely(ftrace_graph_is_dead()))
+               return;
+
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
index 4858642..a734b2c 100644 (file)
@@ -126,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
 
        board_bind_eic_interrupt = &msc_bind_eic_interrupt;
 
-       for (; nirq >= 0; nirq--, imp++) {
+       for (; nirq > 0; nirq--, imp++) {
                int n = imp->im_irq;
 
                switch (imp->im_type) {
index 539b629..00940d1 100644 (file)
@@ -74,10 +74,6 @@ _mcount:
 #endif
 
        /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
-       lw      t1, function_trace_stop
-       bnez    t1, ftrace_stub
-        nop
-
        MCOUNT_SAVE_REGS
 #ifdef KBUILD_MCOUNT_RA_ADDRESS
        PTR_S   MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
@@ -105,9 +101,6 @@ ftrace_stub:
 #else  /* ! CONFIG_DYNAMIC_FTRACE */
 
 NESTED(_mcount, PT_SIZE, ra)
-       lw      t1, function_trace_stop
-       bnez    t1, ftrace_stub
-        nop
        PTR_LA  t1, ftrace_stub
        PTR_L   t2, ftrace_trace_function /* Prepare t2 for (1) */
        bne     t1, t2, static_trace
index 5aa4c6f..c4c2069 100644 (file)
@@ -101,7 +101,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
        if (!coupled_coherence)
                return;
 
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_inc(a);
 
        while (atomic_read(a) < online)
@@ -158,7 +158,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
 
        /* Indicate that this CPU might not be coherent */
        cpumask_clear_cpu(cpu, &cpu_coherent_mask);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        /* Create a non-coherent mapping of the core ready_count */
        core_ready_count = per_cpu(ready_count, core);
index 7181427..8352523 100644 (file)
@@ -13,7 +13,6 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
-#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
        END(_restore_fp_context32)
 #endif
 
-#ifdef CONFIG_CPU_HAS_MSA
-
-       .macro  save_sc_msareg  wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-       copy_u_d \tmp, \wr, 1
-       EX sd   \tmp, (\off+(\wr*8))(\sc)
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-       copy_u_w \tmp, \wr, 2
-       EX sw   \tmp, (\off+(\wr*8)+0)(\sc)
-       copy_u_w \tmp, \wr, 3
-       EX sw   \tmp, (\off+(\wr*8)+4)(\sc)
-#else /* CONFIG_CPU_BIG_ENDIAN */
-       copy_u_w \tmp, \wr, 2
-       EX sw   \tmp, (\off+(\wr*8)+4)(\sc)
-       copy_u_w \tmp, \wr, 3
-       EX sw   \tmp, (\off+(\wr*8)+0)(\sc)
-#endif
-       .endm
-
-/*
- * int _save_msa_context(struct sigcontext *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context)
-       save_sc_msareg  0, SC_MSAREGS, a0, t0
-       save_sc_msareg  1, SC_MSAREGS, a0, t0
-       save_sc_msareg  2, SC_MSAREGS, a0, t0
-       save_sc_msareg  3, SC_MSAREGS, a0, t0
-       save_sc_msareg  4, SC_MSAREGS, a0, t0
-       save_sc_msareg  5, SC_MSAREGS, a0, t0
-       save_sc_msareg  6, SC_MSAREGS, a0, t0
-       save_sc_msareg  7, SC_MSAREGS, a0, t0
-       save_sc_msareg  8, SC_MSAREGS, a0, t0
-       save_sc_msareg  9, SC_MSAREGS, a0, t0
-       save_sc_msareg  10, SC_MSAREGS, a0, t0
-       save_sc_msareg  11, SC_MSAREGS, a0, t0
-       save_sc_msareg  12, SC_MSAREGS, a0, t0
-       save_sc_msareg  13, SC_MSAREGS, a0, t0
-       save_sc_msareg  14, SC_MSAREGS, a0, t0
-       save_sc_msareg  15, SC_MSAREGS, a0, t0
-       save_sc_msareg  16, SC_MSAREGS, a0, t0
-       save_sc_msareg  17, SC_MSAREGS, a0, t0
-       save_sc_msareg  18, SC_MSAREGS, a0, t0
-       save_sc_msareg  19, SC_MSAREGS, a0, t0
-       save_sc_msareg  20, SC_MSAREGS, a0, t0
-       save_sc_msareg  21, SC_MSAREGS, a0, t0
-       save_sc_msareg  22, SC_MSAREGS, a0, t0
-       save_sc_msareg  23, SC_MSAREGS, a0, t0
-       save_sc_msareg  24, SC_MSAREGS, a0, t0
-       save_sc_msareg  25, SC_MSAREGS, a0, t0
-       save_sc_msareg  26, SC_MSAREGS, a0, t0
-       save_sc_msareg  27, SC_MSAREGS, a0, t0
-       save_sc_msareg  28, SC_MSAREGS, a0, t0
-       save_sc_msareg  29, SC_MSAREGS, a0, t0
-       save_sc_msareg  30, SC_MSAREGS, a0, t0
-       save_sc_msareg  31, SC_MSAREGS, a0, t0
-       jr      ra
-        li     v0, 0
-       END(_save_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _save_msa_context32(struct sigcontext32 *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context32)
-       save_sc_msareg  0, SC32_MSAREGS, a0, t0
-       save_sc_msareg  1, SC32_MSAREGS, a0, t0
-       save_sc_msareg  2, SC32_MSAREGS, a0, t0
-       save_sc_msareg  3, SC32_MSAREGS, a0, t0
-       save_sc_msareg  4, SC32_MSAREGS, a0, t0
-       save_sc_msareg  5, SC32_MSAREGS, a0, t0
-       save_sc_msareg  6, SC32_MSAREGS, a0, t0
-       save_sc_msareg  7, SC32_MSAREGS, a0, t0
-       save_sc_msareg  8, SC32_MSAREGS, a0, t0
-       save_sc_msareg  9, SC32_MSAREGS, a0, t0
-       save_sc_msareg  10, SC32_MSAREGS, a0, t0
-       save_sc_msareg  11, SC32_MSAREGS, a0, t0
-       save_sc_msareg  12, SC32_MSAREGS, a0, t0
-       save_sc_msareg  13, SC32_MSAREGS, a0, t0
-       save_sc_msareg  14, SC32_MSAREGS, a0, t0
-       save_sc_msareg  15, SC32_MSAREGS, a0, t0
-       save_sc_msareg  16, SC32_MSAREGS, a0, t0
-       save_sc_msareg  17, SC32_MSAREGS, a0, t0
-       save_sc_msareg  18, SC32_MSAREGS, a0, t0
-       save_sc_msareg  19, SC32_MSAREGS, a0, t0
-       save_sc_msareg  20, SC32_MSAREGS, a0, t0
-       save_sc_msareg  21, SC32_MSAREGS, a0, t0
-       save_sc_msareg  22, SC32_MSAREGS, a0, t0
-       save_sc_msareg  23, SC32_MSAREGS, a0, t0
-       save_sc_msareg  24, SC32_MSAREGS, a0, t0
-       save_sc_msareg  25, SC32_MSAREGS, a0, t0
-       save_sc_msareg  26, SC32_MSAREGS, a0, t0
-       save_sc_msareg  27, SC32_MSAREGS, a0, t0
-       save_sc_msareg  28, SC32_MSAREGS, a0, t0
-       save_sc_msareg  29, SC32_MSAREGS, a0, t0
-       save_sc_msareg  30, SC32_MSAREGS, a0, t0
-       save_sc_msareg  31, SC32_MSAREGS, a0, t0
-       jr      ra
-        li     v0, 0
-       END(_save_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-       .macro restore_sc_msareg        wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-       EX ld   \tmp, (\off+(\wr*8))(\sc)
-       insert_d \wr, 1, \tmp
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-       EX lw   \tmp, (\off+(\wr*8)+0)(\sc)
-       insert_w \wr, 2, \tmp
-       EX lw   \tmp, (\off+(\wr*8)+4)(\sc)
-       insert_w \wr, 3, \tmp
-#else /* CONFIG_CPU_BIG_ENDIAN */
-       EX lw   \tmp, (\off+(\wr*8)+4)(\sc)
-       insert_w \wr, 2, \tmp
-       EX lw   \tmp, (\off+(\wr*8)+0)(\sc)
-       insert_w \wr, 3, \tmp
-#endif
-       .endm
-
-/*
- * int _restore_msa_context(struct sigcontext *sc)
- */
-LEAF(_restore_msa_context)
-       restore_sc_msareg       0, SC_MSAREGS, a0, t0
-       restore_sc_msareg       1, SC_MSAREGS, a0, t0
-       restore_sc_msareg       2, SC_MSAREGS, a0, t0
-       restore_sc_msareg       3, SC_MSAREGS, a0, t0
-       restore_sc_msareg       4, SC_MSAREGS, a0, t0
-       restore_sc_msareg       5, SC_MSAREGS, a0, t0
-       restore_sc_msareg       6, SC_MSAREGS, a0, t0
-       restore_sc_msareg       7, SC_MSAREGS, a0, t0
-       restore_sc_msareg       8, SC_MSAREGS, a0, t0
-       restore_sc_msareg       9, SC_MSAREGS, a0, t0
-       restore_sc_msareg       10, SC_MSAREGS, a0, t0
-       restore_sc_msareg       11, SC_MSAREGS, a0, t0
-       restore_sc_msareg       12, SC_MSAREGS, a0, t0
-       restore_sc_msareg       13, SC_MSAREGS, a0, t0
-       restore_sc_msareg       14, SC_MSAREGS, a0, t0
-       restore_sc_msareg       15, SC_MSAREGS, a0, t0
-       restore_sc_msareg       16, SC_MSAREGS, a0, t0
-       restore_sc_msareg       17, SC_MSAREGS, a0, t0
-       restore_sc_msareg       18, SC_MSAREGS, a0, t0
-       restore_sc_msareg       19, SC_MSAREGS, a0, t0
-       restore_sc_msareg       20, SC_MSAREGS, a0, t0
-       restore_sc_msareg       21, SC_MSAREGS, a0, t0
-       restore_sc_msareg       22, SC_MSAREGS, a0, t0
-       restore_sc_msareg       23, SC_MSAREGS, a0, t0
-       restore_sc_msareg       24, SC_MSAREGS, a0, t0
-       restore_sc_msareg       25, SC_MSAREGS, a0, t0
-       restore_sc_msareg       26, SC_MSAREGS, a0, t0
-       restore_sc_msareg       27, SC_MSAREGS, a0, t0
-       restore_sc_msareg       28, SC_MSAREGS, a0, t0
-       restore_sc_msareg       29, SC_MSAREGS, a0, t0
-       restore_sc_msareg       30, SC_MSAREGS, a0, t0
-       restore_sc_msareg       31, SC_MSAREGS, a0, t0
-       jr      ra
-        li     v0, 0
-       END(_restore_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _restore_msa_context32(struct sigcontext32 *sc)
- */
-LEAF(_restore_msa_context32)
-       restore_sc_msareg       0, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       1, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       2, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       3, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       4, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       5, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       6, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       7, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       8, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       9, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       10, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       11, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       12, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       13, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       14, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       15, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       16, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       17, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       18, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       19, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       20, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       21, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       22, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       23, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       24, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       25, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       26, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       27, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       28, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       29, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       30, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       31, SC32_MSAREGS, a0, t0
-       jr      ra
-        li     v0, 0
-       END(_restore_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-#endif /* CONFIG_CPU_HAS_MSA */
-
        .set    reorder
 
        .type   fault@function
index 33133d3..9e60d11 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/cpu-features.h>
@@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
 
-extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
-
 struct sigframe {
        u32 sf_ass[4];          /* argument save space for o32 */
        u32 sf_pad[2];          /* Was: signal trampoline */
@@ -99,61 +95,21 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
        return err;
 }
 
-/*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
-{
-       int i;
-       int err = 0;
-
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               err |=
-                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-                              &sc->sc_msaregs[i]);
-       }
-       err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-       return err;
-}
-
-static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
-{
-       int i;
-       int err = 0;
-       u64 val;
-
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               err |= __get_user(val, &sc->sc_msaregs[i]);
-               set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-       }
-       err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-       return err;
-}
-
 /*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc,
-                                    unsigned used_math)
+static int protected_save_fp_context(struct sigcontext __user *sc)
 {
        int err;
-       bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = save_fp_context(sc);
-                       if (save_msa && !err)
-                               err = _save_msa_context(sc);
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_to_sigcontext(sc);
-                       if (save_msa && !err)
-                               err = copy_msa_to_sigcontext(sc);
                }
                if (likely(!err))
                        break;
@@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
         * EVA does not have FPU EVA instructions so saving fpu context directly
         * does not work.
         */
-       disable_msa();
        lose_fpu(1);
        err = save_fp_context(sc); /* this might fail */
-       if (save_msa && !err)
-               err = copy_msa_to_sigcontext(sc);
 #endif
        return err;
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc,
-                                       unsigned used_math)
+static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
        int err, tmp __maybe_unused;
-       bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = restore_fp_context(sc);
-                       if (restore_msa && !err) {
-                               enable_msa();
-                               err = _restore_msa_context(sc);
-                       } else {
-                               /* signal handler may have used MSA */
-                               disable_msa();
-                       }
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_from_sigcontext(sc);
-                       if (!err && (used_math & USEDMATH_MSA))
-                               err = copy_msa_from_sigcontext(sc);
                }
                if (likely(!err))
                        break;
@@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
         * EVA does not have FPU EVA instructions so restoring fpu context
         * directly does not work.
         */
-       enable_msa();
        lose_fpu(0);
        err = restore_fp_context(sc); /* this might fail */
-       if (restore_msa && !err)
-               err = copy_msa_from_sigcontext(sc);
 #endif
        return err;
 }
@@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
        }
 
-       used_math = used_math() ? USEDMATH_FP : 0;
-       used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+       used_math = !!used_math();
        err |= __put_user(used_math, &sc->sc_used_math);
 
        if (used_math) {
@@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                 * Save FPU state to signal context. Signal handler
                 * will "inherit" current FPU state.
                 */
-               err |= protected_save_fp_context(sc, used_math);
+               err |= protected_save_fp_context(sc);
        }
        return err;
 }
@@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
 }
 
 static int
-check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
+check_and_restore_fp_context(struct sigcontext __user *sc)
 {
        int err, sig;
 
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
-       err |= protected_restore_fp_context(sc, used_math);
+       err |= protected_restore_fp_context(sc);
        return err ?: sig;
 }
 
@@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
        if (used_math) {
                /* restore fpu context if we have used it before */
                if (!err)
-                       err = check_and_restore_fp_context(sc, used_math);
+                       err = check_and_restore_fp_context(sc);
        } else {
-               /* signal handler may have used FPU or MSA. Disable them. */
-               disable_msa();
+               /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
        }
 
index 299f956..bae2e6e 100644 (file)
@@ -30,7 +30,6 @@
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
@@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
 
-extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
-
 /*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
  */
@@ -114,60 +110,20 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
        return err;
 }
 
-/*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
-{
-       int i;
-       int err = 0;
-
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               err |=
-                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-                              &sc->sc_msaregs[i]);
-       }
-       err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-       return err;
-}
-
-static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
-{
-       int i;
-       int err = 0;
-       u64 val;
-
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               err |= __get_user(val, &sc->sc_msaregs[i]);
-               set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-       }
-       err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-       return err;
-}
-
 /*
  * sigcontext handlers
  */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc,
-                                      unsigned used_math)
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 {
        int err;
-       bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = save_fp_context32(sc);
-                       if (save_msa && !err)
-                               err = _save_msa_context32(sc);
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_to_sigcontext32(sc);
-                       if (save_msa && !err)
-                               err = copy_msa_to_sigcontext32(sc);
                }
                if (likely(!err))
                        break;
@@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
        return err;
 }
 
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
-                                         unsigned used_math)
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
        int err, tmp __maybe_unused;
-       bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = restore_fp_context32(sc);
-                       if (restore_msa && !err) {
-                               enable_msa();
-                               err = _restore_msa_context32(sc);
-                       } else {
-                               /* signal handler may have used MSA */
-                               disable_msa();
-                       }
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_from_sigcontext32(sc);
-                       if (restore_msa && !err)
-                               err = copy_msa_from_sigcontext32(sc);
                }
                if (likely(!err))
                        break;
@@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
                err |= __put_user(mflo3(), &sc->sc_lo3);
        }
 
-       used_math = used_math() ? USEDMATH_FP : 0;
-       used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+       used_math = !!used_math();
        err |= __put_user(used_math, &sc->sc_used_math);
 
        if (used_math) {
@@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
                 * Save FPU state to signal context.  Signal handler
                 * will "inherit" current FPU state.
                 */
-               err |= protected_save_fp_context32(sc, used_math);
+               err |= protected_save_fp_context32(sc);
        }
        return err;
 }
 
 static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc,
-                              unsigned used_math)
+check_and_restore_fp_context32(struct sigcontext32 __user *sc)
 {
        int err, sig;
 
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
-       err |= protected_restore_fp_context32(sc, used_math);
+       err |= protected_restore_fp_context32(sc);
        return err ?: sig;
 }
 
@@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
        if (used_math) {
                /* restore fpu context if we have used it before */
                if (!err)
-                       err = check_and_restore_fp_context32(sc, used_math);
+                       err = check_and_restore_fp_context32(sc);
        } else {
-               /* signal handler may have used FPU or MSA. Disable them. */
-               disable_msa();
+               /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
        }
 
index df0598d..949f2c6 100644 (file)
@@ -301,7 +301,7 @@ static int cps_cpu_disable(void)
 
        core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
        atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
        set_cpu_online(cpu, false);
        cpu_clear(cpu, cpu_callin_map);
 
index 78d87bb..401fe02 100644 (file)
@@ -5,9 +5,9 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
 
-kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \
-           kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \
-           kvm_mips_dyntrans.o kvm_trap_emul.o
+kvm-objs := $(common-objs) mips.o emulate.o locore.o \
+           interrupt.o stats.o commpage.o \
+           dyntrans.o trap_emul.o
 
 obj-$(CONFIG_KVM)      += kvm.o
-obj-y                  += kvm_cb.o kvm_tlb.o
+obj-y                  += callback.o tlb.o
diff --git a/arch/mips/kvm/callback.c b/arch/mips/kvm/callback.c
new file mode 100644 (file)
index 0000000..313c2e3
--- /dev/null
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
new file mode 100644 (file)
index 0000000..2d6e976
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * commpage, currently used for Virtual COP0 registers.
+ * Mapped into the guest kernel @ 0x0.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "commpage.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+       struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
+       /* Specific init values for fields */
+       vcpu->arch.cop0 = &page->cop0;
+}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
new file mode 100644 (file)
index 0000000..08c5fa2
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: commpage: mapped into guest kernel space
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+       /* COP0 state is mapped into Guest kernel via commpage */
+       struct mips_coproc cop0;
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
new file mode 100644 (file)
index 0000000..521121b
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Binary patching of privileged instructions to reduce traps.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/cacheflush.h>
+
+#include "commpage.h"
+
+#define SYNCI_TEMPLATE  0x041f0000
+#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
+#define SYNCI_OFFSET(x) ((x) & 0xffff)
+
+#define LW_TEMPLATE     0x8c000000
+#define CLEAR_TEMPLATE  0x00000020
+#define SW_TEMPLATE     0xac000000
+
+int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+                              struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = 0x0;
+
+       /* Replace the CACHE instruction, with a NOP */
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+
+       return result;
+}
+
+/*
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
+ */
+int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+                           struct kvm_vcpu *vcpu)
+{
+       int result = 0;
+       unsigned long kseg0_opc;
+       uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
+
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       synci_inst |= (base << 21);
+       synci_inst |= offset;
+
+       kseg0_opc =
+           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                      (vcpu, (unsigned long) opc));
+       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
+       local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+
+       return result;
+}
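
The rewrite above reuses the base register and 16-bit offset fields of the
original CACHE instruction, so the resulting synci targets the same virtual
address. A worked example of the encoding (the register choice is assumed for
illustration): for base = a0 (register 4) and offset = 8,

	synci_inst = 0x041f0000		/* SYNCI_TEMPLATE: regimm op, rt = 0x1f */
	           | (4 << 21)		/* base register in bits 25:21          */
	           | 8;			/* 16-bit offset                        */
	/* synci_inst == 0x049f0008, i.e. "synci 8(a0)" */
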
+
+int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mfc0_inst;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+               mfc0_inst = CLEAR_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+       } else {
+               mfc0_inst = LW_TEMPLATE;
+               mfc0_inst |= ((rt & 0x1f) << 16);
+               mfc0_inst |=
+                   offsetof(struct mips_coproc,
+                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
+                                                     cop0);
+       }
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
+               local_flush_icache_range((unsigned long)opc,
+                                        (unsigned long)opc + 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
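
Since the commpage is mapped at guest virtual address 0 (see commpage.c above),
the rewritten load needs no base register fix-up: LW_TEMPLATE encodes
"lw rt, imm($zero)" and only the rt field and the immediate are filled in with
the offset of the virtualized COP0 register inside the commpage. A sketch under
that assumption, for "mfc0 t0, $12" (rt = 8, rd = 12, sel = 0):

	mfc0_inst = 0x8c000000				/* lw rt, imm($zero) */
	          | (8 << 16)				/* rt = t0           */
	          | (offsetof(struct mips_coproc, reg[12][0])
	             + offsetof(struct kvm_mips_commpage, cop0));
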
+
+int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       int32_t rt, rd, sel;
+       uint32_t mtc0_inst = SW_TEMPLATE;
+       unsigned long kseg0_opc, flags;
+
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+
+       mtc0_inst |= ((rt & 0x1f) << 16);
+       mtc0_inst |=
+           offsetof(struct mips_coproc,
+                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
+
+       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               kseg0_opc =
+                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
+                              (vcpu, (unsigned long) opc));
+               memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
+       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
+               local_flush_icache_range((unsigned long)opc,
+                                        (unsigned long)opc + 32);
+               local_irq_restore(flags);
+       } else {
+               kvm_err("%s: Invalid address: %p\n", __func__, opc);
+               return -EFAULT;
+       }
+
+       return 0;
+}
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
new file mode 100644 (file)
index 0000000..fb3e8df
--- /dev/null
@@ -0,0 +1,2319 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Instruction/Exception emulation
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ktime.h>
+#include <linux/kvm_host.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "opcode.h"
+#include "interrupt.h"
+#include "commpage.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address and emulate the branch, if required.
+ * This function should only be called when a branch delay slot is active.
+ */
+unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
+       unsigned long instpc)
+{
+       unsigned int dspcontrol;
+       union mips_instruction insn;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       long epc = instpc;
+       long nextpc = KVM_INVALID_INST;
+
+       if (epc & 3)
+               goto unaligned;
+
+       /* Read the instruction */
+       insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+
+       if (insn.word == KVM_INVALID_INST)
+               return KVM_INVALID_INST;
+
+       switch (insn.i_format.opcode) {
+               /* jr and jalr are in r_format format. */
+       case spec_op:
+               switch (insn.r_format.func) {
+               case jalr_op:
+                       arch->gprs[insn.r_format.rd] = epc + 8;
+                       /* Fall through */
+               case jr_op:
+                       nextpc = arch->gprs[insn.r_format.rs];
+                       break;
+               }
+               break;
+
+               /*
+                * This group contains:
+                * bltz_op, bgez_op, bltzl_op, bgezl_op,
+                * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+                */
+       case bcond_op:
+               switch (insn.i_format.rt) {
+               case bltz_op:
+               case bltzl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgez_op:
+               case bgezl_op:
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bltzal_op:
+               case bltzall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] < 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+
+               case bgezal_op:
+               case bgezall_op:
+                       arch->gprs[31] = epc + 8;
+                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               case bposge32_op:
+                       if (!cpu_has_dsp)
+                               goto sigill;
+
+                       dspcontrol = rddsp(0x01);
+
+                       if (dspcontrol >= 32)
+                               epc = epc + 4 + (insn.i_format.simmediate << 2);
+                       else
+                               epc += 8;
+                       nextpc = epc;
+                       break;
+               }
+               break;
+
+               /* These are unconditional and in j_format. */
+       case jal_op:
+               arch->gprs[31] = instpc + 8;
+       case j_op:
+               epc += 4;
+               epc >>= 28;
+               epc <<= 28;
+               epc |= (insn.j_format.target << 2);
+               nextpc = epc;
+               break;
+
+               /* These are conditional and in i_format. */
+       case beq_op:
+       case beql_op:
+               if (arch->gprs[insn.i_format.rs] ==
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bne_op:
+       case bnel_op:
+               if (arch->gprs[insn.i_format.rs] !=
+                   arch->gprs[insn.i_format.rt])
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case blez_op:           /* not really i_format */
+       case blezl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] <= 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+       case bgtz_op:
+       case bgtzl_op:
+               /* rt field assumed to be zero */
+               if ((long)arch->gprs[insn.i_format.rs] > 0)
+                       epc = epc + 4 + (insn.i_format.simmediate << 2);
+               else
+                       epc += 8;
+               nextpc = epc;
+               break;
+
+               /* And now the FPA/cp1 branch instructions. */
+       case cop1_op:
+               kvm_err("%s: unsupported cop1_op\n", __func__);
+               break;
+       }
+
+       return nextpc;
+
+unaligned:
+       kvm_err("%s: unaligned epc\n", __func__);
+       return nextpc;
+
+sigill:
+       kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
+       return nextpc;
+}
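
As a worked example of the arithmetic above: a taken beq with a signed 16-bit
offset of +0x10 at guest PC 0x80001000 resolves to
nextpc = 0x80001000 + 4 + (0x10 << 2) = 0x80001044, while the not-taken case
skips both the branch and its delay slot (epc += 8). Note that the
branch-likely forms (beql, bnel, ...) are resolved identically in this helper.
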
+
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long branch_pc;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (cause & CAUSEF_BD) {
+               branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
+               if (branch_pc == KVM_INVALID_INST) {
+                       er = EMULATE_FAIL;
+               } else {
+                       vcpu->arch.pc = branch_pc;
+                       kvm_debug("BD update_pc(): New PC: %#lx\n",
+                                 vcpu->arch.pc);
+               }
+       } else
+               vcpu->arch.pc += 4;
+
+       kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+       return er;
+}
+
+/**
+ * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
+ * @vcpu:      Virtual CPU.
+ *
+ * Returns:    1 if the CP0_Count timer is disabled by either the guest
+ *             CP0_Cause.DC bit or the count_ctl.DC bit.
+ *             0 otherwise (in which case CP0_Count timer is running).
+ */
+static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
+               (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
+}
+
+/**
+ * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
+ *
+ * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+{
+       s64 now_ns, periods;
+       u64 delta;
+
+       now_ns = ktime_to_ns(now);
+       delta = now_ns + vcpu->arch.count_dyn_bias;
+
+       if (delta >= vcpu->arch.count_period) {
+               /* If delta is out of safe range the bias needs adjusting */
+               periods = div64_s64(now_ns, vcpu->arch.count_period);
+               vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
+               /* Recalculate delta with new bias */
+               delta = now_ns + vcpu->arch.count_dyn_bias;
+       }
+
+       /*
+        * We've ensured that:
+        *   delta < count_period
+        *
+        * Therefore the intermediate delta*count_hz will never overflow since
+        * at the boundary condition:
+        *   delta = count_period
+        *   delta = NSEC_PER_SEC * 2^32 / count_hz
+        *   delta * count_hz = NSEC_PER_SEC * 2^32
+        */
+       return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
+}
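
As a numeric check of the overflow argument in the comment above, take the
100 MHz default programmed by kvm_mips_init_count() later in this file:

	delta * count_hz < count_period * count_hz
	                 = NSEC_PER_SEC * 2^32
	                 = 10^9 * 2^32  (roughly 2^62, well within a u64)

so the intermediate product cannot overflow, and the div_u64() by NSEC_PER_SEC
brings the result back into 32 bits by construction.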
+
+/**
+ * kvm_mips_count_time() - Get effective current time.
+ * @vcpu:      Virtual CPU.
+ *
+ * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
+ * except when the master disable bit is set in count_ctl, in which case it is
+ * count_resume, i.e. the time that the count was disabled.
+ *
+ * Returns:    Effective monotonic ktime for CP0_Count.
+ */
+static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+{
+       if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+               return vcpu->arch.count_resume;
+
+       return ktime_get();
+}
+
+/**
+ * kvm_mips_read_count_running() - Read the current count value as if running.
+ * @vcpu:      Virtual CPU.
+ * @now:       Kernel time to read CP0_Count at.
+ *
+ * Returns the current guest CP0_Count register at time @now, and services a
+ * timer interrupt that is pending but hasn't been handled yet.
+ *
+ * Returns:    The current value of the guest CP0_Count register.
+ */
+static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+{
+       ktime_t expires;
+       int running;
+
+       /* Is the hrtimer pending? */
+       expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
+       if (ktime_compare(now, expires) >= 0) {
+               /*
+                * Cancel it while we handle it so there's no chance of
+                * interference with the timeout handler.
+                */
+               running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+               /* Nothing should be waiting on the timeout */
+               kvm_mips_callbacks->queue_timer_int(vcpu);
+
+               /*
+                * Restart the timer if it was running based on the expiry time
+                * we read, so that we don't push it back 2 periods.
+                */
+               if (running) {
+                       expires = ktime_add_ns(expires,
+                                              vcpu->arch.count_period);
+                       hrtimer_start(&vcpu->arch.comparecount_timer, expires,
+                                     HRTIMER_MODE_ABS);
+               }
+       }
+
+       /* Return the biased and scaled guest CP0_Count */
+       return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+}
+
+/**
+ * kvm_mips_read_count() - Read the current count value.
+ * @vcpu:      Virtual CPU.
+ *
+ * Read the current guest CP0_Count value, taking into account whether the timer
+ * is stopped.
+ *
+ * Returns:    The current guest CP0_Count value.
+ */
+uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       /* If count disabled just read static copy of count */
+       if (kvm_mips_count_disabled(vcpu))
+               return kvm_read_c0_guest_count(cop0);
+
+       return kvm_mips_read_count_running(vcpu, ktime_get());
+}
+
+/**
+ * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
+ * @vcpu:      Virtual CPU.
+ * @count:     Output pointer for CP0_Count value at point of freeze.
+ *
+ * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
+ * at the point it was frozen. It is guaranteed that any pending interrupts at
+ * the point it was frozen are handled, and none after that point.
+ *
+ * This is useful where the time/CP0_Count is needed in the calculation of the
+ * new parameters.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ *
+ * Returns:    The ktime at the point of freeze.
+ */
+static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
+                                      uint32_t *count)
+{
+       ktime_t now;
+
+       /* stop hrtimer before finding time */
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+       now = ktime_get();
+
+       /* find count at this point and handle pending hrtimer */
+       *count = kvm_mips_read_count_running(vcpu, now);
+
+       return now;
+}
+
+/**
+ * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
+ * @vcpu:      Virtual CPU.
+ * @now:       ktime at point of resume.
+ * @count:     CP0_Count at point of resume.
+ *
+ * Resumes the timer and updates the timer expiry based on @now and @count.
+ * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
+ * parameters need to be changed.
+ *
+ * It is guaranteed that a timer interrupt immediately after resume will be
+ * handled, but not if CP0_Compare is exactly at @count. That case is already
+ * handled by kvm_mips_freeze_hrtimer().
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+                                   ktime_t now, uint32_t count)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t compare;
+       u64 delta;
+       ktime_t expire;
+
+       /* Calculate timeout (wrap 0 to 2^32) */
+       compare = kvm_read_c0_guest_compare(cop0);
+       delta = (u64)(uint32_t)(compare - count - 1) + 1;
+       delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
+       expire = ktime_add_ns(now, delta);
+
+       /* Update hrtimer to use new timeout */
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+       hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
+}
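
The "(u64)(uint32_t)(compare - count - 1) + 1" expression deliberately maps a
zero-tick timeout to a full CP0_Count period: the subtraction is performed in
32 bits, so compare == count wraps to 0xffffffff and delta becomes 2^32 ticks
rather than 0, while compare == count + 1 gives a delta of exactly 1 tick. A
tiny check of the wrap arithmetic (illustrative, not part of the patch):

	uint32_t compare = 0x1000, count = 0x1000;
	u64 ticks = (u64)(uint32_t)(compare - count - 1) + 1;	/* 1ULL << 32 */
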
+
+/**
+ * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
+ * @vcpu:      Virtual CPU.
+ *
+ * Recalculates and updates the expiry time of the hrtimer. This can be used
+ * after timer parameters have been altered in ways that do not depend on the
+ * time at which the change occurs (in those cases kvm_mips_freeze_hrtimer() and
+ * kvm_mips_resume_hrtimer() are used directly).
+ *
+ * It is guaranteed that no timer interrupts will be lost in the process.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
+{
+       ktime_t now;
+       uint32_t count;
+
+       /*
+        * freeze_hrtimer takes care of timer interrupts <= count, and
+        * resume_hrtimer takes care of timer interrupts > count.
+        */
+       now = kvm_mips_freeze_hrtimer(vcpu, &count);
+       kvm_mips_resume_hrtimer(vcpu, now, count);
+}
+
+/**
+ * kvm_mips_write_count() - Modify the count and update timer.
+ * @vcpu:      Virtual CPU.
+ * @count:     Guest CP0_Count value to set.
+ *
+ * Sets the CP0_Count value and updates the timer accordingly.
+ */
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       ktime_t now;
+
+       /* Calculate bias */
+       now = kvm_mips_count_time(vcpu);
+       vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+       if (kvm_mips_count_disabled(vcpu))
+               /* The timer's disabled, adjust the static count */
+               kvm_write_c0_guest_count(cop0, count);
+       else
+               /* Update timeout */
+               kvm_mips_resume_hrtimer(vcpu, now, count);
+}
+
+/**
+ * kvm_mips_init_count() - Initialise timer.
+ * @vcpu:      Virtual CPU.
+ *
+ * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
+ * it going if it's enabled.
+ */
+void kvm_mips_init_count(struct kvm_vcpu *vcpu)
+{
+       /* 100 MHz */
+       vcpu->arch.count_hz = 100*1000*1000;
+       vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
+                                         vcpu->arch.count_hz);
+       vcpu->arch.count_dyn_bias = 0;
+
+       /* Starting at 0 */
+       kvm_mips_write_count(vcpu, 0);
+}
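
With this 100 MHz default, count_period = (10^9 << 32) / 10^8 ns, roughly
42.95 seconds, which is exactly how long the 32-bit CP0_Count register takes
to wrap at that frequency.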
+
+/**
+ * kvm_mips_set_count_hz() - Update the frequency of the timer.
+ * @vcpu:      Virtual CPU.
+ * @count_hz:  Frequency of CP0_Count timer in Hz.
+ *
+ * Change the frequency of the CP0_Count timer. This is done atomically so that
+ * CP0_Count is continuous and no timer interrupt is lost.
+ *
+ * Returns:    -EINVAL if @count_hz is out of range.
+ *             0 on success.
+ */
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int dc;
+       ktime_t now;
+       u32 count;
+
+       /* ensure the frequency is in a sensible range... */
+       if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
+               return -EINVAL;
+       /* ... and has actually changed */
+       if (vcpu->arch.count_hz == count_hz)
+               return 0;
+
+       /* Safely freeze timer so we can keep it continuous */
+       dc = kvm_mips_count_disabled(vcpu);
+       if (dc) {
+               now = kvm_mips_count_time(vcpu);
+               count = kvm_read_c0_guest_count(cop0);
+       } else {
+               now = kvm_mips_freeze_hrtimer(vcpu, &count);
+       }
+
+       /* Update the frequency */
+       vcpu->arch.count_hz = count_hz;
+       vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
+       vcpu->arch.count_dyn_bias = 0;
+
+       /* Calculate adjusted bias so dynamic count is unchanged */
+       vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+       /* Update and resume hrtimer */
+       if (!dc)
+               kvm_mips_resume_hrtimer(vcpu, now, count);
+       return 0;
+}
+
+/**
+ * kvm_mips_write_compare() - Modify compare and update timer.
+ * @vcpu:      Virtual CPU.
+ * @compare:   New CP0_Compare value.
+ *
+ * Update CP0_Compare to a new value and update the timeout.
+ */
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       /* if unchanged, must just be an ack */
+       if (kvm_read_c0_guest_compare(cop0) == compare)
+               return;
+
+       /* Update compare */
+       kvm_write_c0_guest_compare(cop0, compare);
+
+       /* Update timeout if count enabled */
+       if (!kvm_mips_count_disabled(vcpu))
+               kvm_mips_update_hrtimer(vcpu);
+}
+
+/**
+ * kvm_mips_count_disable() - Disable count.
+ * @vcpu:      Virtual CPU.
+ *
+ * Disable the CP0_Count timer. A timer interrupt on or before the final stop
+ * time will be handled but not after.
+ *
+ * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
+ * count_ctl.DC has been set (count disabled).
+ *
+ * Returns:    The time that the timer was stopped.
+ */
+static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t count;
+       ktime_t now;
+
+       /* Stop hrtimer */
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+       /* Set the static count from the dynamic count, handling pending TI */
+       now = ktime_get();
+       count = kvm_mips_read_count_running(vcpu, now);
+       kvm_write_c0_guest_count(cop0, count);
+
+       return now;
+}
+
+/**
+ * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
+ * @vcpu:      Virtual CPU.
+ *
+ * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
+ * before the final stop time will be handled if the timer isn't disabled by
+ * count_ctl.DC, but not after.
+ *
+ * Assumes CP0_Cause.DC is clear (count enabled).
+ */
+void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
+       if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+               kvm_mips_count_disable(vcpu);
+}
+
+/**
+ * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
+ * @vcpu:      Virtual CPU.
+ *
+ * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
+ * the start time will be handled if the timer isn't disabled by count_ctl.DC,
+ * potentially before even returning, so the caller should be careful with
+ * ordering of CP0_Cause modifications so as not to lose it.
+ *
+ * Assumes CP0_Cause.DC is set (count disabled).
+ */
+void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t count;
+
+       kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
+
+       /*
+        * Set the dynamic count to match the static count.
+        * This starts the hrtimer if count_ctl.DC allows it.
+        * Otherwise it conveniently updates the biases.
+        */
+       count = kvm_read_c0_guest_count(cop0);
+       kvm_mips_write_count(vcpu, count);
+}
+
+/**
+ * kvm_mips_set_count_ctl() - Update the count control KVM register.
+ * @vcpu:      Virtual CPU.
+ * @count_ctl: Count control register new value.
+ *
+ * Set the count control KVM register. The timer is updated accordingly.
+ *
+ * Returns:    -EINVAL if reserved bits are set.
+ *             0 on success.
+ */
+int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       s64 changed = count_ctl ^ vcpu->arch.count_ctl;
+       s64 delta;
+       ktime_t expire, now;
+       uint32_t count, compare;
+
+       /* Only allow defined bits to be changed */
+       if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
+               return -EINVAL;
+
+       /* Apply new value */
+       vcpu->arch.count_ctl = count_ctl;
+
+       /* Master CP0_Count disable */
+       if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
+               /* Is CP0_Cause.DC already disabling CP0_Count? */
+               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
+                       if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
+                               /* Just record the current time */
+                               vcpu->arch.count_resume = ktime_get();
+               } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
+                       /* disable timer and record current time */
+                       vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
+               } else {
+                       /*
+                        * Calculate timeout relative to static count at resume
+                        * time (wrap 0 to 2^32).
+                        */
+                       count = kvm_read_c0_guest_count(cop0);
+                       compare = kvm_read_c0_guest_compare(cop0);
+                       delta = (u64)(uint32_t)(compare - count - 1) + 1;
+                       delta = div_u64(delta * NSEC_PER_SEC,
+                                       vcpu->arch.count_hz);
+                       expire = ktime_add_ns(vcpu->arch.count_resume, delta);
+
+                       /* Handle pending interrupt */
+                       now = ktime_get();
+                       if (ktime_compare(now, expire) >= 0)
+                               /* Nothing should be waiting on the timeout */
+                               kvm_mips_callbacks->queue_timer_int(vcpu);
+
+                       /* Resume hrtimer without changing bias */
+                       count = kvm_mips_read_count_running(vcpu, now);
+                       kvm_mips_resume_hrtimer(vcpu, now, count);
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * kvm_mips_set_count_resume() - Update the count resume KVM register.
+ * @vcpu:              Virtual CPU.
+ * @count_resume:      Count resume register new value.
+ *
+ * Set the count resume KVM register.
+ *
+ * Returns:    -EINVAL if out of valid range (0..now).
+ *             0 on success.
+ */
+int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
+{
+       /*
+        * It doesn't make sense for the resume time to be in the future, as it
+        * would be possible for the next interrupt to be more than a full
+        * period in the future.
+        */
+       if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
+               return -EINVAL;
+
+       vcpu->arch.count_resume = ns_to_ktime(count_resume);
+       return 0;
+}
+
+/**
+ * kvm_mips_count_timeout() - Push timer forward on timeout.
+ * @vcpu:      Virtual CPU.
+ *
+ * Handle an hrtimer event by pushing the hrtimer forward a period.
+ *
+ * Returns:    The hrtimer_restart value to return to the hrtimer subsystem.
+ */
+enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
+{
+       /* Add the Count period to the current expiry time */
+       hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
+                              vcpu->arch.count_period);
+       return HRTIMER_RESTART;
+}
+
+enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+
+       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+               kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
+                         kvm_read_c0_guest_epc(cop0));
+               kvm_clear_c0_guest_status(cop0, ST0_EXL);
+               vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
+
+       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+               kvm_clear_c0_guest_status(cop0, ST0_ERL);
+               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+       } else {
+               kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
+                       vcpu->arch.pc);
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
+{
+       kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
+                 vcpu->arch.pending_exceptions);
+
+       ++vcpu->stat.wait_exits;
+       trace_kvm_exit(vcpu, WAIT_EXITS);
+       if (!vcpu->arch.pending_exceptions) {
+               vcpu->arch.wait = 1;
+               kvm_vcpu_block(vcpu);
+
+               /*
+                * If we are runnable, then definitely go off to user space to
+                * check if any I/O interrupts are pending.
+                */
+               if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
+               }
+       }
+
+       return EMULATE_DONE;
+}
+
+/*
+ * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
+ * we can catch it if things ever change
+ */
+enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t pc = vcpu->arch.pc;
+
+       kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+       return EMULATE_FAIL;
+}
+
+/* Write Guest TLB Entry @ Index */
+enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int index = kvm_read_c0_guest_index(cop0);
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+
+       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
+               kvm_debug("%s: illegal index: %d\n", __func__, index);
+               kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+                         pc, index, kvm_read_c0_guest_entryhi(cop0),
+                         kvm_read_c0_guest_entrylo0(cop0),
+                         kvm_read_c0_guest_entrylo1(cop0),
+                         kvm_read_c0_guest_pagemask(cop0));
+               index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
+       }
+
+       tlb = &vcpu->arch.guest_tlb[index];
+       /*
+        * Probe the shadow host TLB for the entry being overwritten, if one
+        * matches, invalidate it
+        */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+                 pc, index, kvm_read_c0_guest_entryhi(cop0),
+                 kvm_read_c0_guest_entrylo0(cop0),
+                 kvm_read_c0_guest_entrylo1(cop0),
+                 kvm_read_c0_guest_pagemask(cop0));
+
+       return EMULATE_DONE;
+}
+
+/* Write Guest TLB Entry @ Random Index */
+enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb *tlb = NULL;
+       uint32_t pc = vcpu->arch.pc;
+       int index;
+
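+       /*
+        * Pick a pseudo-random victim entry; the mask below assumes
+        * KVM_MIPS_GUEST_TLB_SIZE is a power of two.
+        */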
+       get_random_bytes(&index, sizeof(index));
+       index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
+
+       tlb = &vcpu->arch.guest_tlb[index];
+
+       /*
+        * Probe the shadow host TLB for the entry being overwritten, if one
+        * matches, invalidate it
+        */
+       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
+       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
+       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
+       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+
+       kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+                 pc, index, kvm_read_c0_guest_entryhi(cop0),
+                 kvm_read_c0_guest_entrylo0(cop0),
+                 kvm_read_c0_guest_entrylo1(cop0));
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       long entryhi = kvm_read_c0_guest_entryhi(cop0);
+       uint32_t pc = vcpu->arch.pc;
+       int index = -1;
+
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+
+       kvm_write_c0_guest_index(cop0, index);
+
+       kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+                 index);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
+                                          uint32_t cause, struct kvm_run *run,
+                                          struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       int32_t rt, rd, copz, sel, co_bit, op;
+       uint32_t pc = vcpu->arch.pc;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       copz = (inst >> 21) & 0x1f;
+       rt = (inst >> 16) & 0x1f;
+       rd = (inst >> 11) & 0x1f;
+       sel = inst & 0x7;
+       co_bit = (inst >> 25) & 1;
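+
+       /*
+        * Worked example (illustrative, not part of this change): the
+        * instruction "mtc0 $t0, $12" encodes as 0x40886000 and decodes
+        * to copz = 4 (mtc_op), rt = 8, rd = 12 (MIPS_CP0_STATUS),
+        * sel = 0, co_bit = 0.
+        */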
+
+       if (co_bit) {
+               op = (inst) & 0xff;
+
+               switch (op) {
+               case tlbr_op:   /*  Read indexed TLB entry  */
+                       er = kvm_mips_emul_tlbr(vcpu);
+                       break;
+               case tlbwi_op:  /*  Write indexed  */
+                       er = kvm_mips_emul_tlbwi(vcpu);
+                       break;
+               case tlbwr_op:  /*  Write random  */
+                       er = kvm_mips_emul_tlbwr(vcpu);
+                       break;
+               case tlbp_op:   /* TLB Probe */
+                       er = kvm_mips_emul_tlbp(vcpu);
+                       break;
+               case rfe_op:
+                       kvm_err("!!!COP0_RFE!!!\n");
+                       break;
+               case eret_op:
+                       er = kvm_mips_emul_eret(vcpu);
+                       goto dont_update_pc;
+               case wait_op:
+                       er = kvm_mips_emul_wait(vcpu);
+                       break;
+               }
+       } else {
+               switch (copz) {
+               case mfc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       /* Get reg */
+                       if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
+                       } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
+                               vcpu->arch.gprs[rt] = 0x0;
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       } else {
+                               vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mfc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
+                                 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
+
+                       break;
+
+               case dmfc_op:
+                       vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+                       break;
+
+               case mtc_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[rd][sel]++;
+#endif
+                       if ((rd == MIPS_CP0_TLB_INDEX)
+                           && (vcpu->arch.gprs[rt] >=
+                               KVM_MIPS_GUEST_TLB_SIZE)) {
+                               kvm_err("Invalid TLB Index: %ld\n",
+                                       vcpu->arch.gprs[rt]);
+                               er = EMULATE_FAIL;
+                               break;
+                       }
+#define C0_EBASE_CORE_MASK 0xff
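+                       /*
+                        * EBase is CP0 register 15 select 1; its low bits
+                        * hold the CPU/core number, which the mask above
+                        * preserves across guest writes.
+                        */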
+                       if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
+                               /* Preserve CORE number */
+                               kvm_change_c0_guest_ebase(cop0,
+                                                         ~(C0_EBASE_CORE_MASK),
+                                                         vcpu->arch.gprs[rt]);
+                               kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
+                                       kvm_read_c0_guest_ebase(cop0));
+                       } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
+                               uint32_t nasid =
+                                       vcpu->arch.gprs[rt] & ASID_MASK;
+                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
+                                   ((kvm_read_c0_guest_entryhi(cop0) &
+                                     ASID_MASK) != nasid)) {
+                                       kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+                                               kvm_read_c0_guest_entryhi(cop0)
+                                               & ASID_MASK,
+                                               vcpu->arch.gprs[rt]
+                                               & ASID_MASK);
+
+                                       /* Blow away the shadow host TLBs */
+                                       kvm_mips_flush_host_tlb(1);
+                               }
+                               kvm_write_c0_guest_entryhi(cop0,
+                                                          vcpu->arch.gprs[rt]);
+                       } else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
+                               /* We are writing to COUNT */
+                               kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
+                               goto done;
+                       } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
+                               kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
+                                         pc, kvm_read_c0_guest_compare(cop0),
+                                         vcpu->arch.gprs[rt]);
+
+                               /* If we are writing to COMPARE */
+                               /* Clear pending timer interrupt, if any */
+                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
+                               kvm_mips_write_compare(vcpu,
+                                                      vcpu->arch.gprs[rt]);
+                       } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
+                               kvm_write_c0_guest_status(cop0,
+                                                         vcpu->arch.gprs[rt]);
+                               /*
+                                * Make sure that CU1 and NMI bits are
+                                * never set
+                                */
+                               kvm_clear_c0_guest_status(cop0,
+                                                         (ST0_CU1 | ST0_NMI));
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
+                               uint32_t old_cause, new_cause;
+
+                               old_cause = kvm_read_c0_guest_cause(cop0);
+                               new_cause = vcpu->arch.gprs[rt];
+                               /* Update R/W bits */
+                               kvm_change_c0_guest_cause(cop0, 0x08800300,
+                                                         new_cause);
+                               /* DC bit enabling/disabling timer? */
+                               if ((old_cause ^ new_cause) & CAUSEF_DC) {
+                                       if (new_cause & CAUSEF_DC)
+                                               kvm_mips_count_disable_cause(vcpu);
+                                       else
+                                               kvm_mips_count_enable_cause(vcpu);
+                               }
+                       } else {
+                               cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+                               kvm_mips_trans_mtc0(inst, opc, vcpu);
+#endif
+                       }
+
+                       kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
+                                 rd, sel, cop0->reg[rd][sel]);
+                       break;
+
+               case dmtc_op:
+                       kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
+                               vcpu->arch.pc, rt, rd, sel);
+                       er = EMULATE_FAIL;
+                       break;
+
+               case mfmcz_op:
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+                       cop0->stat[MIPS_CP0_STATUS][0]++;
+#endif
+                       if (rt != 0) {
+                               vcpu->arch.gprs[rt] =
+                                   kvm_read_c0_guest_status(cop0);
+                       }
+                       /* Bit 5 of the instruction (the "sc" bit): EI vs DI */
+                       if (inst & 0x20) {
+                               kvm_debug("[%#lx] mfmcz_op: EI\n",
+                                         vcpu->arch.pc);
+                               kvm_set_c0_guest_status(cop0, ST0_IE);
+                       } else {
+                               kvm_debug("[%#lx] mfmcz_op: DI\n",
+                                         vcpu->arch.pc);
+                               kvm_clear_c0_guest_status(cop0, ST0_IE);
+                       }
+
+                       break;
+
+               case wrpgpr_op:
+                       {
+                               uint32_t css =
+                                   cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+                               uint32_t pss =
+                                   (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
+                               /*
+                                * We don't support any shadow register sets, so
+                                * SRSCtl[PSS] == SRSCtl[CSS] = 0
+                                */
+                               if (css || pss) {
+                                       er = EMULATE_FAIL;
+                                       break;
+                               }
+                               kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
+                                         vcpu->arch.gprs[rt]);
+                               vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
+                       }
+                       break;
+               default:
+                       kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
+                               vcpu->arch.pc, copz);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+       }
+
+done:
+       /* Rollback PC only if emulation was unsuccessful */
+       if (er == EMULATE_FAIL)
+               vcpu->arch.pc = curr_pc;
+
+dont_update_pc:
+       /*
+        * This is for special instructions whose emulation
+        * updates the PC, so do not overwrite the PC under
+        * any circumstances
+        */
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+       void *data = run->mmio.data;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
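+       /*
+        * The faulting guest virtual address was latched in
+        * host_cp0_badvaddr by the exception; each case below translates
+        * it to a guest physical address and fills in run->mmio so that
+        * user space can complete the access.
+        */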
+       switch (op) {
+       case sb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(u8 *) data = vcpu->arch.gprs[rt];
+               kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
+                         *(uint8_t *) data);
+
+               break;
+
+       case sw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint32_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint32_t *) data);
+               break;
+
+       case sh_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 1;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 1;
+               *(uint16_t *) data = vcpu->arch.gprs[rt];
+
+               kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
+                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
+                         vcpu->arch.gprs[rt], *(uint16_t *) data);
+               break;
+
+       default:
+               kvm_err("Store not yet supported\n");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       /* Rollback PC if emulation was unsuccessful */
+       if (er == EMULATE_FAIL)
+               vcpu->arch.pc = curr_pc;
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
+                                           struct kvm_run *run,
+                                           struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DO_MMIO;
+       int32_t op, base, rt, offset;
+       uint32_t bytes;
+
+       rt = (inst >> 16) & 0x1f;
+       base = (inst >> 21) & 0x1f;
+       offset = inst & 0xffff;
+       op = (inst >> 26) & 0x3f;
+
+       vcpu->arch.pending_load_cause = cause;
+       vcpu->arch.io_gpr = rt;
+
+       switch (op) {
+       case lw_op:
+               bytes = 4;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_needed = 1;
+               vcpu->mmio_is_write = 0;
+               break;
+
+       case lh_op:
+       case lhu_op:
+               bytes = 2;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lh_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       case lbu_op:
+       case lb_op:
+               bytes = 1;
+               if (bytes > sizeof(run->mmio.data)) {
+                       kvm_err("%s: bad MMIO length: %d\n", __func__,
+                              run->mmio.len);
+                       er = EMULATE_FAIL;
+                       break;
+               }
+               run->mmio.phys_addr =
+                   kvm_mips_callbacks->gva_to_gpa(vcpu->arch.host_cp0_badvaddr);
+               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
+                       er = EMULATE_FAIL;
+                       break;
+               }
+
+               run->mmio.len = bytes;
+               run->mmio.is_write = 0;
+               vcpu->mmio_is_write = 0;
+
+               if (op == lb_op)
+                       vcpu->mmio_needed = 2;
+               else
+                       vcpu->mmio_needed = 1;
+
+               break;
+
+       default:
+               kvm_err("Load not yet supported\n");
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
+{
+       unsigned long offset = (va & ~PAGE_MASK);
+       struct kvm *kvm = vcpu->kvm;
+       unsigned long pa;
+       gfn_t gfn;
+       pfn_t pfn;
+
+       gfn = va >> PAGE_SHIFT;
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               return -1;
+       }
+       pfn = kvm->arch.guest_pmap[gfn];
+       pa = (pfn << PAGE_SHIFT) | offset;
+
+       kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
+                 CKSEG0ADDR(pa));
+
+       local_flush_icache_range(CKSEG0ADDR(pa), CKSEG0ADDR(pa) + 32);
+       return 0;
+}
+
+#define MIPS_CACHE_OP_INDEX_INV         0x0
+#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
+#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
+#define MIPS_CACHE_OP_IMP               0x3
+#define MIPS_CACHE_OP_HIT_INV           0x4
+#define MIPS_CACHE_OP_FILL_WB_INV       0x5
+#define MIPS_CACHE_OP_HIT_HB            0x6
+#define MIPS_CACHE_OP_FETCH_LOCK        0x7
+
+#define MIPS_CACHE_ICACHE               0x0
+#define MIPS_CACHE_DCACHE               0x1
+#define MIPS_CACHE_SEC                  0x3
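+
+/*
+ * Worked example (illustrative): "cache 0x15, 0(a0)" is Hit Writeback
+ * Invalidate on the D-cache; the 5-bit op field 0x15 splits into
+ * cache = 0x15 & 0x3 = MIPS_CACHE_DCACHE and
+ * op = 0x15 >> 2 = MIPS_CACHE_OP_FILL_WB_INV.
+ */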
+
+enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
+                                            uint32_t cause,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       enum emulation_result er = EMULATE_DONE;
+       int32_t offset, cache, op_inst, op, base;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long va;
+       unsigned long curr_pc;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       base = (inst >> 21) & 0x1f;
+       op_inst = (inst >> 16) & 0x1f;
+       offset = inst & 0xffff;
+       cache = (inst >> 16) & 0x3;
+       op = (inst >> 18) & 0x7;
+
+       va = arch->gprs[base] + offset;
+
+       kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+                 cache, op, base, arch->gprs[base], offset);
+
+       /*
+        * Treat INDEX_INV as a nop, basically issued by Linux on startup to
+        * invalidate the caches entirely by stepping through all the
+        * ways/indexes
+        */
+       if (op == MIPS_CACHE_OP_INDEX_INV) {
+               kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+                         vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
+                         arch->gprs[base], offset);
+
+               if (cache == MIPS_CACHE_DCACHE) {
+                       r4k_blast_dcache();
+               } else if (cache == MIPS_CACHE_ICACHE) {
+                       r4k_blast_icache();
+               } else {
+                       kvm_err("%s: unsupported CACHE INDEX operation\n",
+                               __func__);
+                       return EMULATE_FAIL;
+               }
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               kvm_mips_trans_cache_index(inst, opc, vcpu);
+#endif
+               goto done;
+       }
+
+       preempt_disable();
+       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
+                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
+                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+               int index;
+
+               /* If an entry already exists then skip */
+               if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
+                       goto skip_fault;
+
+               /*
+                * If address not in the guest TLB, then give the guest a fault,
+                * the resulting handler will do the right thing
+                */
+               index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK));
+
+               if (index < 0) {
+                       vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
+                       vcpu->arch.host_cp0_badvaddr = va;
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
+                                                        vcpu);
+                       preempt_enable();
+                       goto dont_update_pc;
+               } else {
+                       struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+                       /*
+                        * Check if the entry is valid, if not then setup a TLB
+                        * invalid exception to the guest
+                        */
+                       if (!TLB_IS_VALID(*tlb, va)) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
+                                                               run, vcpu);
+                               preempt_enable();
+                               goto dont_update_pc;
+                       } else {
+                               /*
+                                * We fault an entry from the guest tlb to the
+                                * shadow host TLB
+                                */
+                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
+                                                                    NULL,
+                                                                    NULL);
+                       }
+               }
+       } else {
+               kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+                       cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+
+       }
+
+skip_fault:
+       /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
+       if (cache == MIPS_CACHE_DCACHE
+           && (op == MIPS_CACHE_OP_FILL_WB_INV
+               || op == MIPS_CACHE_OP_HIT_INV)) {
+               flush_dcache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /*
+                * Replace the CACHE instruction with a SYNCI. Not quite the
+                * same, but it avoids a trap
+                */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
+               flush_dcache_line(va);
+               flush_icache_line(va);
+
+#ifdef CONFIG_KVM_MIPS_DYN_TRANS
+               /* Replace the CACHE instruction with a SYNCI */
+               kvm_mips_trans_cache_va(inst, opc, vcpu);
+#endif
+       } else {
+               kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
+                       cache, op, base, arch->gprs[base], offset);
+               er = EMULATE_FAIL;
+               preempt_enable();
+               goto dont_update_pc;
+       }
+
+       preempt_enable();
+
+dont_update_pc:
+       /* Rollback PC */
+       vcpu->arch.pc = curr_pc;
+done:
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+                                           struct kvm_run *run,
+                                           struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t inst;
+
+       /* Fetch the instruction. */
+       if (cause & CAUSEF_BD)
+               opc += 1;
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       switch (((union mips_instruction)inst).r_format.opcode) {
+       case cop0_op:
+               er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
+               break;
+       case sb_op:
+       case sh_op:
+       case sw_op:
+               er = kvm_mips_emulate_store(inst, cause, run, vcpu);
+               break;
+       case lb_op:
+       case lbu_op:
+       case lhu_op:
+       case lh_op:
+       case lw_op:
+               er = kvm_mips_emulate_load(inst, cause, run, vcpu);
+               break;
+
+       case cache_op:
+               ++vcpu->stat.cache_exits;
+               trace_kvm_exit(vcpu, CACHE_EXITS);
+               er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
+               break;
+
+       default:
+               kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
+                       inst);
+               kvm_arch_vcpu_dump_regs(vcpu);
+               er = EMULATE_FAIL;
+               break;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_SYSCALL << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
+                                                 uint32_t *opc,
+                                                 struct kvm_run *run,
+                                                 struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
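+               /*
+                * With EXL clear the guest takes this as a TLB refill, at
+                * the refill vector (offset 0x0); with EXL set it goes to
+                * the general exception vector (offset 0x180) instead.
+                */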
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
+                                                uint32_t *opc,
+                                                struct kvm_run *run,
+                                                struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long entryhi =
+               (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* set pc to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
+                                                 uint32_t *opc,
+                                                 struct kvm_run *run,
+                                                 struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x0;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
+                                                uint32_t *opc,
+                                                struct kvm_run *run,
+                                                struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+/* TLBMOD: store into address matching TLB with Dirty bit off */
+enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+                                            struct kvm_run *run,
+                                            struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+#ifdef DEBUG
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+       int index;
+
+       /* If address not in the guest TLB, then we are in trouble */
+       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
+       if (index < 0) {
+               /* XXXKYMA Invalidate and retry */
+               kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
+               kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
+                    __func__, entryhi);
+               kvm_mips_dump_guest_tlbs(vcpu);
+               kvm_mips_dump_host_tlbs();
+               return EMULATE_FAIL;
+       }
+#endif
+
+       er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
+                                             uint32_t *opc,
+                                             struct kvm_run *run,
+                                             struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
+                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       } else {
+               kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
+                         arch->pc);
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+       }
+
+       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
+
+       /* setup badvaddr, context and entryhi registers for the guest */
+       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+       /* XXXKYMA: is the context register used by linux??? */
+       kvm_write_c0_guest_entryhi(cop0, entryhi);
+       /* Blow away the shadow host TLBs */
+       kvm_mips_flush_host_tlb(1);
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+       }
+
+       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       kvm_change_c0_guest_cause(cop0, (0xff),
+                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
+       kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
+
+       return EMULATE_DONE;
+}
+
+enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
+                                             uint32_t *opc,
+                                             struct kvm_run *run,
+                                             struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_RES_INST << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver RI when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
+                                             uint32_t *opc,
+                                             struct kvm_run *run,
+                                             struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (T_BREAK << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+       } else {
+               kvm_err("Trying to deliver BP when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+/* ll/sc, rdhwr, sync emulation */
+
+#define OPCODE 0xfc000000
+#define BASE   0x03e00000
+#define RT     0x001f0000
+#define OFFSET 0x0000ffff
+#define LL     0xc0000000
+#define SC     0xe0000000
+#define SPEC0  0x00000000
+#define SPEC3  0x7c000000
+#define RD     0x0000f800
+#define FUNC   0x0000003f
+#define SYNC   0x0000000f
+#define RDHWR  0x0000003b
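+
+/*
+ * Example (illustrative): the TLS access Linux userland emits,
+ * "rdhwr $3, $29" (0x7c03e83b), matches SPEC3 | RDHWR with rt = 3 and
+ * rd = 29, and is handled by the UserLocal case below.
+ */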
+
+enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+                                        struct kvm_run *run,
+                                        struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+       uint32_t inst;
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
+       /* Fetch the instruction. */
+       if (cause & CAUSEF_BD)
+               opc += 1;
+
+       inst = kvm_get_inst(opc, vcpu);
+
+       if (inst == KVM_INVALID_INST) {
+               kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
+               return EMULATE_FAIL;
+       }
+
+       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+               int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+               int rd = (inst & RD) >> 11;
+               int rt = (inst & RT) >> 16;
+               /* If usermode, check RDHWR rd is allowed by guest HWREna */
+               if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
+                       kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
+                                 rd, opc);
+                       goto emulate_ri;
+               }
+               switch (rd) {
+               case 0: /* CPU number */
+                       arch->gprs[rt] = 0;
+                       break;
+               case 1: /* SYNCI length */
+                       arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
+                                            current_cpu_data.icache.linesz);
+                       break;
+               case 2: /* Read count register */
+                       arch->gprs[rt] = kvm_mips_read_count(vcpu);
+                       break;
+               case 3: /* Count register resolution */
+                       switch (current_cpu_data.cputype) {
+                       case CPU_20KC:
+                       case CPU_25KF:
+                               arch->gprs[rt] = 1;
+                               break;
+                       default:
+                               arch->gprs[rt] = 2;
+                       }
+                       break;
+               case 29:
+                       arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
+                       break;
+
+               default:
+                       kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
+                       goto emulate_ri;
+               }
+       } else {
+               kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+               goto emulate_ri;
+       }
+
+       return EMULATE_DONE;
+
+emulate_ri:
+       /*
+        * Rollback PC (if in branch delay slot then the PC already points to
+        * branch target), and pass the RI exception to the guest OS.
+        */
+       vcpu->arch.pc = curr_pc;
+       return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
+}
+
+enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
+                                                 struct kvm_run *run)
+{
+       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
+       enum emulation_result er = EMULATE_DONE;
+       unsigned long curr_pc;
+
+       if (run->mmio.len > sizeof(*gpr)) {
+               kvm_err("Bad MMIO length: %d\n", run->mmio.len);
+               er = EMULATE_FAIL;
+               goto done;
+       }
+
+       /*
+        * Update PC and hold onto current PC in case there is
+        * an error and we want to rollback the PC
+        */
+       curr_pc = vcpu->arch.pc;
+       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
+       if (er == EMULATE_FAIL)
+               return er;
+
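+       /*
+        * mmio_needed was chosen by kvm_mips_emulate_load: 2 requests
+        * sign extension (lb/lh), 1 zero extension (lbu/lhu).
+        */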
+       switch (run->mmio.len) {
+       case 4:
+               *gpr = *(int32_t *) run->mmio.data;
+               break;
+
+       case 2:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int16_t *) run->mmio.data;
+               else
+                       *gpr = *(uint16_t *) run->mmio.data;
+
+               break;
+       case 1:
+               if (vcpu->mmio_needed == 2)
+                       *gpr = *(int8_t *) run->mmio.data;
+               else
+                       *gpr = *(u8 *) run->mmio.data;
+               break;
+       }
+
+       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
+               kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
+                         vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
+                         vcpu->mmio_needed);
+
+done:
+       return er;
+}
+
+static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
+                                                 uint32_t *opc,
+                                                 struct kvm_run *run,
+                                                 struct kvm_vcpu *vcpu)
+{
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       enum emulation_result er = EMULATE_DONE;
+
+       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+               /* save old pc */
+               kvm_write_c0_guest_epc(cop0, arch->pc);
+               kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+               if (cause & CAUSEF_BD)
+                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+               else
+                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+               kvm_change_c0_guest_cause(cop0, (0xff),
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* Set PC to the exception entry point */
+               arch->pc = KVM_GUEST_KSEG0 + 0x180;
+               kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
+
+               kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
+                         exccode, kvm_read_c0_guest_epc(cop0),
+                         kvm_read_c0_guest_badvaddr(cop0));
+       } else {
+               kvm_err("Trying to deliver EXC when EXL is already set\n");
+               er = EMULATE_FAIL;
+       }
+
+       return er;
+}
+
+enum emulation_result kvm_mips_check_privilege(unsigned long cause,
+                                              uint32_t *opc,
+                                              struct kvm_run *run,
+                                              struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+
+       int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
+
+       if (usermode) {
+               switch (exccode) {
+               case T_INT:
+               case T_SYSCALL:
+               case T_BREAK:
+               case T_RES_INST:
+                       break;
+
+               case T_COP_UNUSABLE:
+                       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
+                               er = EMULATE_PRIV_FAIL;
+                       break;
+
+               case T_TLB_MOD:
+                       break;
+
+               case T_TLB_LD_MISS:
+                       /*
+                        * If we are accessing Guest kernel space, then send an
+                        * address error exception to the guest
+                        */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               kvm_debug("%s: LD MISS @ %#lx\n", __func__,
+                                         badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_TLB_ST_MISS:
+                       /*
+                        * If we are accessing Guest kernel space, then send an
+                        * address error exception to the guest
+                        */
+                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
+                               kvm_debug("%s: ST MISS @ %#lx\n", __func__,
+                                         badvaddr);
+                               cause &= ~0xff;
+                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
+                               er = EMULATE_PRIV_FAIL;
+                       }
+                       break;
+
+               case T_ADDR_ERR_ST:
+                       kvm_debug("%s: address error ST @ %#lx\n", __func__,
+                                 badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               case T_ADDR_ERR_LD:
+                       kvm_debug("%s: address error LD @ %#lx\n", __func__,
+                                 badvaddr);
+                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
+                               cause &= ~0xff;
+                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
+                       }
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               default:
+                       er = EMULATE_PRIV_FAIL;
+                       break;
+               }
+       }
+
+       if (er == EMULATE_PRIV_FAIL)
+               kvm_mips_emulate_exc(cause, opc, run, vcpu);
+
+       return er;
+}
+
+/*
+ * User Address (UA) fault; this could happen if:
+ * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
+ *     case we pass on the fault to the guest kernel and let it handle it.
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
+ *     case we inject the TLB from the Guest TLB into the shadow host TLB
+ */
+enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
+                                             uint32_t *opc,
+                                             struct kvm_run *run,
+                                             struct kvm_vcpu *vcpu)
+{
+       enum emulation_result er = EMULATE_DONE;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       unsigned long va = vcpu->arch.host_cp0_badvaddr;
+       int index;
+
+       kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
+                 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+
+       /*
+        * KVM would not have got the exception if this entry was valid in the
+        * shadow host TLB. Check the Guest TLB, if the entry is not there then
+        * send the guest an exception. The guest exc handler should then inject
+        * an entry into the guest TLB.
+        */
+       index = kvm_mips_guest_tlb_lookup(vcpu,
+                                         (va & VPN2_MASK) |
+                                         (kvm_read_c0_guest_entryhi
+                                          (vcpu->arch.cop0) & ASID_MASK));
+       if (index < 0) {
+               if (exccode == T_TLB_LD_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
+               } else if (exccode == T_TLB_ST_MISS) {
+                       er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
+               } else {
+                       kvm_err("%s: invalid exc code: %d\n", __func__,
+                               exccode);
+                       er = EMULATE_FAIL;
+               }
+       } else {
+               struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
+
+               /*
+                * Check if the entry is valid, if not then setup a TLB invalid
+                * exception to the guest
+                */
+               if (!TLB_IS_VALID(*tlb, va)) {
+                       if (exccode == T_TLB_LD_MISS) {
+                               er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
+                                                               vcpu);
+                       } else if (exccode == T_TLB_ST_MISS) {
+                               er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
+                                                               vcpu);
+                       } else {
+                               kvm_err("%s: invalid exc code: %d\n", __func__,
+                                       exccode);
+                               er = EMULATE_FAIL;
+                       }
+               } else {
+                       kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
+                                 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+                       /*
+                        * OK we have a Guest TLB entry, now inject it into the
+                        * shadow host TLB
+                        */
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
+                                                            NULL);
+               }
+       }
+
+       return er;
+}
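
kvm_mips_guest_tlb_lookup() is defined elsewhere in this patch; its call site above implies a linear match of a (VPN2 | ASID) key against each guest entry's EntryHi. A hedged sketch of that shape, reusing only names visible in this hunk (tlb_hi, VPN2_MASK, ASID_MASK):

/* Sketch only -- the real lookup may differ in iteration order and bounds. */
static int guest_tlb_lookup_sketch(struct kvm_mips_tlb *tlb, int entries,
                                   unsigned long entryhi)
{
        int i;

        for (i = 0; i < entries; i++)
                if ((tlb[i].tlb_hi & (VPN2_MASK | ASID_MASK)) == entryhi)
                        return i;

        return -1;      /* no match: the caller injects a TLB miss into the guest */
}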
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
new file mode 100644 (file)
index 0000000..9b44459
--- /dev/null
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupt delivery
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       set_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+{
+       clear_bit(priority, &vcpu->arch.pending_exceptions);
+}
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       /*
+        * Cause bits to reflect the pending timer interrupt,
+        * the EXC code will be set when we are actually
+        * delivering the interrupt:
+        */
+       kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+
+       /* Queue up an INT exception for the core */
+       kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
+
+}
+
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
+{
+       kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
+       kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
+}
+
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       /*
+        * Cause bits to reflect the pending IO interrupt,
+        * the EXC code will be set when we are actually
+        * delivering the interrupt:
+        */
+       switch (intr) {
+       case 2:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               /* Queue up an INT exception for the core */
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case 3:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case 4:
+               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
+
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+
+       switch (intr) {
+       case -2:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
+               break;
+
+       case -3:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
+               break;
+
+       case -4:
+               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
+               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
+               break;
+
+       default:
+               break;
+       }
+
+}
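
The sign convention above (2..4 raises a line, -2..-4 clears it) is driven from userspace through the KVM_INTERRUPT vcpu ioctl, which reaches these callbacks via kvm_vcpu_ioctl_interrupt() further down in this patch. A minimal userspace sketch; vcpu_fd is a hypothetical open vcpu file descriptor and error handling is omitted:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Pulse guest I/O interrupt line 2: queue it, then dequeue it. */
static void pulse_io_int(int vcpu_fd)
{
        struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 2 };

        ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    /* queue: asserts C_IRQ0 in guest Cause */
        irq.irq = -2;
        ioctl(vcpu_fd, KVM_INTERRUPT, &irq);    /* dequeue: clears it again */
}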
+
+/* Deliver the interrupt of the corresponding priority, if possible. */
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause)
+{
+       int allowed = 0;
+       uint32_t exccode;
+
+       struct kvm_vcpu_arch *arch = &vcpu->arch;
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+       switch (priority) {
+       case MIPS_EXC_INT_TIMER:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IO:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_1:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       case MIPS_EXC_INT_IPI_2:
+               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
+                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
+                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
+                       allowed = 1;
+                       exccode = T_INT;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       /* Are we allowed to deliver the interrupt? */
+       if (allowed) {
+               if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
+                       /* save old pc */
+                       kvm_write_c0_guest_epc(cop0, arch->pc);
+                       kvm_set_c0_guest_status(cop0, ST0_EXL);
+
+                       if (cause & CAUSEF_BD)
+                               kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
+                       else
+                               kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
+
+                       kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
+
+               } else
+                       kvm_err("Trying to deliver interrupt when EXL is already set\n");
+
+               kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
+                                         (exccode << CAUSEB_EXCCODE));
+
+               /* XXXSL Set PC to the interrupt exception entry point */
+               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
+                       arch->pc = KVM_GUEST_KSEG0 + 0x200;
+               else
+                       arch->pc = KVM_GUEST_KSEG0 + 0x180;
+
+               clear_bit(priority, &vcpu->arch.pending_exceptions);
+       }
+
+       return allowed;
+}
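
Every case in the switch above applies the same three-part gate: global interrupt enable, not already at exception/error level, and the per-line interrupt mask bit. Factored out as a predicate, purely as an illustrative sketch (this helper does not exist in the patch):

/* Sketch: the common delivery gate; im_bit is one of IE_IRQ0/1/2/5. */
static int guest_int_allowed(struct mips_coproc *cop0, unsigned long im_bit)
{
        unsigned long status = kvm_read_c0_guest_status(cop0);

        return (status & ST0_IE) &&                     /* interrupts enabled */
               !(status & (ST0_EXL | ST0_ERL)) &&       /* not in exc/error level */
               (status & im_bit);                       /* this line unmasked */
}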
+
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause)
+{
+       return 1;
+}
+
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+{
+       unsigned long *pending = &vcpu->arch.pending_exceptions;
+       unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
+       unsigned int priority;
+
+       if (!(*pending) && !(*pending_clr))
+               return;
+
+       priority = __ffs(*pending_clr);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending_clr,
+                                        BITS_PER_BYTE * sizeof(*pending_clr),
+                                        priority + 1);
+       }
+
+       priority = __ffs(*pending);
+       while (priority <= MIPS_EXC_MAX) {
+               if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
+                       if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
+                               break;
+               }
+
+               priority = find_next_bit(pending,
+                                        BITS_PER_BYTE * sizeof(*pending),
+                                        priority + 1);
+       }
+
+}
+
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
+}
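
kvm_mips_deliver_interrupts() above walks each bitmap lowest-bit-first, so numerically smaller MIPS_EXC_* values are served before larger ones. A kernel-context sketch of that walk in isolation (deliver() stands in for the irq_deliver callback):

/* Sketch: priority walk over a pending bitmap, lowest set bit first. */
static void walk_pending_sketch(unsigned long pending)
{
        unsigned int priority;

        if (!pending)
                return;         /* __ffs() is undefined for 0 */

        priority = __ffs(pending);      /* e.g. MIPS_EXC_INT_TIMER (7) before IPI_1 (10) */
        while (priority <= MIPS_EXC_MAX) {
                /* deliver(priority); */
                priority = find_next_bit(&pending,
                                         BITS_PER_BYTE * sizeof(pending),
                                         priority + 1);
        }
}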
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
new file mode 100644 (file)
index 0000000..4ab4bdf
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Interrupts
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/*
+ * MIPS Exception Priorities: exceptions (including interrupts) are queued up
+ * for the guest in the order specified by their priorities
+ */
+
+#define MIPS_EXC_RESET              0
+#define MIPS_EXC_SRESET             1
+#define MIPS_EXC_DEBUG_ST           2
+#define MIPS_EXC_DEBUG              3
+#define MIPS_EXC_DDB                4
+#define MIPS_EXC_NMI                5
+#define MIPS_EXC_MCHK               6
+#define MIPS_EXC_INT_TIMER          7
+#define MIPS_EXC_INT_IO             8
+#define MIPS_EXC_EXECUTE            9
+#define MIPS_EXC_INT_IPI_1          10
+#define MIPS_EXC_INT_IPI_2          11
+#define MIPS_EXC_MAX                12
+/* XXXSL More to follow */
+
+extern char mips32_exception[], mips32_exceptionEnd[];
+extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
+
+#define C_TI        (_ULCAST_(1) << 30)
+
+#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
+#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
+
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
+
+void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
+void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
+                             struct kvm_mips_interrupt *irq);
+void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
+                               struct kvm_mips_interrupt *irq);
+int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                           uint32_t cause);
+int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
+                         uint32_t cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
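
These _cb functions are consumed through the kvm_mips_callbacks ops table (declared in kvm_cb.c, removed below and re-homed by this patch). A hedged sketch of how a trap-and-emulate backend might wire them up; the irq_deliver, irq_clear, queue_io_int and dequeue_io_int field names are taken from call sites in this patch, while the timer fields are assumptions inferred from the _cb naming:

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        .queue_timer_int        = kvm_mips_queue_timer_int_cb,   /* assumed field name */
        .dequeue_timer_int      = kvm_mips_dequeue_timer_int_cb, /* assumed field name */
        .queue_io_int           = kvm_mips_queue_io_int_cb,
        .dequeue_io_int         = kvm_mips_dequeue_io_int_cb,
        .irq_deliver            = kvm_mips_irq_deliver_cb,
        .irq_clear              = kvm_mips_irq_clear_cb,
};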
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/kvm_cb.c
deleted file mode 100644 (file)
index 313c2e3..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Yann Le Du <ledu@kymasys.com>
- */
-
-#include <linux/export.h>
-#include <linux/kvm_host.h>
-
-struct kvm_mips_callbacks *kvm_mips_callbacks;
-EXPORT_SYMBOL(kvm_mips_callbacks);
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/kvm_locore.S
deleted file mode 100644 (file)
index 033ac34..0000000
+++ /dev/null
@@ -1,613 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Main entry point for the guest, exception handling.
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <asm/asm.h>
-#include <asm/asmmacro.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-#include <asm/stackframe.h>
-#include <asm/asm-offsets.h>
-
-
-#define _C_LABEL(x)     x
-#define MIPSX(name)     mips32_ ## name
-#define CALLFRAME_SIZ   32
-
-/*
- * VECTOR
- *  exception vector entrypoint
- */
-#define VECTOR(x, regmask)      \
-    .ent    _C_LABEL(x),0;      \
-    EXPORT(x);
-
-#define VECTOR_END(x)      \
-    EXPORT(x);
-
-/* Overload, Danger Will Robinson!! */
-#define PT_HOST_ASID        PT_BVADDR
-#define PT_HOST_USERLOCAL   PT_EPC
-
-#define CP0_DDATA_LO        $28,3
-#define CP0_EBASE           $15,1
-
-#define CP0_INTCTL          $12,1
-#define CP0_SRSCTL          $12,2
-#define CP0_SRSMAP          $12,3
-#define CP0_HWRENA          $7,0
-
-/* Resume Flags */
-#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
-
-#define RESUME_GUEST            0
-#define RESUME_HOST             RESUME_FLAG_HOST
-
-/*
- * __kvm_mips_vcpu_run: entry point to the guest
- * a0: run
- * a1: vcpu
- */
-       .set    noreorder
-       .set    noat
-
-FEXPORT(__kvm_mips_vcpu_run)
-       /* k0/k1 not being used in host kernel context */
-       INT_ADDIU k1, sp, -PT_SIZE
-       LONG_S  $0, PT_R0(k1)
-       LONG_S  $1, PT_R1(k1)
-       LONG_S  $2, PT_R2(k1)
-       LONG_S  $3, PT_R3(k1)
-
-       LONG_S  $4, PT_R4(k1)
-       LONG_S  $5, PT_R5(k1)
-       LONG_S  $6, PT_R6(k1)
-       LONG_S  $7, PT_R7(k1)
-
-       LONG_S  $8,  PT_R8(k1)
-       LONG_S  $9,  PT_R9(k1)
-       LONG_S  $10, PT_R10(k1)
-       LONG_S  $11, PT_R11(k1)
-       LONG_S  $12, PT_R12(k1)
-       LONG_S  $13, PT_R13(k1)
-       LONG_S  $14, PT_R14(k1)
-       LONG_S  $15, PT_R15(k1)
-       LONG_S  $16, PT_R16(k1)
-       LONG_S  $17, PT_R17(k1)
-
-       LONG_S  $18, PT_R18(k1)
-       LONG_S  $19, PT_R19(k1)
-       LONG_S  $20, PT_R20(k1)
-       LONG_S  $21, PT_R21(k1)
-       LONG_S  $22, PT_R22(k1)
-       LONG_S  $23, PT_R23(k1)
-       LONG_S  $24, PT_R24(k1)
-       LONG_S  $25, PT_R25(k1)
-
-       /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */
-
-       LONG_S  $28, PT_R28(k1)
-       LONG_S  $29, PT_R29(k1)
-       LONG_S  $30, PT_R30(k1)
-       LONG_S  $31, PT_R31(k1)
-
-       /* Save hi/lo */
-       mflo    v0
-       LONG_S  v0, PT_LO(k1)
-       mfhi    v1
-       LONG_S  v1, PT_HI(k1)
-
-       /* Save host status */
-       mfc0    v0, CP0_STATUS
-       LONG_S  v0, PT_STATUS(k1)
-
-       /* Save host ASID, shove it into the BVADDR location */
-       mfc0    v1, CP0_ENTRYHI
-       andi    v1, 0xff
-       LONG_S  v1, PT_HOST_ASID(k1)
-
-       /* Save DDATA_LO, will be used to store pointer to vcpu */
-       mfc0    v1, CP0_DDATA_LO
-       LONG_S  v1, PT_HOST_USERLOCAL(k1)
-
-       /* DDATA_LO has pointer to vcpu */
-       mtc0    a1, CP0_DDATA_LO
-
-       /* Offset into vcpu->arch */
-       INT_ADDIU k1, a1, VCPU_HOST_ARCH
-
-       /*
-        * Save the host stack to VCPU, used for exception processing
-        * when we exit from the Guest
-        */
-       LONG_S  sp, VCPU_HOST_STACK(k1)
-
-       /* Save the kernel gp as well */
-       LONG_S  gp, VCPU_HOST_GP(k1)
-
-       /* Set up the status register for running the guest in UM; interrupts are disabled */
-       li      k0, (ST0_EXL | KSU_USER | ST0_BEV)
-       mtc0    k0, CP0_STATUS
-       ehb
-
-       /* load up the new EBASE */
-       LONG_L  k0, VCPU_GUEST_EBASE(k1)
-       mtc0    k0, CP0_EBASE
-
-       /*
-        * Now that the new EBASE has been loaded, unset BEV, set
-        * interrupt mask as it was but make sure that timer interrupts
-        * are enabled
-        */
-       li      k0, (ST0_EXL | KSU_USER | ST0_IE)
-       andi    v0, v0, ST0_IM
-       or      k0, k0, v0
-       mtc0    k0, CP0_STATUS
-       ehb
-
-
-       /* Set Guest EPC */
-       LONG_L  t0, VCPU_PC(k1)
-       mtc0    t0, CP0_EPC
-
-FEXPORT(__kvm_mips_load_asid)
-       /* Set the ASID for the Guest Kernel */
-       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
-                               /* addresses shift to 0x80000000 */
-       bltz    t0, 1f          /* If kernel */
-        INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
-       INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
-1:
-       /* t1: contains the base of the ASID array, need to get the cpu id  */
-       LONG_L  t2, TI_CPU($28)             /* smp_processor_id */
-       INT_SLL t2, t2, 2                   /* x4 */
-       REG_ADDU t3, t1, t2
-       LONG_L  k0, (t3)
-       andi    k0, k0, 0xff
-       mtc0    k0, CP0_ENTRYHI
-       ehb
-
-       /* Disable RDHWR access */
-       mtc0    zero, CP0_HWRENA
-
-       /* Now load up the Guest Context from VCPU */
-       LONG_L  $1, VCPU_R1(k1)
-       LONG_L  $2, VCPU_R2(k1)
-       LONG_L  $3, VCPU_R3(k1)
-
-       LONG_L  $4, VCPU_R4(k1)
-       LONG_L  $5, VCPU_R5(k1)
-       LONG_L  $6, VCPU_R6(k1)
-       LONG_L  $7, VCPU_R7(k1)
-
-       LONG_L  $8, VCPU_R8(k1)
-       LONG_L  $9, VCPU_R9(k1)
-       LONG_L  $10, VCPU_R10(k1)
-       LONG_L  $11, VCPU_R11(k1)
-       LONG_L  $12, VCPU_R12(k1)
-       LONG_L  $13, VCPU_R13(k1)
-       LONG_L  $14, VCPU_R14(k1)
-       LONG_L  $15, VCPU_R15(k1)
-       LONG_L  $16, VCPU_R16(k1)
-       LONG_L  $17, VCPU_R17(k1)
-       LONG_L  $18, VCPU_R18(k1)
-       LONG_L  $19, VCPU_R19(k1)
-       LONG_L  $20, VCPU_R20(k1)
-       LONG_L  $21, VCPU_R21(k1)
-       LONG_L  $22, VCPU_R22(k1)
-       LONG_L  $23, VCPU_R23(k1)
-       LONG_L  $24, VCPU_R24(k1)
-       LONG_L  $25, VCPU_R25(k1)
-
-       /* k0/k1 loaded up later */
-
-       LONG_L  $28, VCPU_R28(k1)
-       LONG_L  $29, VCPU_R29(k1)
-       LONG_L  $30, VCPU_R30(k1)
-       LONG_L  $31, VCPU_R31(k1)
-
-       /* Restore hi/lo */
-       LONG_L  k0, VCPU_LO(k1)
-       mtlo    k0
-
-       LONG_L  k0, VCPU_HI(k1)
-       mthi    k0
-
-FEXPORT(__kvm_mips_load_k0k1)
-       /* Restore the guest's k0/k1 registers */
-       LONG_L  k0, VCPU_R26(k1)
-       LONG_L  k1, VCPU_R27(k1)
-
-       /* Jump to guest */
-       eret
-
-VECTOR(MIPSX(exception), unknown)
-/*
- * Find out what mode we came from and jump to the proper handler.
- */
-       mtc0    k0, CP0_ERROREPC        #01: Save guest k0
-       ehb                             #02:
-
-       mfc0    k0, CP0_EBASE           #02: Get EBASE
-       INT_SRL k0, k0, 10              #03: Get rid of CPUNum
-       INT_SLL k0, k0, 10              #04
-       LONG_S  k1, 0x3000(k0)          #05: Save k1 @ offset 0x3000
-       INT_ADDIU k0, k0, 0x2000                #06: Exception handler is installed @ offset 0x2000
-       j       k0                      #07: jump to the function
-        nop                            #08: branch delay slot
-VECTOR_END(MIPSX(exceptionEnd))
-.end MIPSX(exception)
-
-/*
- * Generic Guest exception handler. We end up here when the guest
- * does something that causes a trap to kernel mode.
- *
- */
-NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
-       /* Get the VCPU pointer from DDATA_LO */
-       mfc0    k1, CP0_DDATA_LO
-       INT_ADDIU k1, k1, VCPU_HOST_ARCH
-
-       /* Start saving Guest context to VCPU */
-       LONG_S  $0, VCPU_R0(k1)
-       LONG_S  $1, VCPU_R1(k1)
-       LONG_S  $2, VCPU_R2(k1)
-       LONG_S  $3, VCPU_R3(k1)
-       LONG_S  $4, VCPU_R4(k1)
-       LONG_S  $5, VCPU_R5(k1)
-       LONG_S  $6, VCPU_R6(k1)
-       LONG_S  $7, VCPU_R7(k1)
-       LONG_S  $8, VCPU_R8(k1)
-       LONG_S  $9, VCPU_R9(k1)
-       LONG_S  $10, VCPU_R10(k1)
-       LONG_S  $11, VCPU_R11(k1)
-       LONG_S  $12, VCPU_R12(k1)
-       LONG_S  $13, VCPU_R13(k1)
-       LONG_S  $14, VCPU_R14(k1)
-       LONG_S  $15, VCPU_R15(k1)
-       LONG_S  $16, VCPU_R16(k1)
-       LONG_S  $17, VCPU_R17(k1)
-       LONG_S  $18, VCPU_R18(k1)
-       LONG_S  $19, VCPU_R19(k1)
-       LONG_S  $20, VCPU_R20(k1)
-       LONG_S  $21, VCPU_R21(k1)
-       LONG_S  $22, VCPU_R22(k1)
-       LONG_S  $23, VCPU_R23(k1)
-       LONG_S  $24, VCPU_R24(k1)
-       LONG_S  $25, VCPU_R25(k1)
-
-       /* Guest k0/k1 saved later */
-
-       LONG_S  $28, VCPU_R28(k1)
-       LONG_S  $29, VCPU_R29(k1)
-       LONG_S  $30, VCPU_R30(k1)
-       LONG_S  $31, VCPU_R31(k1)
-
-       /* We need to save hi/lo and restore them on
-        * the way out
-        */
-       mfhi    t0
-       LONG_S  t0, VCPU_HI(k1)
-
-       mflo    t0
-       LONG_S  t0, VCPU_LO(k1)
-
-       /* Finally save guest k0/k1 to VCPU */
-       mfc0    t0, CP0_ERROREPC
-       LONG_S  t0, VCPU_R26(k1)
-
-       /* Get GUEST k1 and save it in VCPU */
-       PTR_LI  t1, ~0x2ff
-       mfc0    t0, CP0_EBASE
-       and     t0, t0, t1
-       LONG_L  t0, 0x3000(t0)
-       LONG_S  t0, VCPU_R27(k1)
-
-       /* Now that context has been saved, we can use other registers */
-
-       /* Restore vcpu */
-       mfc0    a1, CP0_DDATA_LO
-       move    s1, a1
-
-       /* Restore run (vcpu->run) */
-       LONG_L  a0, VCPU_RUN(a1)
-       /* Save pointer to run in s0; s0 is callee-saved, so it survives the C call */
-       move    s0, a0
-
-       /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
-        * process the exception */
-       mfc0    k0,CP0_EPC
-       LONG_S  k0, VCPU_PC(k1)
-
-       mfc0    k0, CP0_BADVADDR
-       LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
-
-       mfc0    k0, CP0_CAUSE
-       LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
-
-       mfc0    k0, CP0_ENTRYHI
-       LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
-
-       /* Now restore the host state just enough to run the handlers */
-
-       /* Switch EBASE to the one used by Linux */
-       /* load up the host EBASE */
-       mfc0    v0, CP0_STATUS
-
-       .set    at
-       or      k0, v0, ST0_BEV
-       .set    noat
-
-       mtc0    k0, CP0_STATUS
-       ehb
-
-       LONG_L  k0, VCPU_HOST_EBASE(k1)
-       mtc0    k0,CP0_EBASE
-
-
-       /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
-       .set    at
-       and     v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
-       or      v0, v0, ST0_CU0
-       .set    noat
-       mtc0    v0, CP0_STATUS
-       ehb
-
-       /* Load up host GP */
-       LONG_L  gp, VCPU_HOST_GP(k1)
-
-       /* Need a stack before we can jump to "C" */
-       LONG_L  sp, VCPU_HOST_STACK(k1)
-
-       /* Saved host state */
-       INT_ADDIU sp, sp, -PT_SIZE
-
-       /* XXXKYMA do we need to load the host ASID, maybe not because the
-        * kernel entries are marked GLOBAL, need to verify
-        */
-
-       /* Restore host DDATA_LO */
-       LONG_L  k0, PT_HOST_USERLOCAL(sp)
-       mtc0    k0, CP0_DDATA_LO
-
-       /* Restore RDHWR access */
-       PTR_LI  k0, 0x2000000F
-       mtc0    k0, CP0_HWRENA
-
-       /* Jump to handler */
-FEXPORT(__kvm_mips_jump_to_handler)
-       /* XXXKYMA: not sure if this is safe, how large is the stack??
-        * Now jump to the kvm_mips_handle_exit() to see if we can deal
-        * with this in the kernel */
-       PTR_LA  t9, kvm_mips_handle_exit
-       jalr.hb t9
-        INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
-
-       /* Return from handler; make sure interrupts are disabled */
-       di
-       ehb
-
-       /* XXXKYMA: k0/k1 could have been blown away if we processed
-        * an exception while we were handling the exception from the
-        * guest, reload k1
-        */
-
-       move    k1, s1
-       INT_ADDIU k1, k1, VCPU_HOST_ARCH
-
-       /* Check return value, should tell us if we are returning to the
-        * host (handle I/O etc.) or resuming the guest
-        */
-       andi    t0, v0, RESUME_HOST
-       bnez    t0, __kvm_mips_return_to_host
-        nop
-
-__kvm_mips_return_to_guest:
-       /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
-       mtc0    s1, CP0_DDATA_LO
-
-       /* Load up the Guest EBASE to minimize the window where BEV is set */
-       LONG_L  t0, VCPU_GUEST_EBASE(k1)
-
-       /* Switch EBASE back to the one used by KVM */
-       mfc0    v1, CP0_STATUS
-       .set    at
-       or      k0, v1, ST0_BEV
-       .set    noat
-       mtc0    k0, CP0_STATUS
-       ehb
-       mtc0    t0, CP0_EBASE
-
-       /* Setup status register for running guest in UM */
-       .set    at
-       or      v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
-       and     v1, v1, ~ST0_CU0
-       .set    noat
-       mtc0    v1, CP0_STATUS
-       ehb
-
-       /* Set Guest EPC */
-       LONG_L  t0, VCPU_PC(k1)
-       mtc0    t0, CP0_EPC
-
-       /* Set the ASID for the Guest Kernel */
-       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
-                               /* addresses shift to 0x80000000 */
-       bltz    t0, 1f          /* If kernel */
-        INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
-       INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
-1:
-       /* t1: contains the base of the ASID array, need to get the cpu id  */
-       LONG_L  t2, TI_CPU($28)         /* smp_processor_id */
-       INT_SLL t2, t2, 2               /* x4 */
-       REG_ADDU t3, t1, t2
-       LONG_L  k0, (t3)
-       andi    k0, k0, 0xff
-       mtc0    k0,CP0_ENTRYHI
-       ehb
-
-       /* Disable RDHWR access */
-       mtc0    zero,  CP0_HWRENA
-
-       /* load the guest context from VCPU and return */
-       LONG_L  $0, VCPU_R0(k1)
-       LONG_L  $1, VCPU_R1(k1)
-       LONG_L  $2, VCPU_R2(k1)
-       LONG_L  $3, VCPU_R3(k1)
-       LONG_L  $4, VCPU_R4(k1)
-       LONG_L  $5, VCPU_R5(k1)
-       LONG_L  $6, VCPU_R6(k1)
-       LONG_L  $7, VCPU_R7(k1)
-       LONG_L  $8, VCPU_R8(k1)
-       LONG_L  $9, VCPU_R9(k1)
-       LONG_L  $10, VCPU_R10(k1)
-       LONG_L  $11, VCPU_R11(k1)
-       LONG_L  $12, VCPU_R12(k1)
-       LONG_L  $13, VCPU_R13(k1)
-       LONG_L  $14, VCPU_R14(k1)
-       LONG_L  $15, VCPU_R15(k1)
-       LONG_L  $16, VCPU_R16(k1)
-       LONG_L  $17, VCPU_R17(k1)
-       LONG_L  $18, VCPU_R18(k1)
-       LONG_L  $19, VCPU_R19(k1)
-       LONG_L  $20, VCPU_R20(k1)
-       LONG_L  $21, VCPU_R21(k1)
-       LONG_L  $22, VCPU_R22(k1)
-       LONG_L  $23, VCPU_R23(k1)
-       LONG_L  $24, VCPU_R24(k1)
-       LONG_L  $25, VCPU_R25(k1)
-
-       /* k0/k1 loaded later */
-       LONG_L  $28, VCPU_R28(k1)
-       LONG_L  $29, VCPU_R29(k1)
-       LONG_L  $30, VCPU_R30(k1)
-       LONG_L  $31, VCPU_R31(k1)
-
-FEXPORT(__kvm_mips_skip_guest_restore)
-       LONG_L  k0, VCPU_HI(k1)
-       mthi    k0
-
-       LONG_L  k0, VCPU_LO(k1)
-       mtlo    k0
-
-       LONG_L  k0, VCPU_R26(k1)
-       LONG_L  k1, VCPU_R27(k1)
-
-       eret
-
-__kvm_mips_return_to_host:
-       /* EBASE is already pointing to Linux */
-       LONG_L  k1, VCPU_HOST_STACK(k1)
-       INT_ADDIU k1,k1, -PT_SIZE
-
-       /* Restore host DDATA_LO */
-       LONG_L  k0, PT_HOST_USERLOCAL(k1)
-       mtc0    k0, CP0_DDATA_LO
-
-       /* Restore host ASID */
-       LONG_L  k0, PT_HOST_ASID(sp)
-       andi    k0, 0xff
-       mtc0    k0,CP0_ENTRYHI
-       ehb
-
-       /* Load context saved on the host stack */
-       LONG_L  $0, PT_R0(k1)
-       LONG_L  $1, PT_R1(k1)
-
-       /* r2/v0 is the return code, shift it down by 2 (arithmetic)
-        * to recover the err code  */
-       INT_SRA k0, v0, 2
-       move    $2, k0
-
-       LONG_L  $3, PT_R3(k1)
-       LONG_L  $4, PT_R4(k1)
-       LONG_L  $5, PT_R5(k1)
-       LONG_L  $6, PT_R6(k1)
-       LONG_L  $7, PT_R7(k1)
-       LONG_L  $8, PT_R8(k1)
-       LONG_L  $9, PT_R9(k1)
-       LONG_L  $10, PT_R10(k1)
-       LONG_L  $11, PT_R11(k1)
-       LONG_L  $12, PT_R12(k1)
-       LONG_L  $13, PT_R13(k1)
-       LONG_L  $14, PT_R14(k1)
-       LONG_L  $15, PT_R15(k1)
-       LONG_L  $16, PT_R16(k1)
-       LONG_L  $17, PT_R17(k1)
-       LONG_L  $18, PT_R18(k1)
-       LONG_L  $19, PT_R19(k1)
-       LONG_L  $20, PT_R20(k1)
-       LONG_L  $21, PT_R21(k1)
-       LONG_L  $22, PT_R22(k1)
-       LONG_L  $23, PT_R23(k1)
-       LONG_L  $24, PT_R24(k1)
-       LONG_L  $25, PT_R25(k1)
-
-       /* Host k0/k1 were not saved */
-
-       LONG_L  $28, PT_R28(k1)
-       LONG_L  $29, PT_R29(k1)
-       LONG_L  $30, PT_R30(k1)
-
-       LONG_L  k0, PT_HI(k1)
-       mthi    k0
-
-       LONG_L  k0, PT_LO(k1)
-       mtlo    k0
-
-       /* Restore RDHWR access */
-       PTR_LI  k0, 0x2000000F
-       mtc0    k0,  CP0_HWRENA
-
-
-       /* Restore RA, which is the address we will return to */
-       LONG_L  ra, PT_R31(k1)
-       j       ra
-        nop
-
-VECTOR_END(MIPSX(GuestExceptionEnd))
-.end MIPSX(GuestException)
-
-MIPSX(exceptions):
-       ####
-       ##### The exception handlers.
-       #####
-       .word _C_LABEL(MIPSX(GuestException))   #  0
-       .word _C_LABEL(MIPSX(GuestException))   #  1
-       .word _C_LABEL(MIPSX(GuestException))   #  2
-       .word _C_LABEL(MIPSX(GuestException))   #  3
-       .word _C_LABEL(MIPSX(GuestException))   #  4
-       .word _C_LABEL(MIPSX(GuestException))   #  5
-       .word _C_LABEL(MIPSX(GuestException))   #  6
-       .word _C_LABEL(MIPSX(GuestException))   #  7
-       .word _C_LABEL(MIPSX(GuestException))   #  8
-       .word _C_LABEL(MIPSX(GuestException))   #  9
-       .word _C_LABEL(MIPSX(GuestException))   # 10
-       .word _C_LABEL(MIPSX(GuestException))   # 11
-       .word _C_LABEL(MIPSX(GuestException))   # 12
-       .word _C_LABEL(MIPSX(GuestException))   # 13
-       .word _C_LABEL(MIPSX(GuestException))   # 14
-       .word _C_LABEL(MIPSX(GuestException))   # 15
-       .word _C_LABEL(MIPSX(GuestException))   # 16
-       .word _C_LABEL(MIPSX(GuestException))   # 17
-       .word _C_LABEL(MIPSX(GuestException))   # 18
-       .word _C_LABEL(MIPSX(GuestException))   # 19
-       .word _C_LABEL(MIPSX(GuestException))   # 20
-       .word _C_LABEL(MIPSX(GuestException))   # 21
-       .word _C_LABEL(MIPSX(GuestException))   # 22
-       .word _C_LABEL(MIPSX(GuestException))   # 23
-       .word _C_LABEL(MIPSX(GuestException))   # 24
-       .word _C_LABEL(MIPSX(GuestException))   # 25
-       .word _C_LABEL(MIPSX(GuestException))   # 26
-       .word _C_LABEL(MIPSX(GuestException))   # 27
-       .word _C_LABEL(MIPSX(GuestException))   # 28
-       .word _C_LABEL(MIPSX(GuestException))   # 29
-       .word _C_LABEL(MIPSX(GuestException))   # 30
-       .word _C_LABEL(MIPSX(GuestException))   # 31
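
The deleted locore above implements the host/guest world switch. Restated at C level purely as a schematic (every helper name below is hypothetical; only __kvm_mips_vcpu_run, kvm_mips_handle_exit and RESUME_FLAG_HOST appear in the assembly):

/* Schematic of the flow in kvm_locore.S -- not compilable kernel code. */
static int vcpu_run_schematic(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        for (;;) {
                save_host_context();            /* GPRs, hi/lo, Status, ASID, DDATA_LO */
                load_guest_context(vcpu);       /* EBASE, EPC, ASID, GPRs, k0/k1 */
                eret_to_guest();                /* runs until a guest exception traps */

                save_guest_context(vcpu);       /* mirror of the load above */
                restore_host_minimum();         /* EBASE, stack, gp, RDHWR access */
                if (kvm_mips_handle_exit(run, vcpu) & RESUME_FLAG_HOST)
                        return host_err_code(); /* v0 >> 2, per the epilogue */
        }
}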
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
deleted file mode 100644 (file)
index cd5e4f5..0000000
+++ /dev/null
@@ -1,1226 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * KVM/MIPS: MIPS specific KVM APIs
- *
- * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_int.h"
-#include "kvm_mips_comm.h"
-
-#define CREATE_TRACE_POINTS
-#include "trace.h"
-
-#ifndef VECTORSPACING
-#define VECTORSPACING 0x100    /* for EI/VI mode */
-#endif
-
-#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
-struct kvm_stats_debugfs_item debugfs_entries[] = {
-       { "wait", VCPU_STAT(wait_exits) },
-       { "cache", VCPU_STAT(cache_exits) },
-       { "signal", VCPU_STAT(signal_exits) },
-       { "interrupt", VCPU_STAT(int_exits) },
-       { "cop_unsuable", VCPU_STAT(cop_unusable_exits) },
-       { "tlbmod", VCPU_STAT(tlbmod_exits) },
-       { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) },
-       { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) },
-       { "addrerr_st", VCPU_STAT(addrerr_st_exits) },
-       { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) },
-       { "syscall", VCPU_STAT(syscall_exits) },
-       { "resvd_inst", VCPU_STAT(resvd_inst_exits) },
-       { "break_inst", VCPU_STAT(break_inst_exits) },
-       { "flush_dcache", VCPU_STAT(flush_dcache_exits) },
-       { "halt_wakeup", VCPU_STAT(halt_wakeup) },
-       {NULL}
-};
-
-static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
-{
-       int i;
-       for_each_possible_cpu(i) {
-               vcpu->arch.guest_kernel_asid[i] = 0;
-               vcpu->arch.guest_user_asid[i] = 0;
-       }
-       return 0;
-}
-
-/* XXXKYMA: We are simulating a processor that has the WII bit set in Config7,
- * so we are "runnable" if interrupts are pending
- */
-int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
-{
-       return !!(vcpu->arch.pending_exceptions);
-}
-
-int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
-{
-       return 1;
-}
-
-int kvm_arch_hardware_enable(void *garbage)
-{
-       return 0;
-}
-
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
-int kvm_arch_hardware_setup(void)
-{
-       return 0;
-}
-
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
-void kvm_arch_check_processor_compat(void *rtn)
-{
-       int *r = (int *)rtn;
-       *r = 0;
-       return;
-}
-
-static void kvm_mips_init_tlbs(struct kvm *kvm)
-{
-       unsigned long wired;
-
-       /* Add a wired entry to the TLB; it is used to map the commpage to the Guest kernel */
-       wired = read_c0_wired();
-       write_c0_wired(wired + 1);
-       mtc0_tlbw_hazard();
-       kvm->arch.commpage_tlb = wired;
-
-       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
-                 kvm->arch.commpage_tlb);
-}
-
-static void kvm_mips_init_vm_percpu(void *arg)
-{
-       struct kvm *kvm = (struct kvm *)arg;
-
-       kvm_mips_init_tlbs(kvm);
-       kvm_mips_callbacks->vm_init(kvm);
-
-}
-
-int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
-{
-       if (atomic_inc_return(&kvm_mips_instance) == 1) {
-               kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
-                         __func__);
-               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
-       }
-
-
-       return 0;
-}
-
-void kvm_mips_free_vcpus(struct kvm *kvm)
-{
-       unsigned int i;
-       struct kvm_vcpu *vcpu;
-
-       /* Put the pages we reserved for the guest pmap */
-       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
-               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
-                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
-       }
-       kfree(kvm->arch.guest_pmap);
-
-       kvm_for_each_vcpu(i, vcpu, kvm) {
-               kvm_arch_vcpu_free(vcpu);
-       }
-
-       mutex_lock(&kvm->lock);
-
-       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
-               kvm->vcpus[i] = NULL;
-
-       atomic_set(&kvm->online_vcpus, 0);
-
-       mutex_unlock(&kvm->lock);
-}
-
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
-static void kvm_mips_uninit_tlbs(void *arg)
-{
-       /* Restore wired count */
-       write_c0_wired(0);
-       mtc0_tlbw_hazard();
-       /* Clear out all the TLBs */
-       kvm_local_flush_tlb_all();
-}
-
-void kvm_arch_destroy_vm(struct kvm *kvm)
-{
-       kvm_mips_free_vcpus(kvm);
-
-       /* If this is the last instance, restore wired count */
-       if (atomic_dec_return(&kvm_mips_instance) == 0) {
-               kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
-                         __func__);
-               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
-       }
-}
-
-long
-kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
-{
-       return -ENOIOCTLCMD;
-}
-
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
-                          struct kvm_memory_slot *dont)
-{
-}
-
-int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-                           unsigned long npages)
-{
-       return 0;
-}
-
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
-int kvm_arch_prepare_memory_region(struct kvm *kvm,
-                                struct kvm_memory_slot *memslot,
-                                struct kvm_userspace_memory_region *mem,
-                                enum kvm_mr_change change)
-{
-       return 0;
-}
-
-void kvm_arch_commit_memory_region(struct kvm *kvm,
-                                struct kvm_userspace_memory_region *mem,
-                                const struct kvm_memory_slot *old,
-                                enum kvm_mr_change change)
-{
-       unsigned long npages = 0;
-       int i, err = 0;
-
-       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
-                 __func__, kvm, mem->slot, mem->guest_phys_addr,
-                 mem->memory_size, mem->userspace_addr);
-
-       /* Setup Guest PMAP table */
-       if (!kvm->arch.guest_pmap) {
-               if (mem->slot == 0)
-                       npages = mem->memory_size >> PAGE_SHIFT;
-
-               if (npages) {
-                       kvm->arch.guest_pmap_npages = npages;
-                       kvm->arch.guest_pmap =
-                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
-
-                       if (!kvm->arch.guest_pmap) {
-                               kvm_err("Failed to allocate guest PMAP");
-                               err = -ENOMEM;
-                               goto out;
-                       }
-
-                       kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
-                                 npages, kvm->arch.guest_pmap);
-
-                       /* Now setup the page table */
-                       for (i = 0; i < npages; i++) {
-                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
-                       }
-               }
-       }
-out:
-       return;
-}
-
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
-void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
-                                  struct kvm_memory_slot *slot)
-{
-}
-
-void kvm_arch_flush_shadow(struct kvm *kvm)
-{
-}
-
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
-{
-       extern char mips32_exception[], mips32_exceptionEnd[];
-       extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
-       int err, size, offset;
-       void *gebase;
-       int i;
-
-       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
-
-       if (!vcpu) {
-               err = -ENOMEM;
-               goto out;
-       }
-
-       err = kvm_vcpu_init(vcpu, kvm, id);
-
-       if (err)
-               goto out_free_cpu;
-
-       kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
-
-       /* Allocate space for host mode exception handlers that handle
-        * guest mode exits
-        */
-       if (cpu_has_veic || cpu_has_vint) {
-               size = 0x200 + VECTORSPACING * 64;
-       } else {
-               size = 0x4000;
-       }
-
-       /* Save Linux EBASE */
-       vcpu->arch.host_ebase = (void *)read_c0_ebase();
-
-       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
-
-       if (!gebase) {
-               err = -ENOMEM;
-               goto out_free_cpu;
-       }
-       kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
-                 ALIGN(size, PAGE_SIZE), gebase);
-
-       /* Save new ebase */
-       vcpu->arch.guest_ebase = gebase;
-
-       /* Copy L1 Guest Exception handler to correct offset */
-
-       /* TLB Refill, EXL = 0 */
-       memcpy(gebase, mips32_exception,
-              mips32_exceptionEnd - mips32_exception);
-
-       /* General Exception Entry point */
-       memcpy(gebase + 0x180, mips32_exception,
-              mips32_exceptionEnd - mips32_exception);
-
-       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
-       for (i = 0; i < 8; i++) {
-               kvm_debug("L1 Vectored handler @ %p\n",
-                         gebase + 0x200 + (i * VECTORSPACING));
-               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
-                      mips32_exceptionEnd - mips32_exception);
-       }
-
-       /* General handler, relocate to unmapped space for sanity's sake */
-       offset = 0x2000;
-       kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
-                 gebase + offset,
-                 mips32_GuestExceptionEnd - mips32_GuestException);
-
-       memcpy(gebase + offset, mips32_GuestException,
-              mips32_GuestExceptionEnd - mips32_GuestException);
-
-       /* Invalidate the icache for these ranges */
-       local_flush_icache_range((unsigned long)gebase,
-                               (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
-
-       /* Allocate a comm page for the guest kernel; a TLB entry will be reserved to map GVA @ 0xFFFF8000 to this page */
-       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
-
-       if (!vcpu->arch.kseg0_commpage) {
-               err = -ENOMEM;
-               goto out_free_gebase;
-       }
-
-       kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
-       kvm_mips_commpage_init(vcpu);
-
-       /* Init */
-       vcpu->arch.last_sched_cpu = -1;
-
-       /* Start off the timer */
-       kvm_mips_init_count(vcpu);
-
-       return vcpu;
-
-out_free_gebase:
-       kfree(gebase);
-
-out_free_cpu:
-       kfree(vcpu);
-
-out:
-       return ERR_PTR(err);
-}
-
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
-{
-       hrtimer_cancel(&vcpu->arch.comparecount_timer);
-
-       kvm_vcpu_uninit(vcpu);
-
-       kvm_mips_dump_stats(vcpu);
-
-       kfree(vcpu->arch.guest_ebase);
-       kfree(vcpu->arch.kseg0_commpage);
-}
-
-void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
-{
-       kvm_arch_vcpu_free(vcpu);
-}
-
-int
-kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-                                   struct kvm_guest_debug *dbg)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       int r = 0;
-       sigset_t sigsaved;
-
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
-
-       if (vcpu->mmio_needed) {
-               if (!vcpu->mmio_is_write)
-                       kvm_mips_complete_mmio_load(vcpu, run);
-               vcpu->mmio_needed = 0;
-       }
-
-       local_irq_disable();
-       /* Check if we have any exceptions/interrupts pending */
-       kvm_mips_deliver_interrupts(vcpu,
-                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
-
-       kvm_guest_enter();
-
-       r = __kvm_mips_vcpu_run(run, vcpu);
-
-       kvm_guest_exit();
-       local_irq_enable();
-
-       if (vcpu->sigset_active)
-               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
-
-       return r;
-}
-
-int
-kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
-{
-       int intr = (int)irq->irq;
-       struct kvm_vcpu *dvcpu = NULL;
-
-       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
-               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
-                         (int)intr);
-
-       if (irq->cpu == -1)
-               dvcpu = vcpu;
-       else
-               dvcpu = vcpu->kvm->vcpus[irq->cpu];
-
-       if (intr == 2 || intr == 3 || intr == 4) {
-               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
-
-       } else if (intr == -2 || intr == -3 || intr == -4) {
-               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
-       } else {
-               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
-                       irq->cpu, irq->irq);
-               return -EINVAL;
-       }
-
-       dvcpu->arch.wait = 0;
-
-       if (waitqueue_active(&dvcpu->wq)) {
-               wake_up_interruptible(&dvcpu->wq);
-       }
-
-       return 0;
-}
-
-int
-kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
-                               struct kvm_mp_state *mp_state)
-{
-       return -ENOIOCTLCMD;
-}
-
-int
-kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
-                               struct kvm_mp_state *mp_state)
-{
-       return -ENOIOCTLCMD;
-}
-
-static u64 kvm_mips_get_one_regs[] = {
-       KVM_REG_MIPS_R0,
-       KVM_REG_MIPS_R1,
-       KVM_REG_MIPS_R2,
-       KVM_REG_MIPS_R3,
-       KVM_REG_MIPS_R4,
-       KVM_REG_MIPS_R5,
-       KVM_REG_MIPS_R6,
-       KVM_REG_MIPS_R7,
-       KVM_REG_MIPS_R8,
-       KVM_REG_MIPS_R9,
-       KVM_REG_MIPS_R10,
-       KVM_REG_MIPS_R11,
-       KVM_REG_MIPS_R12,
-       KVM_REG_MIPS_R13,
-       KVM_REG_MIPS_R14,
-       KVM_REG_MIPS_R15,
-       KVM_REG_MIPS_R16,
-       KVM_REG_MIPS_R17,
-       KVM_REG_MIPS_R18,
-       KVM_REG_MIPS_R19,
-       KVM_REG_MIPS_R20,
-       KVM_REG_MIPS_R21,
-       KVM_REG_MIPS_R22,
-       KVM_REG_MIPS_R23,
-       KVM_REG_MIPS_R24,
-       KVM_REG_MIPS_R25,
-       KVM_REG_MIPS_R26,
-       KVM_REG_MIPS_R27,
-       KVM_REG_MIPS_R28,
-       KVM_REG_MIPS_R29,
-       KVM_REG_MIPS_R30,
-       KVM_REG_MIPS_R31,
-
-       KVM_REG_MIPS_HI,
-       KVM_REG_MIPS_LO,
-       KVM_REG_MIPS_PC,
-
-       KVM_REG_MIPS_CP0_INDEX,
-       KVM_REG_MIPS_CP0_CONTEXT,
-       KVM_REG_MIPS_CP0_USERLOCAL,
-       KVM_REG_MIPS_CP0_PAGEMASK,
-       KVM_REG_MIPS_CP0_WIRED,
-       KVM_REG_MIPS_CP0_HWRENA,
-       KVM_REG_MIPS_CP0_BADVADDR,
-       KVM_REG_MIPS_CP0_COUNT,
-       KVM_REG_MIPS_CP0_ENTRYHI,
-       KVM_REG_MIPS_CP0_COMPARE,
-       KVM_REG_MIPS_CP0_STATUS,
-       KVM_REG_MIPS_CP0_CAUSE,
-       KVM_REG_MIPS_CP0_EPC,
-       KVM_REG_MIPS_CP0_CONFIG,
-       KVM_REG_MIPS_CP0_CONFIG1,
-       KVM_REG_MIPS_CP0_CONFIG2,
-       KVM_REG_MIPS_CP0_CONFIG3,
-       KVM_REG_MIPS_CP0_CONFIG7,
-       KVM_REG_MIPS_CP0_ERROREPC,
-
-       KVM_REG_MIPS_COUNT_CTL,
-       KVM_REG_MIPS_COUNT_RESUME,
-       KVM_REG_MIPS_COUNT_HZ,
-};
-
-static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
-                           const struct kvm_one_reg *reg)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       int ret;
-       s64 v;
-
-       switch (reg->id) {
-       case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
-               v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
-               break;
-       case KVM_REG_MIPS_HI:
-               v = (long)vcpu->arch.hi;
-               break;
-       case KVM_REG_MIPS_LO:
-               v = (long)vcpu->arch.lo;
-               break;
-       case KVM_REG_MIPS_PC:
-               v = (long)vcpu->arch.pc;
-               break;
-
-       case KVM_REG_MIPS_CP0_INDEX:
-               v = (long)kvm_read_c0_guest_index(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONTEXT:
-               v = (long)kvm_read_c0_guest_context(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_USERLOCAL:
-               v = (long)kvm_read_c0_guest_userlocal(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_PAGEMASK:
-               v = (long)kvm_read_c0_guest_pagemask(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_WIRED:
-               v = (long)kvm_read_c0_guest_wired(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_HWRENA:
-               v = (long)kvm_read_c0_guest_hwrena(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_BADVADDR:
-               v = (long)kvm_read_c0_guest_badvaddr(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_ENTRYHI:
-               v = (long)kvm_read_c0_guest_entryhi(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_COMPARE:
-               v = (long)kvm_read_c0_guest_compare(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_STATUS:
-               v = (long)kvm_read_c0_guest_status(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CAUSE:
-               v = (long)kvm_read_c0_guest_cause(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_EPC:
-               v = (long)kvm_read_c0_guest_epc(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_ERROREPC:
-               v = (long)kvm_read_c0_guest_errorepc(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG:
-               v = (long)kvm_read_c0_guest_config(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG1:
-               v = (long)kvm_read_c0_guest_config1(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG2:
-               v = (long)kvm_read_c0_guest_config2(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG3:
-               v = (long)kvm_read_c0_guest_config3(cop0);
-               break;
-       case KVM_REG_MIPS_CP0_CONFIG7:
-               v = (long)kvm_read_c0_guest_config7(cop0);
-               break;
-       /* registers to be handled specially */
-       case KVM_REG_MIPS_CP0_COUNT:
-       case KVM_REG_MIPS_COUNT_CTL:
-       case KVM_REG_MIPS_COUNT_RESUME:
-       case KVM_REG_MIPS_COUNT_HZ:
-               ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
-               if (ret)
-                       return ret;
-               break;
-       default:
-               return -EINVAL;
-       }
-       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
-               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
-               return put_user(v, uaddr64);
-       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
-               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
-               u32 v32 = (u32)v;
-               return put_user(v32, uaddr32);
-       } else {
-               return -EINVAL;
-       }
-}
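
From userspace, kvm_mips_get_reg() is reached through the KVM_GET_ONE_REG vcpu ioctl. A minimal sketch reading the guest PC; vcpu_fd is a hypothetical open vcpu descriptor and error checking is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: fetch the guest PC through the one_reg interface. */
static uint64_t read_guest_pc(int vcpu_fd)
{
        uint64_t pc = 0;
        struct kvm_one_reg reg = {
                .id   = KVM_REG_MIPS_PC,        /* a 64-bit register id */
                .addr = (uintptr_t)&pc,
        };

        ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);  /* kvm_mips_get_reg() fills pc */
        return pc;
}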
-
-static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
-                           const struct kvm_one_reg *reg)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       u64 v;
-
-       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
-               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
-
-               if (get_user(v, uaddr64) != 0)
-                       return -EFAULT;
-       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
-               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
-               s32 v32;
-
-               if (get_user(v32, uaddr32) != 0)
-                       return -EFAULT;
-               v = (s64)v32;
-       } else {
-               return -EINVAL;
-       }
-
-       switch (reg->id) {
-       case KVM_REG_MIPS_R0:
-               /* Silently ignore requests to set $0 */
-               break;
-       case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
-               vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
-               break;
-       case KVM_REG_MIPS_HI:
-               vcpu->arch.hi = v;
-               break;
-       case KVM_REG_MIPS_LO:
-               vcpu->arch.lo = v;
-               break;
-       case KVM_REG_MIPS_PC:
-               vcpu->arch.pc = v;
-               break;
-
-       case KVM_REG_MIPS_CP0_INDEX:
-               kvm_write_c0_guest_index(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_CONTEXT:
-               kvm_write_c0_guest_context(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_USERLOCAL:
-               kvm_write_c0_guest_userlocal(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_PAGEMASK:
-               kvm_write_c0_guest_pagemask(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_WIRED:
-               kvm_write_c0_guest_wired(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_HWRENA:
-               kvm_write_c0_guest_hwrena(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_BADVADDR:
-               kvm_write_c0_guest_badvaddr(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_ENTRYHI:
-               kvm_write_c0_guest_entryhi(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_STATUS:
-               kvm_write_c0_guest_status(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_EPC:
-               kvm_write_c0_guest_epc(cop0, v);
-               break;
-       case KVM_REG_MIPS_CP0_ERROREPC:
-               kvm_write_c0_guest_errorepc(cop0, v);
-               break;
-       /* registers to be handled specially */
-       case KVM_REG_MIPS_CP0_COUNT:
-       case KVM_REG_MIPS_CP0_COMPARE:
-       case KVM_REG_MIPS_CP0_CAUSE:
-       case KVM_REG_MIPS_COUNT_CTL:
-       case KVM_REG_MIPS_COUNT_RESUME:
-       case KVM_REG_MIPS_COUNT_HZ:
-               return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-long
-kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
-{
-       struct kvm_vcpu *vcpu = filp->private_data;
-       void __user *argp = (void __user *)arg;
-       long r;
-
-       switch (ioctl) {
-       case KVM_SET_ONE_REG:
-       case KVM_GET_ONE_REG: {
-               struct kvm_one_reg reg;
-               if (copy_from_user(&reg, argp, sizeof(reg)))
-                       return -EFAULT;
-               if (ioctl == KVM_SET_ONE_REG)
-                       return kvm_mips_set_reg(vcpu, &reg);
-               else
-                       return kvm_mips_get_reg(vcpu, &reg);
-       }
-       case KVM_GET_REG_LIST: {
-               struct kvm_reg_list __user *user_list = argp;
-               u64 __user *reg_dest;
-               struct kvm_reg_list reg_list;
-               unsigned n;
-
-               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
-                       return -EFAULT;
-               n = reg_list.n;
-               reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
-               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
-                       return -EFAULT;
-               if (n < reg_list.n)
-                       return -E2BIG;
-               reg_dest = user_list->reg;
-               if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
-                                sizeof(kvm_mips_get_one_regs)))
-                       return -EFAULT;
-               return 0;
-       }
-       case KVM_NMI:
-               /* Treat the NMI as a CPU reset */
-               r = kvm_mips_reset_vcpu(vcpu);
-               break;
-       case KVM_INTERRUPT:
-               {
-                       struct kvm_mips_interrupt irq;
-                       r = -EFAULT;
-                       if (copy_from_user(&irq, argp, sizeof(irq)))
-                               goto out;
-
-                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
-                                 irq.irq);
-
-                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
-                       break;
-               }
-       default:
-               r = -ENOIOCTLCMD;
-       }
-
-out:
-       return r;
-}
-
-/*
- * Get (and clear) the dirty memory log for a memory slot.
- */
-int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
-{
-       struct kvm_memory_slot *memslot;
-       unsigned long ga, ga_end;
-       int is_dirty = 0;
-       int r;
-       unsigned long n;
-
-       mutex_lock(&kvm->slots_lock);
-
-       r = kvm_get_dirty_log(kvm, log, &is_dirty);
-       if (r)
-               goto out;
-
-       /* If nothing is dirty, don't bother messing with page tables. */
-       if (is_dirty) {
-               memslot = &kvm->memslots->memslots[log->slot];
-
-               ga = memslot->base_gfn << PAGE_SHIFT;
-               ga_end = ga + (memslot->npages << PAGE_SHIFT);
-
-               printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
-                      ga_end);
-
-               n = kvm_dirty_bitmap_bytes(memslot);
-               memset(memslot->dirty_bitmap, 0, n);
-       }
-
-       r = 0;
-out:
-       mutex_unlock(&kvm->slots_lock);
-       return r;
-}
-
-long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
-{
-       long r;
-
-       switch (ioctl) {
-       default:
-               r = -ENOIOCTLCMD;
-       }
-
-       return r;
-}
-
-int kvm_arch_init(void *opaque)
-{
-       int ret;
-
-       if (kvm_mips_callbacks) {
-               kvm_err("kvm: module already exists\n");
-               return -EEXIST;
-       }
-
-       ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
-
-       return ret;
-}
-
-void kvm_arch_exit(void)
-{
-       kvm_mips_callbacks = NULL;
-}
-
-int
-kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
-{
-       return -ENOIOCTLCMD;
-}
-
-int
-kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
-{
-       return -ENOIOCTLCMD;
-}
-
-int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
-{
-       return VM_FAULT_SIGBUS;
-}
-
-int kvm_dev_ioctl_check_extension(long ext)
-{
-       int r;
-
-       switch (ext) {
-       case KVM_CAP_ONE_REG:
-               r = 1;
-               break;
-       case KVM_CAP_COALESCED_MMIO:
-               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
-               break;
-       default:
-               r = 0;
-               break;
-       }
-       return r;
-}
-
-int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
-{
-       return kvm_mips_pending_timer(vcpu);
-}
-
-int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
-{
-       int i;
-       struct mips_coproc *cop0;
-
-       if (!vcpu)
-               return -1;
-
-       printk("VCPU Register Dump:\n");
-       printk("\tpc = 0x%08lx\n", vcpu->arch.pc);
-       printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
-
-       for (i = 0; i < 32; i += 4) {
-               printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
-                      vcpu->arch.gprs[i],
-                      vcpu->arch.gprs[i + 1],
-                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
-       }
-       printk("\thi: 0x%08lx\n", vcpu->arch.hi);
-       printk("\tlo: 0x%08lx\n", vcpu->arch.lo);
-
-       cop0 = vcpu->arch.cop0;
-       printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
-              kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0));
-
-       printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
-
-       return 0;
-}
-
-int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-       int i;
-
-       for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
-               vcpu->arch.gprs[i] = regs->gpr[i];
-       vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
-       vcpu->arch.hi = regs->hi;
-       vcpu->arch.lo = regs->lo;
-       vcpu->arch.pc = regs->pc;
-
-       return 0;
-}
-
-int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
-{
-       int i;
-
-       for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
-               regs->gpr[i] = vcpu->arch.gprs[i];
-
-       regs->hi = vcpu->arch.hi;
-       regs->lo = vcpu->arch.lo;
-       regs->pc = vcpu->arch.pc;
-
-       return 0;
-}
-
-static void kvm_mips_comparecount_func(unsigned long data)
-{
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
-       kvm_mips_callbacks->queue_timer_int(vcpu);
-
-       vcpu->arch.wait = 0;
-       if (waitqueue_active(&vcpu->wq))
-               wake_up_interruptible(&vcpu->wq);
-}
-
-/*
- * low level hrtimer wake routine.
- */
-static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
-{
-       struct kvm_vcpu *vcpu;
-
-       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
-       kvm_mips_comparecount_func((unsigned long) vcpu);
-       return kvm_mips_count_timeout(vcpu);
-}
-
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       kvm_mips_callbacks->vcpu_init(vcpu);
-       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
-                    HRTIMER_MODE_REL);
-       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
-       return 0;
-}
-
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
-{
-}
-
-int
-kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr)
-{
-       return 0;
-}
-
-/* Initial guest state */
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-       return kvm_mips_callbacks->vcpu_setup(vcpu);
-}
-
-static
-void kvm_mips_set_c0_status(void)
-{
-       uint32_t status = read_c0_status();
-
-       if (cpu_has_fpu)
-               status |= (ST0_CU1);
-
-       if (cpu_has_dsp)
-               status |= (ST0_MX);
-
-       write_c0_status(status);
-       ehb();
-}
-
-/*
- * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
- */
-int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       uint32_t cause = vcpu->arch.host_cp0_cause;
-       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       /* Set a default exit reason */
-       run->exit_reason = KVM_EXIT_UNKNOWN;
-       run->ready_for_interrupt_injection = 1;
-
-       /*
-        * Set the appropriate status bits based on host CPU features,
-        * before we hit the scheduler.
-        */
-       kvm_mips_set_c0_status();
-
-       local_irq_enable();
-
-       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
-                       cause, opc, run, vcpu);
-
-       /*
-        * Do a privilege check: if in user mode, most of these exit
-        * conditions end up causing an exception to be delivered to the
-        * guest kernel.
-        */
-       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
-       if (er == EMULATE_PRIV_FAIL) {
-               goto skip_emul;
-       } else if (er == EMULATE_FAIL) {
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-               goto skip_emul;
-       }
-
-       switch (exccode) {
-       case T_INT:
-               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
-
-               ++vcpu->stat.int_exits;
-               trace_kvm_exit(vcpu, INT_EXITS);
-
-               if (need_resched())
-                       cond_resched();
-
-               ret = RESUME_GUEST;
-               break;
-
-       case T_COP_UNUSABLE:
-               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
-
-               ++vcpu->stat.cop_unusable_exits;
-               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
-               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
-               /* XXXKYMA: Might need to return to user space */
-               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
-                       ret = RESUME_HOST;
-               break;
-
-       case T_TLB_MOD:
-               ++vcpu->stat.tlbmod_exits;
-               trace_kvm_exit(vcpu, TLBMOD_EXITS);
-               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
-               break;
-
-       case T_TLB_ST_MISS:
-               kvm_debug
-                   ("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
-                    badvaddr);
-
-               ++vcpu->stat.tlbmiss_st_exits;
-               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
-               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
-               break;
-
-       case T_TLB_LD_MISS:
-               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
-                         cause, opc, badvaddr);
-
-               ++vcpu->stat.tlbmiss_ld_exits;
-               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
-               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
-               break;
-
-       case T_ADDR_ERR_ST:
-               ++vcpu->stat.addrerr_st_exits;
-               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
-               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
-               break;
-
-       case T_ADDR_ERR_LD:
-               ++vcpu->stat.addrerr_ld_exits;
-               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
-               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
-               break;
-
-       case T_SYSCALL:
-               ++vcpu->stat.syscall_exits;
-               trace_kvm_exit(vcpu, SYSCALL_EXITS);
-               ret = kvm_mips_callbacks->handle_syscall(vcpu);
-               break;
-
-       case T_RES_INST:
-               ++vcpu->stat.resvd_inst_exits;
-               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
-               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
-               break;
-
-       case T_BREAK:
-               ++vcpu->stat.break_inst_exits;
-               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
-               ret = kvm_mips_callbacks->handle_break(vcpu);
-               break;
-
-       default:
-               kvm_err
-                   ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
-                    exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
-                    kvm_read_c0_guest_status(vcpu->arch.cop0));
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-               break;
-
-       }
-
-skip_emul:
-       local_irq_disable();
-
-       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
-               kvm_mips_deliver_interrupts(vcpu, cause);
-
-       if (!(ret & RESUME_HOST)) {
-               /* Only check for signals if not already exiting to userspace */
-               if (signal_pending(current)) {
-                       run->exit_reason = KVM_EXIT_INTR;
-                       ret = (-EINTR << 2) | RESUME_HOST;
-                       ++vcpu->stat.signal_exits;
-                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
-               }
-       }
-
-       return ret;
-}
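
A small sketch (illustrative only, under the assumption that RESUME_HOST carries the low flag bits of the convention) of how a caller can unpack the return value documented above, where an error code such as -EINTR rides in the upper bits:

/* Recover the errcode packed by kvm_mips_handle_exit(), e.g. -EINTR */
static inline int resume_errcode(int ret)
{
        return ret >> 2;
}

/* Nonzero when the exit should propagate back to userspace */
static inline int resume_to_host(int ret)
{
        return ret & RESUME_HOST;
}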
-
-int __init kvm_mips_init(void)
-{
-       int ret;
-
-       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
-
-       if (ret)
-               return ret;
-
-       /*
-        * On MIPS, kernel modules are executed from "mapped space", which
-        * requires TLBs. The TLB handling code is statically linked with
-        * the rest of the kernel (kvm_tlb.c) to avoid the possibility of
-        * double faulting. The issue is that the TLB code references
-        * routines that are part of the KVM module, which are only
-        * available once the module is loaded.
-        */
-       kvm_mips_gfn_to_pfn = gfn_to_pfn;
-       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
-       kvm_mips_is_error_pfn = is_error_pfn;
-
-       pr_info("KVM/MIPS Initialized\n");
-       return 0;
-}
-
-void __exit kvm_mips_exit(void)
-{
-       kvm_exit();
-
-       kvm_mips_gfn_to_pfn = NULL;
-       kvm_mips_release_pfn_clean = NULL;
-       kvm_mips_is_error_pfn = NULL;
-
-       pr_info("KVM/MIPS unloaded\n");
-}
-
-module_init(kvm_mips_init);
-module_exit(kvm_mips_exit);
-
-EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h
deleted file mode 100644 (file)
index a4a8c85..0000000
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: commpage: mapped into guest kernel space
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#ifndef __KVM_MIPS_COMMPAGE_H__
-#define __KVM_MIPS_COMMPAGE_H__
-
-struct kvm_mips_commpage {
-       struct mips_coproc cop0;        /* COP0 state is mapped into Guest kernel via commpage */
-};
-
-#define KVM_MIPS_COMM_EIDI_OFFSET       0x0
-
-extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
-
-#endif /* __KVM_MIPS_COMMPAGE_H__ */
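
As a rough sketch (trimmed stand-in types, purely for illustration), the dynamic translator below computes commpage-relative offsets from this layout with offsetof(); the variable array indices used here are accepted by GCC, as in the kernel code itself:

#include <stddef.h>

/* Trimmed, hypothetical stand-ins for the kernel structures above */
struct mips_coproc { unsigned long reg[32][8]; };
struct kvm_mips_commpage { struct mips_coproc cop0; };

/* Byte offset of CP0 register (rd, sel) from the start of the commpage */
static size_t commpage_cop0_offset(int rd, int sel)
{
        return offsetof(struct kvm_mips_commpage, cop0) +
               offsetof(struct mips_coproc, reg[rd][sel]);
}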
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c
deleted file mode 100644 (file)
index 3873b1e..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* commpage, currently used for Virtual COP0 registers.
-* Mapped into the guest kernel @ 0x0.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/mmu_context.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_comm.h"
-
-void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
-{
-       struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
-
-       memset(page, 0, sizeof(struct kvm_mips_commpage));
-
-       /* Specific init values for fields */
-       vcpu->arch.cop0 = &page->cop0;
-       memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc));
-}
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/kvm_mips_dyntrans.c
deleted file mode 100644 (file)
index b80e41d..0000000
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/kvm_host.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/cacheflush.h>
-
-#include "kvm_mips_comm.h"
-
-#define SYNCI_TEMPLATE  0x041f0000
-#define SYNCI_BASE(x)   (((x) >> 21) & 0x1f)
-#define SYNCI_OFFSET(x) ((x) & 0xffff)
-
-#define LW_TEMPLATE     0x8c000000
-#define CLEAR_TEMPLATE  0x00000020
-#define SW_TEMPLATE     0xac000000
-
-int
-kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
-                          struct kvm_vcpu *vcpu)
-{
-       int result = 0;
-       unsigned long kseg0_opc;
-       uint32_t synci_inst = 0x0;
-
-       /* Replace the CACHE instruction with a NOP */
-       kseg0_opc =
-           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-                      (vcpu, (unsigned long) opc));
-       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
-       local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
-       return result;
-}
-
-/*
- * Address-based CACHE instructions are transformed into synci(s). A little
- * heavy for just D-cache invalidates, but it avoids an expensive trap.
- */
-int
-kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
-                       struct kvm_vcpu *vcpu)
-{
-       int result = 0;
-       unsigned long kseg0_opc;
-       uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
-
-       base = (inst >> 21) & 0x1f;
-       offset = inst & 0xffff;
-       synci_inst |= (base << 21);
-       synci_inst |= offset;
-
-       kseg0_opc =
-           CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-                      (vcpu, (unsigned long) opc));
-       memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
-       local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
-       return result;
-}
-
-int
-kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
-{
-       int32_t rt, rd, sel;
-       uint32_t mfc0_inst;
-       unsigned long kseg0_opc, flags;
-
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
-
-       if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
-               mfc0_inst = CLEAR_TEMPLATE;
-               mfc0_inst |= ((rt & 0x1f) << 16);
-       } else {
-               mfc0_inst = LW_TEMPLATE;
-               mfc0_inst |= ((rt & 0x1f) << 16);
-               mfc0_inst |=
-                   offsetof(struct mips_coproc,
-                            reg[rd][sel]) + offsetof(struct kvm_mips_commpage,
-                                                     cop0);
-       }
-
-       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-               kseg0_opc =
-                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-                              (vcpu, (unsigned long) opc));
-               memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
-               local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-               local_irq_save(flags);
-               memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
-               local_flush_icache_range((unsigned long)opc,
-                                        (unsigned long)opc + 32);
-               local_irq_restore(flags);
-       } else {
-               kvm_err("%s: Invalid address: %p\n", __func__, opc);
-               return -EFAULT;
-       }
-
-       return 0;
-}
-
-int
-kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
-{
-       int32_t rt, rd, sel;
-       uint32_t mtc0_inst = SW_TEMPLATE;
-       unsigned long kseg0_opc, flags;
-
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
-
-       mtc0_inst |= ((rt & 0x1f) << 16);
-       mtc0_inst |=
-           offsetof(struct mips_coproc,
-                    reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0);
-
-       if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-               kseg0_opc =
-                   CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
-                              (vcpu, (unsigned long) opc));
-               memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
-               local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-       } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-               local_irq_save(flags);
-               memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
-               local_flush_icache_range((unsigned long)opc,
-                                        (unsigned long)opc + 32);
-               local_irq_restore(flags);
-       } else {
-               kvm_err("%s: Invalid address: %p\n", __func__, opc);
-               return -EFAULT;
-       }
-
-       return 0;
-}
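
For reference, a minimal sketch (an illustration, not kernel code) of the field packing these translators rely on: rt selects the GPR at bits 20:16 of the lw/sw templates, and the 16-bit immediate holds the commpage offset computed above, with $zero as the base register since the commpage sits at guest VA 0:

#include <stdint.h>

#define LW_TEMPLATE     0x8c000000
#define SW_TEMPLATE     0xac000000

/* Build "lw/sw $rt, offset($zero)" targeting the commpage */
static uint32_t build_commpage_access(uint32_t template, int rt,
                                      uint16_t offset)
{
        return template | ((rt & 0x1f) << 16) | offset;
}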
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
deleted file mode 100644 (file)
index 8d48400..0000000
+++ /dev/null
@@ -1,2332 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Instruction/Exception emulation
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/ktime.h>
-#include <linux/kvm_host.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <linux/random.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-#include <asm/cpu-info.h>
-#include <asm/mmu_context.h>
-#include <asm/tlbflush.h>
-#include <asm/inst.h>
-
-#undef CONFIG_MIPS_MT
-#include <asm/r4kcache.h>
-#define CONFIG_MIPS_MT
-
-#include "kvm_mips_opcode.h"
-#include "kvm_mips_int.h"
-#include "kvm_mips_comm.h"
-
-#include "trace.h"
-
-/*
- * Compute the return address and emulate the branch, if required. This
- * function should be called only when the PC is in a branch delay slot.
- */
-unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
-       unsigned long instpc)
-{
-       unsigned int dspcontrol;
-       union mips_instruction insn;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       long epc = instpc;
-       long nextpc = KVM_INVALID_INST;
-
-       if (epc & 3)
-               goto unaligned;
-
-       /*
-        * Read the instruction
-        */
-       insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
-
-       if (insn.word == KVM_INVALID_INST)
-               return KVM_INVALID_INST;
-
-       switch (insn.i_format.opcode) {
-               /*
-                * jr and jalr are in r_format format.
-                */
-       case spec_op:
-               switch (insn.r_format.func) {
-               case jalr_op:
-                       arch->gprs[insn.r_format.rd] = epc + 8;
-                       /* Fall through */
-               case jr_op:
-                       nextpc = arch->gprs[insn.r_format.rs];
-                       break;
-               }
-               break;
-
-               /*
-                * This group contains:
-                * bltz_op, bgez_op, bltzl_op, bgezl_op,
-                * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
-                */
-       case bcond_op:
-               switch (insn.i_format.rt) {
-               case bltz_op:
-               case bltzl_op:
-                       if ((long)arch->gprs[insn.i_format.rs] < 0)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-
-               case bgez_op:
-               case bgezl_op:
-                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-
-               case bltzal_op:
-               case bltzall_op:
-                       arch->gprs[31] = epc + 8;
-                       if ((long)arch->gprs[insn.i_format.rs] < 0)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-
-               case bgezal_op:
-               case bgezall_op:
-                       arch->gprs[31] = epc + 8;
-                       if ((long)arch->gprs[insn.i_format.rs] >= 0)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-               case bposge32_op:
-                       if (!cpu_has_dsp)
-                               goto sigill;
-
-                       dspcontrol = rddsp(0x01);
-
-                       if (dspcontrol >= 32)
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                       else
-                               epc += 8;
-                       nextpc = epc;
-                       break;
-               }
-               break;
-
-               /*
-                * These are unconditional and in j_format.
-                */
-       case jal_op:
-               arch->gprs[31] = instpc + 8;
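-               /* fall through */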
-       case j_op:
-               epc += 4;
-               epc >>= 28;
-               epc <<= 28;
-               epc |= (insn.j_format.target << 2);
-               nextpc = epc;
-               break;
-
-               /*
-                * These are conditional and in i_format.
-                */
-       case beq_op:
-       case beql_op:
-               if (arch->gprs[insn.i_format.rs] ==
-                   arch->gprs[insn.i_format.rt])
-                       epc = epc + 4 + (insn.i_format.simmediate << 2);
-               else
-                       epc += 8;
-               nextpc = epc;
-               break;
-
-       case bne_op:
-       case bnel_op:
-               if (arch->gprs[insn.i_format.rs] !=
-                   arch->gprs[insn.i_format.rt])
-                       epc = epc + 4 + (insn.i_format.simmediate << 2);
-               else
-                       epc += 8;
-               nextpc = epc;
-               break;
-
-       case blez_op:           /* not really i_format */
-       case blezl_op:
-               /* rt field assumed to be zero */
-               if ((long)arch->gprs[insn.i_format.rs] <= 0)
-                       epc = epc + 4 + (insn.i_format.simmediate << 2);
-               else
-                       epc += 8;
-               nextpc = epc;
-               break;
-
-       case bgtz_op:
-       case bgtzl_op:
-               /* rt field assumed to be zero */
-               if ((long)arch->gprs[insn.i_format.rs] > 0)
-                       epc = epc + 4 + (insn.i_format.simmediate << 2);
-               else
-                       epc += 8;
-               nextpc = epc;
-               break;
-
-               /*
-                * And now the FPA/cp1 branch instructions.
-                */
-       case cop1_op:
-               printk("%s: unsupported cop1_op\n", __func__);
-               break;
-       }
-
-       return nextpc;
-
-unaligned:
-       printk("%s: unaligned epc\n", __func__);
-       return nextpc;
-
-sigill:
-       printk("%s: DSP branch but not DSP ASE\n", __func__);
-       return nextpc;
-}
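
The branch cases above all share the same target arithmetic: the 16-bit immediate is a signed word offset applied relative to the delay-slot address (epc + 4), and a not-taken branch skips both the branch and its delay slot. A tiny illustrative helper pair:

#include <stdint.h>

/* Taken branch: target = delay-slot address + (sign-extended imm << 2) */
static long branch_target(long epc, int16_t simmediate)
{
        return epc + 4 + ((long)simmediate << 2);
}

/* Not-taken branch: continue after the branch and its delay slot */
static long branch_fallthrough(long epc)
{
        return epc + 8;
}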
-
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
-{
-       unsigned long branch_pc;
-       enum emulation_result er = EMULATE_DONE;
-
-       if (cause & CAUSEF_BD) {
-               branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
-               if (branch_pc == KVM_INVALID_INST) {
-                       er = EMULATE_FAIL;
-               } else {
-                       vcpu->arch.pc = branch_pc;
-                       kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
-               }
-       } else {
-               vcpu->arch.pc += 4;
-       }
-
-       kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
-
-       return er;
-}
-
-/**
- * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
- * @vcpu:      Virtual CPU.
- *
- * Returns:    1 if the CP0_Count timer is disabled by either the guest
- *             CP0_Cause.DC bit or the count_ctl.DC bit.
- *             0 otherwise (in which case CP0_Count timer is running).
- */
-static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       return  (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
-               (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
-}
-
-/**
- * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
- * @vcpu:      Virtual CPU.
- * @now:       Monotonic ktime to scale.
- *
- * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
-{
-       s64 now_ns, periods;
-       u64 delta;
-
-       now_ns = ktime_to_ns(now);
-       delta = now_ns + vcpu->arch.count_dyn_bias;
-
-       if (delta >= vcpu->arch.count_period) {
-               /* If delta is out of safe range the bias needs adjusting */
-               periods = div64_s64(now_ns, vcpu->arch.count_period);
-               vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
-               /* Recalculate delta with new bias */
-               delta = now_ns + vcpu->arch.count_dyn_bias;
-       }
-
-       /*
-        * We've ensured that:
-        *   delta < count_period
-        *
-        * Therefore the intermediate delta*count_hz will never overflow since
-        * at the boundary condition:
-        *   delta = count_period
-        *   delta = NSEC_PER_SEC * 2^32 / count_hz
-        *   delta * count_hz = NSEC_PER_SEC * 2^32
-        */
-       return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
-}
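
A standalone sketch (assumed sample values, for illustration only) of the no-overflow argument in the comment above: because the dynamic bias keeps delta below count_period = NSEC_PER_SEC * 2^32 / count_hz, the intermediate product delta * count_hz always fits in 64 bits:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
        uint64_t count_hz = 100000000ULL;       /* 100 MHz default */
        uint64_t count_period = (NSEC_PER_SEC << 32) / count_hz;
        uint64_t now_ns = 123456789012345ULL;   /* arbitrary sample time */

        /* The dynamic bias folds now_ns into [0, count_period) */
        uint64_t delta = now_ns % count_period;
        uint32_t count = (uint32_t)(delta * count_hz / NSEC_PER_SEC);

        /* At the boundary delta == count_period, count would wrap at 2^32 */
        printf("period = %llu ns, count = %u\n",
               (unsigned long long)count_period, count);
        return 0;
}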
-
-/**
- * kvm_mips_count_time() - Get effective current time.
- * @vcpu:      Virtual CPU.
- *
- * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
- * except when the master disable bit is set in count_ctl, in which case it is
- * count_resume, i.e. the time that the count was disabled.
- *
- * Returns:    Effective monotonic ktime for CP0_Count.
- */
-static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
-{
-       if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
-               return vcpu->arch.count_resume;
-
-       return ktime_get();
-}
-
-/**
- * kvm_mips_read_count_running() - Read the current count value as if running.
- * @vcpu:      Virtual CPU.
- * @now:       Kernel time to read CP0_Count at.
- *
- * Returns the current guest CP0_Count register at time @now, and handles the
- * case where the timer interrupt is pending and hasn't been handled yet.
- *
- * Returns:    The current value of the guest CP0_Count register.
- */
-static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
-{
-       ktime_t expires;
-       int running;
-
-       /* Is the hrtimer pending? */
-       expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
-       if (ktime_compare(now, expires) >= 0) {
-               /*
-                * Cancel it while we handle it so there's no chance of
-                * interference with the timeout handler.
-                */
-               running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
-
-               /* Nothing should be waiting on the timeout */
-               kvm_mips_callbacks->queue_timer_int(vcpu);
-
-               /*
-                * Restart the timer if it was running based on the expiry time
-                * we read, so that we don't push it back 2 periods.
-                */
-               if (running) {
-                       expires = ktime_add_ns(expires,
-                                              vcpu->arch.count_period);
-                       hrtimer_start(&vcpu->arch.comparecount_timer, expires,
-                                     HRTIMER_MODE_ABS);
-               }
-       }
-
-       /* Return the biased and scaled guest CP0_Count */
-       return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
-}
-
-/**
- * kvm_mips_read_count() - Read the current count value.
- * @vcpu:      Virtual CPU.
- *
- * Read the current guest CP0_Count value, taking into account whether the timer
- * is stopped.
- *
- * Returns:    The current guest CP0_Count value.
- */
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       /* If count disabled just read static copy of count */
-       if (kvm_mips_count_disabled(vcpu))
-               return kvm_read_c0_guest_count(cop0);
-
-       return kvm_mips_read_count_running(vcpu, ktime_get());
-}
-
-/**
- * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
- * @vcpu:      Virtual CPU.
- * @count:     Output pointer for CP0_Count value at point of freeze.
- *
- * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
- * at the point it was frozen. It is guaranteed that any pending interrupts at
- * the point it was frozen are handled, and none after that point.
- *
- * This is useful where the time/CP0_Count is needed in the calculation of the
- * new parameters.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- *
- * Returns:    The ktime at the point of freeze.
- */
-static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
-                                      uint32_t *count)
-{
-       ktime_t now;
-
-       /* stop hrtimer before finding time */
-       hrtimer_cancel(&vcpu->arch.comparecount_timer);
-       now = ktime_get();
-
-       /* find count at this point and handle pending hrtimer */
-       *count = kvm_mips_read_count_running(vcpu, now);
-
-       return now;
-}
-
-
-/**
- * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
- * @vcpu:      Virtual CPU.
- * @now:       ktime at point of resume.
- * @count:     CP0_Count at point of resume.
- *
- * Resumes the timer and updates the timer expiry based on @now and @count.
- * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
- * parameters need to be changed.
- *
- * It is guaranteed that a timer interrupt immediately after resume will be
- * handled, but not if CP0_Compare is exactly at @count. That case is already
- * handled by kvm_mips_freeze_hrtimer().
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
-                                   ktime_t now, uint32_t count)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t compare;
-       u64 delta;
-       ktime_t expire;
-
-       /* Calculate timeout (wrap 0 to 2^32) */
-       compare = kvm_read_c0_guest_compare(cop0);
-       delta = (u64)(uint32_t)(compare - count - 1) + 1;
-       delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
-       expire = ktime_add_ns(now, delta);
-
-       /* Update hrtimer to use new timeout */
-       hrtimer_cancel(&vcpu->arch.comparecount_timer);
-       hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
-}
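
The timeout calculation above leans on modular arithmetic; a small sketch (illustrative only) of why the (compare - count - 1) + 1 dance maps "compare already equal to count" onto a full 2^32-tick wait rather than a zero timeout:

#include <stdint.h>

/* Ticks until CP0_Count next equals CP0_Compare; never returns 0 */
static uint64_t ticks_until_match(uint32_t compare, uint32_t count)
{
        /* compare == count      ->  0xffffffff + 1 = 2^32 ticks */
        /* compare == count + 1  ->  0 + 1          = 1 tick     */
        return (uint64_t)(uint32_t)(compare - count - 1) + 1;
}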
-
-/**
- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
- * @vcpu:      Virtual CPU.
- *
- * Recalculates and updates the expiry time of the hrtimer. This can be used
- * after timer parameters have been altered in a way that does not depend on
- * the time that the change occurs (when it does, kvm_mips_freeze_hrtimer()
- * and kvm_mips_resume_hrtimer() are used directly instead).
- *
- * It is guaranteed that no timer interrupts will be lost in the process.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
-{
-       ktime_t now;
-       uint32_t count;
-
-       /*
-        * kvm_mips_freeze_hrtimer() takes care of timer interrupts <= count,
-        * and kvm_mips_resume_hrtimer() takes care of timer interrupts > count.
-        */
-       now = kvm_mips_freeze_hrtimer(vcpu, &count);
-       kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
-/**
- * kvm_mips_write_count() - Modify the count and update timer.
- * @vcpu:      Virtual CPU.
- * @count:     Guest CP0_Count value to set.
- *
- * Sets the CP0_Count value and updates the timer accordingly.
- */
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       ktime_t now;
-
-       /* Calculate bias */
-       now = kvm_mips_count_time(vcpu);
-       vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
-
-       if (kvm_mips_count_disabled(vcpu))
-               /* The timer's disabled, adjust the static count */
-               kvm_write_c0_guest_count(cop0, count);
-       else
-               /* Update timeout */
-               kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
-/**
- * kvm_mips_init_count() - Initialise timer.
- * @vcpu:      Virtual CPU.
- *
- * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
- * it going if it's enabled.
- */
-void kvm_mips_init_count(struct kvm_vcpu *vcpu)
-{
-       /* 100 MHz */
-       vcpu->arch.count_hz = 100*1000*1000;
-       vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
-                                         vcpu->arch.count_hz);
-       vcpu->arch.count_dyn_bias = 0;
-
-       /* Starting at 0 */
-       kvm_mips_write_count(vcpu, 0);
-}
-
-/**
- * kvm_mips_set_count_hz() - Update the frequency of the timer.
- * @vcpu:      Virtual CPU.
- * @count_hz:  Frequency of CP0_Count timer in Hz.
- *
- * Change the frequency of the CP0_Count timer. This is done atomically so that
- * CP0_Count is continuous and no timer interrupt is lost.
- *
- * Returns:    -EINVAL if @count_hz is out of range.
- *             0 on success.
- */
-int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       int dc;
-       ktime_t now;
-       u32 count;
-
-       /* ensure the frequency is in a sensible range... */
-       if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
-               return -EINVAL;
-       /* ... and has actually changed */
-       if (vcpu->arch.count_hz == count_hz)
-               return 0;
-
-       /* Safely freeze timer so we can keep it continuous */
-       dc = kvm_mips_count_disabled(vcpu);
-       if (dc) {
-               now = kvm_mips_count_time(vcpu);
-               count = kvm_read_c0_guest_count(cop0);
-       } else {
-               now = kvm_mips_freeze_hrtimer(vcpu, &count);
-       }
-
-       /* Update the frequency */
-       vcpu->arch.count_hz = count_hz;
-       vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
-       vcpu->arch.count_dyn_bias = 0;
-
-       /* Calculate adjusted bias so dynamic count is unchanged */
-       vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
-
-       /* Update and resume hrtimer */
-       if (!dc)
-               kvm_mips_resume_hrtimer(vcpu, now, count);
-       return 0;
-}
-
-/**
- * kvm_mips_write_compare() - Modify compare and update timer.
- * @vcpu:      Virtual CPU.
- * @compare:   New CP0_Compare value.
- *
- * Update CP0_Compare to a new value and update the timeout.
- */
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       /* if unchanged, must just be an ack */
-       if (kvm_read_c0_guest_compare(cop0) == compare)
-               return;
-
-       /* Update compare */
-       kvm_write_c0_guest_compare(cop0, compare);
-
-       /* Update timeout if count enabled */
-       if (!kvm_mips_count_disabled(vcpu))
-               kvm_mips_update_hrtimer(vcpu);
-}
-
-/**
- * kvm_mips_count_disable() - Disable count.
- * @vcpu:      Virtual CPU.
- *
- * Disable the CP0_Count timer. A timer interrupt on or before the final stop
- * time will be handled but not after.
- *
- * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
- * count_ctl.DC has been set (count disabled).
- *
- * Returns:    The time that the timer was stopped.
- */
-static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t count;
-       ktime_t now;
-
-       /* Stop hrtimer */
-       hrtimer_cancel(&vcpu->arch.comparecount_timer);
-
-       /* Set the static count from the dynamic count, handling pending TI */
-       now = ktime_get();
-       count = kvm_mips_read_count_running(vcpu, now);
-       kvm_write_c0_guest_count(cop0, count);
-
-       return now;
-}
-
-/**
- * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
- * @vcpu:      Virtual CPU.
- *
- * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
- * before the final stop time will be handled if the timer isn't disabled by
- * count_ctl.DC, but not after.
- *
- * Assumes CP0_Cause.DC is clear (count enabled).
- */
-void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
-       if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
-               kvm_mips_count_disable(vcpu);
-}
-
-/**
- * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
- * @vcpu:      Virtual CPU.
- *
- * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
- * the start time will be handled if the timer isn't disabled by count_ctl.DC,
- * potentially before even returning, so the caller should be careful with
- * ordering of CP0_Cause modifications so as not to lose it.
- *
- * Assumes CP0_Cause.DC is set (count disabled).
- */
-void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t count;
-
-       kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
-
-       /*
-        * Set the dynamic count to match the static count.
-        * This starts the hrtimer if count_ctl.DC allows it.
-        * Otherwise it conveniently updates the biases.
-        */
-       count = kvm_read_c0_guest_count(cop0);
-       kvm_mips_write_count(vcpu, count);
-}
-
-/**
- * kvm_mips_set_count_ctl() - Update the count control KVM register.
- * @vcpu:      Virtual CPU.
- * @count_ctl: Count control register new value.
- *
- * Set the count control KVM register. The timer is updated accordingly.
- *
- * Returns:    -EINVAL if reserved bits are set.
- *             0 on success.
- */
-int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       s64 changed = count_ctl ^ vcpu->arch.count_ctl;
-       s64 delta;
-       ktime_t expire, now;
-       uint32_t count, compare;
-
-       /* Only allow defined bits to be changed */
-       if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
-               return -EINVAL;
-
-       /* Apply new value */
-       vcpu->arch.count_ctl = count_ctl;
-
-       /* Master CP0_Count disable */
-       if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
-               /* Is CP0_Cause.DC already disabling CP0_Count? */
-               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
-                       if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
-                               /* Just record the current time */
-                               vcpu->arch.count_resume = ktime_get();
-               } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
-                       /* disable timer and record current time */
-                       vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
-               } else {
-                       /*
-                        * Calculate timeout relative to static count at resume
-                        * time (wrap 0 to 2^32).
-                        */
-                       count = kvm_read_c0_guest_count(cop0);
-                       compare = kvm_read_c0_guest_compare(cop0);
-                       delta = (u64)(uint32_t)(compare - count - 1) + 1;
-                       delta = div_u64(delta * NSEC_PER_SEC,
-                                       vcpu->arch.count_hz);
-                       expire = ktime_add_ns(vcpu->arch.count_resume, delta);
-
-                       /* Handle pending interrupt */
-                       now = ktime_get();
-                       if (ktime_compare(now, expire) >= 0)
-                               /* Nothing should be waiting on the timeout */
-                               kvm_mips_callbacks->queue_timer_int(vcpu);
-
-                       /* Resume hrtimer without changing bias */
-                       count = kvm_mips_read_count_running(vcpu, now);
-                       kvm_mips_resume_hrtimer(vcpu, now, count);
-               }
-       }
-
-       return 0;
-}
-
-/**
- * kvm_mips_set_count_resume() - Update the count resume KVM register.
- * @vcpu:              Virtual CPU.
- * @count_resume:      Count resume register new value.
- *
- * Set the count resume KVM register.
- *
- * Returns:    -EINVAL if out of valid range (0..now).
- *             0 on success.
- */
-int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
-{
-       /*
-        * It doesn't make sense for the resume time to be in the future, as it
-        * would be possible for the next interrupt to be more than a full
-        * period in the future.
-        */
-       if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
-               return -EINVAL;
-
-       vcpu->arch.count_resume = ns_to_ktime(count_resume);
-       return 0;
-}
-
-/**
- * kvm_mips_count_timeout() - Push timer forward on timeout.
- * @vcpu:      Virtual CPU.
- *
- * Handle an hrtimer event by pushing the hrtimer forward a period.
- *
- * Returns:    The hrtimer_restart value to return to the hrtimer subsystem.
- */
-enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
-{
-       /* Add the Count period to the current expiry time */
-       hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
-                              vcpu->arch.count_period);
-       return HRTIMER_RESTART;
-}
-
-enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       enum emulation_result er = EMULATE_DONE;
-
-       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
-               kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
-                         kvm_read_c0_guest_epc(cop0));
-               kvm_clear_c0_guest_status(cop0, ST0_EXL);
-               vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
-
-       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-               kvm_clear_c0_guest_status(cop0, ST0_ERL);
-               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
-       } else {
-               printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
-                      vcpu->arch.pc);
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
-enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-
-       kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
-                 vcpu->arch.pending_exceptions);
-
-       ++vcpu->stat.wait_exits;
-       trace_kvm_exit(vcpu, WAIT_EXITS);
-       if (!vcpu->arch.pending_exceptions) {
-               vcpu->arch.wait = 1;
-               kvm_vcpu_block(vcpu);
-
-               /*
-                * If we are runnable, then definitely go off to user space to
-                * check if any I/O interrupts are pending.
-                */
-               if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
-                       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
-                       vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
-               }
-       }
-
-       return er;
-}
-
-/*
- * XXXKYMA: Linux doesn't seem to use TLBR. Return EMULATE_FAIL for now so
- * that we can catch this if things ever change.
- */
-enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       enum emulation_result er = EMULATE_FAIL;
-       uint32_t pc = vcpu->arch.pc;
-
-       printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
-       return er;
-}
-
-/* Write Guest TLB Entry @ Index */
-enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       int index = kvm_read_c0_guest_index(cop0);
-       enum emulation_result er = EMULATE_DONE;
-       struct kvm_mips_tlb *tlb = NULL;
-       uint32_t pc = vcpu->arch.pc;
-
-       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-               printk("%s: illegal index: %d\n", __func__, index);
-               printk
-                   ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-                    pc, index, kvm_read_c0_guest_entryhi(cop0),
-                    kvm_read_c0_guest_entrylo0(cop0),
-                    kvm_read_c0_guest_entrylo1(cop0),
-                    kvm_read_c0_guest_pagemask(cop0));
-               index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
-       }
-
-       tlb = &vcpu->arch.guest_tlb[index];
-       /*
-        * Probe the shadow host TLB for the entry being overwritten; if one
-        * matches, invalidate it.
-        */
-       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-
-       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
-       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
-       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
-       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
-
-       kvm_debug
-           ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
-            pc, index, kvm_read_c0_guest_entryhi(cop0),
-            kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
-            kvm_read_c0_guest_pagemask(cop0));
-
-       return er;
-}
-
-/* Write Guest TLB Entry @ Random Index */
-enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       enum emulation_result er = EMULATE_DONE;
-       struct kvm_mips_tlb *tlb = NULL;
-       uint32_t pc = vcpu->arch.pc;
-       int index;
-
-#if 1
-       get_random_bytes(&index, sizeof(index));
-       index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
-#else
-       index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
-#endif
-
-       if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
-               printk("%s: illegal index: %d\n", __func__, index);
-               return EMULATE_FAIL;
-       }
-
-       tlb = &vcpu->arch.guest_tlb[index];
-
-       /*
-        * Probe the shadow host TLB for the entry being overwritten; if one
-        * matches, invalidate it.
-        */
-       kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
-
-       tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
-       tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
-       tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
-       tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
-
-       kvm_debug
-           ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
-            pc, index, kvm_read_c0_guest_entryhi(cop0),
-            kvm_read_c0_guest_entrylo0(cop0),
-            kvm_read_c0_guest_entrylo1(cop0));
-
-       return er;
-}
-
-enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       long entryhi = kvm_read_c0_guest_entryhi(cop0);
-       enum emulation_result er = EMULATE_DONE;
-       uint32_t pc = vcpu->arch.pc;
-       int index = -1;
-
-       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
-
-       kvm_write_c0_guest_index(cop0, index);
-
-       kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
-                 index);
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
-                    struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       enum emulation_result er = EMULATE_DONE;
-       int32_t rt, rd, copz, sel, co_bit, op;
-       uint32_t pc = vcpu->arch.pc;
-       unsigned long curr_pc;
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to roll back the PC.
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       copz = (inst >> 21) & 0x1f;
-       rt = (inst >> 16) & 0x1f;
-       rd = (inst >> 11) & 0x1f;
-       sel = inst & 0x7;
-       co_bit = (inst >> 25) & 1;
-
-       if (co_bit) {
-               op = (inst) & 0xff;
-
-               switch (op) {
-               case tlbr_op:   /*  Read indexed TLB entry  */
-                       er = kvm_mips_emul_tlbr(vcpu);
-                       break;
-               case tlbwi_op:  /*  Write indexed  */
-                       er = kvm_mips_emul_tlbwi(vcpu);
-                       break;
-               case tlbwr_op:  /*  Write random  */
-                       er = kvm_mips_emul_tlbwr(vcpu);
-                       break;
-               case tlbp_op:   /* TLB Probe */
-                       er = kvm_mips_emul_tlbp(vcpu);
-                       break;
-               case rfe_op:
-                       printk("!!!COP0_RFE!!!\n");
-                       break;
-               case eret_op:
-                       er = kvm_mips_emul_eret(vcpu);
-                       goto dont_update_pc;
-               case wait_op:
-                       er = kvm_mips_emul_wait(vcpu);
-                       break;
-               }
-       } else {
-               switch (copz) {
-               case mfc_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
-                       cop0->stat[rd][sel]++;
-#endif
-                       /* Get reg */
-                       if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
-                               vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
-                       } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
-                               vcpu->arch.gprs[rt] = 0x0;
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-                               kvm_mips_trans_mfc0(inst, opc, vcpu);
-#endif
-                       } else {
-                               vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-                               kvm_mips_trans_mfc0(inst, opc, vcpu);
-#endif
-                       }
-
-                       kvm_debug
-                           ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
-                            pc, rd, sel, rt, vcpu->arch.gprs[rt]);
-
-                       break;
-
-               case dmfc_op:
-                       vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
-                       break;
-
-               case mtc_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
-                       cop0->stat[rd][sel]++;
-#endif
-                       if ((rd == MIPS_CP0_TLB_INDEX)
-                           && (vcpu->arch.gprs[rt] >=
-                               KVM_MIPS_GUEST_TLB_SIZE)) {
-                               printk("Invalid TLB Index: %ld",
-                                      vcpu->arch.gprs[rt]);
-                               er = EMULATE_FAIL;
-                               break;
-                       }
-#define C0_EBASE_CORE_MASK 0xff
-                       if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
-                               /* Preserve CORE number */
-                               kvm_change_c0_guest_ebase(cop0,
-                                                         ~(C0_EBASE_CORE_MASK),
-                                                         vcpu->arch.gprs[rt]);
-                               printk("MTCz, cop0->reg[EBASE]: %#lx\n",
-                                      kvm_read_c0_guest_ebase(cop0));
-                       } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-                               uint32_t nasid =
-                                   vcpu->arch.gprs[rt] & ASID_MASK;
-                               if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
-                                   &&
-                                   ((kvm_read_c0_guest_entryhi(cop0) &
-                                     ASID_MASK) != nasid)) {
-
-                                       kvm_debug
-                                           ("MTCz, change ASID from %#lx to %#lx\n",
-                                            kvm_read_c0_guest_entryhi(cop0) &
-                                            ASID_MASK,
-                                            vcpu->arch.gprs[rt] & ASID_MASK);
-
-                                       /* Blow away the shadow host TLBs */
-                                       kvm_mips_flush_host_tlb(1);
-                               }
-                               kvm_write_c0_guest_entryhi(cop0,
-                                                          vcpu->arch.gprs[rt]);
-                       }
-                       /* Are we writing to COUNT? */
-                       else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
-                               kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
-                               goto done;
-                       } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
-                               kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
-                                         pc, kvm_read_c0_guest_compare(cop0),
-                                         vcpu->arch.gprs[rt]);
-
-                               /* Clear pending timer interrupt, if any */
-                               kvm_mips_callbacks->dequeue_timer_int(vcpu);
-                               kvm_mips_write_compare(vcpu,
-                                                      vcpu->arch.gprs[rt]);
-                       } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
-                               kvm_write_c0_guest_status(cop0,
-                                                         vcpu->arch.gprs[rt]);
-                               /* Make sure that CU1 and NMI bits are never set */
-                               kvm_clear_c0_guest_status(cop0,
-                                                         (ST0_CU1 | ST0_NMI));
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-                               kvm_mips_trans_mtc0(inst, opc, vcpu);
-#endif
-                       } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
-                               uint32_t old_cause, new_cause;
-                               old_cause = kvm_read_c0_guest_cause(cop0);
-                               new_cause = vcpu->arch.gprs[rt];
-                               /* Update R/W bits */
-                               kvm_change_c0_guest_cause(cop0, 0x08800300,
-                                                         new_cause);
-                               /* DC bit enabling/disabling timer? */
-                               if ((old_cause ^ new_cause) & CAUSEF_DC) {
-                                       if (new_cause & CAUSEF_DC)
-                                               kvm_mips_count_disable_cause(vcpu);
-                                       else
-                                               kvm_mips_count_enable_cause(vcpu);
-                               }
-                       } else {
-                               cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-                               kvm_mips_trans_mtc0(inst, opc, vcpu);
-#endif
-                       }
-
-                       kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
-                                 rd, sel, cop0->reg[rd][sel]);
-                       break;
-
-               case dmtc_op:
-                       kvm_err("[%#lx] dmtc_op: rt: %d, rd: %d, sel: %d\n",
-                               vcpu->arch.pc, rt, rd, sel);
-                       er = EMULATE_FAIL;
-                       break;
-
-               case mfmcz_op:
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
-                       cop0->stat[MIPS_CP0_STATUS][0]++;
-#endif
-                       if (rt != 0) {
-                               vcpu->arch.gprs[rt] =
-                                   kvm_read_c0_guest_status(cop0);
-                       }
-                       /* Bit 5 of the instruction selects EI vs DI */
-                       if (inst & 0x20) {
-                               kvm_debug("[%#lx] mfmcz_op: EI\n",
-                                         vcpu->arch.pc);
-                               kvm_set_c0_guest_status(cop0, ST0_IE);
-                       } else {
-                               kvm_debug("[%#lx] mfmcz_op: DI\n",
-                                         vcpu->arch.pc);
-                               kvm_clear_c0_guest_status(cop0, ST0_IE);
-                       }
-
-                       break;
-
-               case wrpgpr_op:
-                       {
-                               uint32_t css =
-                                   cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
-                               uint32_t pss =
-                                   (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
-                               /*
-                                * We don't support any shadow register sets,
-                                * so SRSCtl[PSS] and SRSCtl[CSS] must both
-                                * be 0.
-                                */
-                               if (css || pss) {
-                                       er = EMULATE_FAIL;
-                                       break;
-                               }
-                               kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
-                                         vcpu->arch.gprs[rt]);
-                               vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
-                       }
-                       break;
-               default:
-                       kvm_err("[%#lx] MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
-                               vcpu->arch.pc, copz);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-       }
-
-done:
-       /*
-        * Rollback PC only if emulation was unsuccessful
-        */
-       if (er == EMULATE_FAIL) {
-               vcpu->arch.pc = curr_pc;
-       }
-
-dont_update_pc:
-       /*
-        * This is for special instructions whose emulation
-        * updates the PC, so do not overwrite the PC under
-        * any circumstances
-        */
-
-       return er;
-}
-
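-/*
- * A minimal sketch (not part of the original file) of the PC rollback
- * pattern shared by the emulation helpers in this file; emulate_one()
- * stands in for any hypothetical emulation body:
- *
- *     unsigned long curr_pc = vcpu->arch.pc;  // remember the faulting PC
- *     er = update_pc(vcpu, cause);            // step past the instruction
- *     if (er != EMULATE_FAIL)
- *             er = emulate_one(vcpu);         // hypothetical helper
- *     if (er == EMULATE_FAIL)
- *             vcpu->arch.pc = curr_pc;        // roll back, guest retries
- */
-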
-enum emulation_result
-kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
-                      struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DO_MMIO;
-       int32_t op, base, rt, offset;
-       uint32_t bytes;
-       void *data = run->mmio.data;
-       unsigned long curr_pc;
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to rollback the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       rt = (inst >> 16) & 0x1f;
-       base = (inst >> 21) & 0x1f;
-       offset = inst & 0xffff;
-       op = (inst >> 26) & 0x3f;
-
-       switch (op) {
-       case sb_op:
-               bytes = 1;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                               run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                       kvm_mips_callbacks->gva_to_gpa(
-                                       vcpu->arch.host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.len = bytes;
-               run->mmio.is_write = 1;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 1;
-               *(u8 *) data = vcpu->arch.gprs[rt];
-               kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
-                         vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
-                         *(uint8_t *) data);
-
-               break;
-
-       case sw_op:
-               bytes = 4;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                               run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                       kvm_mips_callbacks->gva_to_gpa(
-                                       vcpu->arch.host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 1;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 1;
-               *(uint32_t *) data = vcpu->arch.gprs[rt];
-
-               kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
-                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
-                         vcpu->arch.gprs[rt], *(uint32_t *) data);
-               break;
-
-       case sh_op:
-               bytes = 2;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                               run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                       kvm_mips_callbacks->gva_to_gpa(
-                                       vcpu->arch.host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 1;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 1;
-               *(uint16_t *) data = vcpu->arch.gprs[rt];
-
-               kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
-                         vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
-                         vcpu->arch.gprs[rt], *(uint16_t *) data);
-               break;
-
-       default:
-               kvm_err("Store not yet supported\n");
-               er = EMULATE_FAIL;
-               break;
-       }
-
-       /*
-        * Rollback PC if emulation was unsuccessful
-        */
-       if (er == EMULATE_FAIL) {
-               vcpu->arch.pc = curr_pc;
-       }
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
-                     struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DO_MMIO;
-       int32_t op, base, rt, offset;
-       uint32_t bytes;
-
-       rt = (inst >> 16) & 0x1f;
-       base = (inst >> 21) & 0x1f;
-       offset = inst & 0xffff;
-       op = (inst >> 26) & 0x3f;
-
-       vcpu->arch.pending_load_cause = cause;
-       vcpu->arch.io_gpr = rt;
-
-       switch (op) {
-       case lw_op:
-               bytes = 4;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                       kvm_mips_callbacks->gva_to_gpa(
-                                       vcpu->arch.host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 0;
-               vcpu->mmio_needed = 1;
-               vcpu->mmio_is_write = 0;
-               break;
-
-       case lh_op:
-       case lhu_op:
-               bytes = 2;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                       kvm_mips_callbacks->gva_to_gpa(
-                                       vcpu->arch.host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 0;
-               vcpu->mmio_is_write = 0;
-
-               /* 2 => sign-extend (lh), 1 => zero-extend (lhu) */
-               vcpu->mmio_needed = (op == lh_op) ? 2 : 1;
-
-               break;
-
-       case lbu_op:
-       case lb_op:
-               bytes = 1;
-               if (bytes > sizeof(run->mmio.data)) {
-                       kvm_err("%s: bad MMIO length: %d\n", __func__,
-                              run->mmio.len);
-                       er = EMULATE_FAIL;
-                       break;
-               }
-               run->mmio.phys_addr =
-                       kvm_mips_callbacks->gva_to_gpa(
-                                       vcpu->arch.host_cp0_badvaddr);
-               if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
-                       er = EMULATE_FAIL;
-                       break;
-               }
-
-               run->mmio.len = bytes;
-               run->mmio.is_write = 0;
-               vcpu->mmio_is_write = 0;
-
-               /* 2 => sign-extend (lb), 1 => zero-extend (lbu) */
-               vcpu->mmio_needed = (op == lb_op) ? 2 : 1;
-
-               break;
-
-       default:
-               kvm_err("Load not yet supported\n");
-               er = EMULATE_FAIL;
-               break;
-       }
-
-       return er;
-}
-
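-/*
- * Decode sketch (illustrative) for the I-type loads/stores handled above,
- * using the example instruction "lw $t0, 8($a0)", which assembles to
- * 0x8c880008:
- *
- *     op     = (0x8c880008 >> 26) & 0x3f;  // 0x23 (lw_op)
- *     base   = (0x8c880008 >> 21) & 0x1f;  // 4    ($a0)
- *     rt     = (0x8c880008 >> 16) & 0x1f;  // 8    ($t0)
- *     offset =  0x8c880008 & 0xffff;       // 8
- */
-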
-int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
-{
-       unsigned long offset = (va & ~PAGE_MASK);
-       struct kvm *kvm = vcpu->kvm;
-       unsigned long pa;
-       gfn_t gfn;
-       pfn_t pfn;
-
-       gfn = va >> PAGE_SHIFT;
-
-       if (gfn >= kvm->arch.guest_pmap_npages) {
-               kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               return -1;
-       }
-       pfn = kvm->arch.guest_pmap[gfn];
-       pa = (pfn << PAGE_SHIFT) | offset;
-
-       kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
-
-       local_flush_icache_range(CKSEG0ADDR(pa), CKSEG0ADDR(pa) + 32);
-       return 0;
-}
-
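-/*
- * Address math sketch for kvm_mips_sync_icache() above, with assumed
- * example values (PAGE_SHIFT == 12, va == 0x00400a30, pfn == 0x1234):
- *
- *     gfn    = 0x00400a30 >> 12;         // 0x400
- *     offset = 0x00400a30 & ~PAGE_MASK;  // 0xa30
- *     pa     = (0x1234 << 12) | 0xa30;   // 0x01234a30
- */
-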
-#define MIPS_CACHE_OP_INDEX_INV         0x0
-#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
-#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
-#define MIPS_CACHE_OP_IMP               0x3
-#define MIPS_CACHE_OP_HIT_INV           0x4
-#define MIPS_CACHE_OP_FILL_WB_INV       0x5
-#define MIPS_CACHE_OP_HIT_HB            0x6
-#define MIPS_CACHE_OP_FETCH_LOCK        0x7
-
-#define MIPS_CACHE_ICACHE               0x0
-#define MIPS_CACHE_DCACHE               0x1
-#define MIPS_CACHE_SEC                  0x3
-
-enum emulation_result
-kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
-                      struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       extern void (*r4k_blast_dcache) (void);
-       extern void (*r4k_blast_icache) (void);
-       enum emulation_result er = EMULATE_DONE;
-       int32_t offset, cache, op_inst, op, base;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       unsigned long va;
-       unsigned long curr_pc;
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to rollback the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       base = (inst >> 21) & 0x1f;
-       op_inst = (inst >> 16) & 0x1f;
-       offset = inst & 0xffff;
-       cache = (inst >> 16) & 0x3;
-       op = (inst >> 18) & 0x7;
-
-       va = arch->gprs[base] + offset;
-
-       kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
-                 cache, op, base, arch->gprs[base], offset);
-
-       /*
-        * Treat INDEX_INV as a nop: Linux issues it on startup to invalidate
-        * the caches entirely by stepping through all the ways/indexes.
-        */
-       if (op == MIPS_CACHE_OP_INDEX_INV) {
-               kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
-                         vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
-                         arch->gprs[base], offset);
-
-               if (cache == MIPS_CACHE_DCACHE)
-                       r4k_blast_dcache();
-               else if (cache == MIPS_CACHE_ICACHE)
-                       r4k_blast_icache();
-               else {
-                       kvm_err("%s: unsupported CACHE INDEX operation\n",
-                               __func__);
-                       return EMULATE_FAIL;
-               }
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-               kvm_mips_trans_cache_index(inst, opc, vcpu);
-#endif
-               goto done;
-       }
-
-       preempt_disable();
-       if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
-               if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
-                       kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
-       } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
-                  KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
-               int index;
-
-               /* If an entry already exists then skip */
-               if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
-                       goto skip_fault;
-
-               /*
-                * If the address is not in the guest TLB, give the guest a
-                * fault; the resulting handler will do the right thing.
-                */
-               index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-                                                 (kvm_read_c0_guest_entryhi
-                                                  (cop0) & ASID_MASK));
-
-               if (index < 0) {
-                       vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
-                       vcpu->arch.host_cp0_badvaddr = va;
-                       er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
-                                                        vcpu);
-                       preempt_enable();
-                       goto dont_update_pc;
-               } else {
-                       struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
-                       /*
-                        * Check if the entry is valid; if not, set up a TLB
-                        * invalid exception to the guest.
-                        */
-                       if (!TLB_IS_VALID(*tlb, va)) {
-                               er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
-                                                               run, vcpu);
-                               preempt_enable();
-                               goto dont_update_pc;
-                       } else {
-                               /* We fault an entry from the guest tlb to the shadow host TLB */
-                               kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
-                                                                    NULL,
-                                                                    NULL);
-                       }
-               }
-       } else {
-               kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
-                       cache, op, base, arch->gprs[base], offset);
-               er = EMULATE_FAIL;
-               preempt_enable();
-               goto dont_update_pc;
-
-       }
-
-skip_fault:
-       /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
-       if (cache == MIPS_CACHE_DCACHE
-           && (op == MIPS_CACHE_OP_FILL_WB_INV
-               || op == MIPS_CACHE_OP_HIT_INV)) {
-               flush_dcache_line(va);
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-               /*
-                * Replace the CACHE instruction with a SYNCI; not identical,
-                * but it avoids a trap.
-                */
-               kvm_mips_trans_cache_va(inst, opc, vcpu);
-#endif
-       } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
-               flush_dcache_line(va);
-               flush_icache_line(va);
-
-#ifdef CONFIG_KVM_MIPS_DYN_TRANS
-               /* Replace the CACHE instruction, with a SYNCI */
-               kvm_mips_trans_cache_va(inst, opc, vcpu);
-#endif
-       } else {
-               kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
-                       cache, op, base, arch->gprs[base], offset);
-               er = EMULATE_FAIL;
-               preempt_enable();
-               goto dont_update_pc;
-       }
-
-       preempt_enable();
-
-dont_update_pc:
-       /* Rollback PC */
-       vcpu->arch.pc = curr_pc;
-done:
-       return er;
-}
-
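-/*
- * Field sketch (illustrative) for the CACHE decode above: bits [20:16] of
- * the instruction hold a 5-bit selector, split as op = bits [20:18] and
- * cache = bits [17:16]. E.g. for selector value 0x15 ("Hit Writeback
- * Invalidate D" in the MIPS manuals):
- *
- *     op    = (0x15 >> 2) & 0x7;  // 0x5 (MIPS_CACHE_OP_FILL_WB_INV)
- *     cache =  0x15 & 0x3;        // 0x1 (MIPS_CACHE_DCACHE)
- */
-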
-enum emulation_result
-kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
-                     struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-       uint32_t inst;
-
-       /* Fetch the instruction */
-       if (cause & CAUSEF_BD)
-               opc += 1;
-
-       inst = kvm_get_inst(opc, vcpu);
-
-       switch (((union mips_instruction)inst).r_format.opcode) {
-       case cop0_op:
-               er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
-               break;
-       case sb_op:
-       case sh_op:
-       case sw_op:
-               er = kvm_mips_emulate_store(inst, cause, run, vcpu);
-               break;
-       case lb_op:
-       case lbu_op:
-       case lhu_op:
-       case lh_op:
-       case lw_op:
-               er = kvm_mips_emulate_load(inst, cause, run, vcpu);
-               break;
-
-       case cache_op:
-               ++vcpu->stat.cache_exits;
-               trace_kvm_exit(vcpu, CACHE_EXITS);
-               er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
-               break;
-
-       default:
-               kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
-                       inst);
-               kvm_arch_vcpu_dump_regs(vcpu);
-               er = EMULATE_FAIL;
-               break;
-       }
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
-                        struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
-
-               kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_SYSCALL << CAUSEB_EXCCODE));
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       } else {
-               kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
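-/*
- * Worked example (illustrative) for the Cause update used by the
- * injectors in this file: kvm_change_c0_guest_cause(cop0, mask, bits)
- * read-modify-writes the guest Cause register, so with T_SYSCALL == 8
- * and CAUSEB_EXCCODE == 2,
- *
- *     kvm_change_c0_guest_cause(cop0, 0xff, T_SYSCALL << CAUSEB_EXCCODE);
- *
- * clears Cause[7:0] and sets ExcCode (Cause[6:2]) to 8, the architected
- * MIPS syscall exception code.
- */
-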
-enum emulation_result
-kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
-                           struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-
-               /* set pc to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x0;
-
-       } else {
-               kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
-                          struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-       unsigned long entryhi =
-               (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
-                         arch->pc);
-
-               /* set pc to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       } else {
-               kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
-                         arch->pc);
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
-                           struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x0;
-       } else {
-               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
-                          struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       } else {
-               kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
-                         arch->pc);
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return er;
-}
-
-/* TLBMOD: store into address matching TLB with Dirty bit off */
-enum emulation_result
-kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
-                      struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-#ifdef DEBUG
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-       int index;
-
-       /*
-        * If address not in the guest TLB, then we are in trouble
-        */
-       index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
-       if (index < 0) {
-               /* XXXKYMA Invalidate and retry */
-               kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
-               kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
-                    __func__, entryhi);
-               kvm_mips_dump_guest_tlbs(vcpu);
-               kvm_mips_dump_host_tlbs();
-               return EMULATE_FAIL;
-       }
-#endif
-
-       er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
-                       struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                               (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
-                         arch->pc);
-
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       } else {
-               kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
-                         arch->pc);
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-       }
-
-       kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
-
-       /* setup badvaddr, context and entryhi registers for the guest */
-       kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-       /* XXXKYMA: is the context register used by linux??? */
-       kvm_write_c0_guest_entryhi(cop0, entryhi);
-       /* Blow away the shadow host TLBs */
-       kvm_mips_flush_host_tlb(1);
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
-                        struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-       }
-
-       arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       kvm_change_c0_guest_cause(cop0, (0xff),
-                                 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
-       kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
-                       struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
-
-               kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_RES_INST << CAUSEB_EXCCODE));
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       } else {
-               kvm_err("Trying to deliver RI when EXL is already set\n");
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
-                       struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
-
-               kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (T_BREAK << CAUSEB_EXCCODE));
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-       } else {
-               kvm_err("Trying to deliver BP when EXL is already set\n");
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
-/*
- * ll/sc, rdhwr, sync emulation
- */
-
-#define OPCODE 0xfc000000
-#define BASE   0x03e00000
-#define RT     0x001f0000
-#define OFFSET 0x0000ffff
-#define LL     0xc0000000
-#define SC     0xe0000000
-#define SPEC0  0x00000000
-#define SPEC3  0x7c000000
-#define RD     0x0000f800
-#define FUNC   0x0000003f
-#define SYNC   0x0000000f
-#define RDHWR  0x0000003b
-
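-/*
- * Decode sketch (illustrative) for the masks above, using the well-known
- * Linux TLS helper "rdhwr $3, $29", which assembles to 0x7c03e83b:
- *
- *     (0x7c03e83b & OPCODE) == SPEC3;  // 0x7c000000
- *     (0x7c03e83b & FUNC) == RDHWR;    // 0x3b
- *     rd = (0x7c03e83b & RD) >> 11;    // 29 (UserLocal)
- *     rt = (0x7c03e83b & RT) >> 16;    // 3  ($v1)
- */
-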
-enum emulation_result
-kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
-                  struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-       unsigned long curr_pc;
-       uint32_t inst;
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to rollback the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       /*
-        *  Fetch the instruction.
-        */
-       if (cause & CAUSEF_BD)
-               opc += 1;
-
-       inst = kvm_get_inst(opc, vcpu);
-
-       if (inst == KVM_INVALID_INST) {
-               kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
-               return EMULATE_FAIL;
-       }
-
-       if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
-               int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
-               int rd = (inst & RD) >> 11;
-               int rt = (inst & RT) >> 16;
-               /* If usermode, check RDHWR rd is allowed by guest HWREna */
-               if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
-                       kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
-                                 rd, opc);
-                       goto emulate_ri;
-               }
-               switch (rd) {
-               case 0: /* CPU number */
-                       arch->gprs[rt] = 0;
-                       break;
-               case 1: /* SYNCI length */
-                       arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
-                                            current_cpu_data.icache.linesz);
-                       break;
-               case 2: /* Read count register */
-                       arch->gprs[rt] = kvm_mips_read_count(vcpu);
-                       break;
-               case 3: /* Count register resolution */
-                       switch (current_cpu_data.cputype) {
-                       case CPU_20KC:
-                       case CPU_25KF:
-                               arch->gprs[rt] = 1;
-                               break;
-                       default:
-                               arch->gprs[rt] = 2;
-                       }
-                       break;
-               case 29:        /* UserLocal */
-                       arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
-                       break;
-
-               default:
-                       kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
-                       goto emulate_ri;
-               }
-       } else {
-               kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
-               goto emulate_ri;
-       }
-
-       return EMULATE_DONE;
-
-emulate_ri:
-       /*
-        * Rollback PC (if in branch delay slot then the PC already points to
-        * branch target), and pass the RI exception to the guest OS.
-        */
-       vcpu->arch.pc = curr_pc;
-       return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
-}
-
-enum emulation_result
-kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
-       unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
-       enum emulation_result er = EMULATE_DONE;
-       unsigned long curr_pc;
-
-       if (run->mmio.len > sizeof(*gpr)) {
-               kvm_err("Bad MMIO length: %d\n", run->mmio.len);
-               er = EMULATE_FAIL;
-               goto done;
-       }
-
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to rollback the PC
-        */
-       curr_pc = vcpu->arch.pc;
-       er = update_pc(vcpu, vcpu->arch.pending_load_cause);
-       if (er == EMULATE_FAIL)
-               return er;
-
-       switch (run->mmio.len) {
-       case 4:
-               *gpr = *(int32_t *) run->mmio.data;
-               break;
-
-       case 2:
-               if (vcpu->mmio_needed == 2)
-                       *gpr = *(int16_t *) run->mmio.data;
-               else
-                       *gpr = *(uint16_t *) run->mmio.data;
-
-               break;
-       case 1:
-               if (vcpu->mmio_needed == 2)
-                       *gpr = *(int8_t *) run->mmio.data;
-               else
-                       *gpr = *(u8 *) run->mmio.data;
-               break;
-       }
-
-       if (vcpu->arch.pending_load_cause & CAUSEF_BD)
-               kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
-                         vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
-                         vcpu->mmio_needed);
-
-done:
-       return er;
-}
-
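-/*
- * Extension sketch for the 16-bit case above, assuming 32-bit GPRs and
- * MMIO data bytes holding the value 0x80ff:
- *
- *     lh  (mmio_needed == 2): *(int16_t *)data  -> gpr = 0xffff80ff
- *     lhu (mmio_needed == 1): *(uint16_t *)data -> gpr = 0x000080ff
- */
-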
-static enum emulation_result
-kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
-                    struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       enum emulation_result er = EMULATE_DONE;
-
-       if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-               /* save old pc */
-               kvm_write_c0_guest_epc(cop0, arch->pc);
-               kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-               if (cause & CAUSEF_BD)
-                       kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-               else
-                       kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-               kvm_change_c0_guest_cause(cop0, (0xff),
-                                         (exccode << CAUSEB_EXCCODE));
-
-               /* Set PC to the exception entry point */
-               arch->pc = KVM_GUEST_KSEG0 + 0x180;
-               kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
-
-               kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
-                         exccode, kvm_read_c0_guest_epc(cop0),
-                         kvm_read_c0_guest_badvaddr(cop0));
-       } else {
-               kvm_err("Trying to deliver EXC when EXL is already set\n");
-               er = EMULATE_FAIL;
-       }
-
-       return er;
-}
-
-enum emulation_result
-kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
-                        struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-
-       int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
-
-       if (usermode) {
-               switch (exccode) {
-               case T_INT:
-               case T_SYSCALL:
-               case T_BREAK:
-               case T_RES_INST:
-                       break;
-
-               case T_COP_UNUSABLE:
-                       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
-                               er = EMULATE_PRIV_FAIL;
-                       break;
-
-               case T_TLB_MOD:
-                       break;
-
-               case T_TLB_LD_MISS:
-                       /*
-                        * If we are accessing guest kernel space, send an
-                        * address error exception to the guest.
-                        */
-                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
-                               kvm_debug("%s: LD MISS @ %#lx\n", __func__,
-                                         badvaddr);
-                               cause &= ~0xff;
-                               cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
-                               er = EMULATE_PRIV_FAIL;
-                       }
-                       break;
-
-               case T_TLB_ST_MISS:
-                       /*
-                        * If we are accessing guest kernel space, send an
-                        * address error exception to the guest.
-                        */
-                       if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
-                               kvm_debug("%s: ST MISS @ %#lx\n", __func__,
-                                         badvaddr);
-                               cause &= ~0xff;
-                               cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
-                               er = EMULATE_PRIV_FAIL;
-                       }
-                       break;
-
-               case T_ADDR_ERR_ST:
-                       kvm_debug("%s: address error ST @ %#lx\n", __func__,
-                                 badvaddr);
-                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
-                               cause &= ~0xff;
-                               cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
-                       }
-                       er = EMULATE_PRIV_FAIL;
-                       break;
-               case T_ADDR_ERR_LD:
-                       kvm_debug("%s: address error LD @ %#lx\n", __func__,
-                                 badvaddr);
-                       if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
-                               cause &= ~0xff;
-                               cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
-                       }
-                       er = EMULATE_PRIV_FAIL;
-                       break;
-               default:
-                       er = EMULATE_PRIV_FAIL;
-                       break;
-               }
-       }
-
-       if (er == EMULATE_PRIV_FAIL) {
-               kvm_mips_emulate_exc(cause, opc, run, vcpu);
-       }
-       return er;
-}
-
-/*
- * User Address (UA) fault, which can happen if
- * (1) the TLB entry is not present/valid in both the guest and the shadow
- *     host TLBs; in this case we pass the fault on to the guest kernel and
- *     let it handle it;
- * (2) the TLB entry is present in the guest TLB but not in the shadow host
- *     TLB; in this case we inject the entry from the guest TLB into the
- *     shadow host TLB.
- */
-enum emulation_result
-kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
-                       struct kvm_run *run, struct kvm_vcpu *vcpu)
-{
-       enum emulation_result er = EMULATE_DONE;
-       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
-       unsigned long va = vcpu->arch.host_cp0_badvaddr;
-       int index;
-
-       kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
-                 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
-
-       /*
-        * KVM would not have got the exception if this entry was valid in the
-        * shadow host TLB. Check the guest TLB: if the entry is not there,
-        * send the guest an exception; the guest exception handler should
-        * then inject an entry into the guest TLB.
-        */
-       index = kvm_mips_guest_tlb_lookup(vcpu,
-                                         (va & VPN2_MASK) |
-                                         (kvm_read_c0_guest_entryhi
-                                          (vcpu->arch.cop0) & ASID_MASK));
-       if (index < 0) {
-               if (exccode == T_TLB_LD_MISS) {
-                       er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
-               } else if (exccode == T_TLB_ST_MISS) {
-                       er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
-               } else {
-                       kvm_err("%s: invalid exc code: %d\n", __func__, exccode);
-                       er = EMULATE_FAIL;
-               }
-       } else {
-               struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
-
-               /*
-                * Check if the entry is valid; if not, set up a TLB invalid
-                * exception to the guest.
-                */
-               if (!TLB_IS_VALID(*tlb, va)) {
-                       if (exccode == T_TLB_LD_MISS) {
-                               er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
-                                                               vcpu);
-                       } else if (exccode == T_TLB_ST_MISS) {
-                               er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
-                                                               vcpu);
-                       } else {
-                               kvm_err("%s: invalid exc code: %d\n",
-                                       __func__, exccode);
-                               er = EMULATE_FAIL;
-                       }
-               } else {
-                       kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
-                                 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
-                       /*
-                        * OK, we have a guest TLB entry; inject it into the
-                        * shadow host TLB.
-                        */
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
-                                                            NULL);
-               }
-       }
-
-       return er;
-}
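-
-/*
- * Decision sketch (illustrative) for the TLB miss handling above:
- *
- *     index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | asid);
- *     if (index < 0)                  // not in guest TLB at all
- *             deliver a guest TLB refill exception (ld/st flavour);
- *     else if (!TLB_IS_VALID(...))    // present but marked invalid
- *             deliver a guest TLB invalid exception;
- *     else                            // present and valid
- *             copy the entry into the shadow host TLB;
- */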
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/kvm_mips_int.c
deleted file mode 100644 (file)
index 1e5de16..0000000
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Interrupt delivery
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/fs.h>
-#include <linux/bootmem.h>
-#include <asm/page.h>
-#include <asm/cacheflush.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_int.h"
-
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
-{
-       set_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
-{
-       clear_bit(priority, &vcpu->arch.pending_exceptions);
-}
-
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu)
-{
-       /*
-        * Cause bits to reflect the pending timer interrupt;
-        * the EXC code will be set when we are actually
-        * delivering the interrupt.
-        */
-       kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
-
-       /* Queue up an INT exception for the core */
-       kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
-
-}
-
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
-{
-       kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI));
-       kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
-}
-
-void
-kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq)
-{
-       int intr = (int)irq->irq;
-
-       /*
-        * Cause bits to reflect the pending IO interrupt;
-        * the EXC code will be set when we are actually
-        * delivering the interrupt.
-        */
-       switch (intr) {
-       case 2:
-               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
-               /* Queue up an INT exception for the core */
-               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO);
-               break;
-
-       case 3:
-               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
-               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
-               break;
-
-       case 4:
-               kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
-               kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
-               break;
-
-       default:
-               break;
-       }
-
-}
-
-void
-kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
-                          struct kvm_mips_interrupt *irq)
-{
-       int intr = (int)irq->irq;
-       switch (intr) {
-       case -2:
-               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0));
-               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
-               break;
-
-       case -3:
-               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1));
-               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
-               break;
-
-       case -4:
-               kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2));
-               kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
-               break;
-
-       default:
-               break;
-       }
-
-}
-
-/* Deliver the interrupt of the corresponding priority, if possible. */
-int
-kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-                       uint32_t cause)
-{
-       int allowed = 0;
-       uint32_t exccode;
-
-       struct kvm_vcpu_arch *arch = &vcpu->arch;
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-
-       switch (priority) {
-       case MIPS_EXC_INT_TIMER:
-               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
-                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
-                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) {
-                       allowed = 1;
-                       exccode = T_INT;
-               }
-               break;
-
-       case MIPS_EXC_INT_IO:
-               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
-                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
-                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) {
-                       allowed = 1;
-                       exccode = T_INT;
-               }
-               break;
-
-       case MIPS_EXC_INT_IPI_1:
-               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
-                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
-                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) {
-                       allowed = 1;
-                       exccode = T_INT;
-               }
-               break;
-
-       case MIPS_EXC_INT_IPI_2:
-               if ((kvm_read_c0_guest_status(cop0) & ST0_IE)
-                   && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL)))
-                   && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) {
-                       allowed = 1;
-                       exccode = T_INT;
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       /* Are we allowed to deliver the interrupt? */
-       if (allowed) {
-               if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
-                       /* save old pc */
-                       kvm_write_c0_guest_epc(cop0, arch->pc);
-                       kvm_set_c0_guest_status(cop0, ST0_EXL);
-
-                       if (cause & CAUSEF_BD)
-                               kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
-                       else
-                               kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
-
-                       kvm_debug("Delivering INT @ pc %#lx\n", arch->pc);
-
-               } else {
-                       kvm_err("Trying to deliver interrupt when EXL is already set\n");
-               }
-
-               kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE,
-                                         (exccode << CAUSEB_EXCCODE));
-
-               /* XXXSL Set PC to the interrupt exception entry point */
-               if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV)
-                       arch->pc = KVM_GUEST_KSEG0 + 0x200;
-               else
-                       arch->pc = KVM_GUEST_KSEG0 + 0x180;
-
-               clear_bit(priority, &vcpu->arch.pending_exceptions);
-       }
-
-       return allowed;
-}
-
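-/*
- * Gate sketch for the checks above, where IE_IRQn is the guest Status.IM
- * bit for hardware interrupt n (bit 10 + n): an interrupt line is
- * deliverable only when
- *
- *     (status & ST0_IE) && !(status & (ST0_EXL | ST0_ERL))
- *                       && (status & IE_IRQn)
- *
- * i.e. interrupts globally enabled, not already in exception/error mode,
- * and the matching interrupt mask bit set.
- */
-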
-int
-kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-                     uint32_t cause)
-{
-       return 1;
-}
-
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
-{
-       unsigned long *pending = &vcpu->arch.pending_exceptions;
-       unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
-       unsigned int priority;
-
-       if (!(*pending) && !(*pending_clr))
-               return;
-
-       /* find_first_bit(), unlike __ffs(), is well defined when the word is 0 */
-       priority = find_first_bit(pending_clr,
-                                 BITS_PER_BYTE * sizeof(*pending_clr));
-       while (priority <= MIPS_EXC_MAX) {
-               if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) {
-                       if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE)
-                               break;
-               }
-
-               priority = find_next_bit(pending_clr,
-                                        BITS_PER_BYTE * sizeof(*pending_clr),
-                                        priority + 1);
-       }
-
-       priority = find_first_bit(pending, BITS_PER_BYTE * sizeof(*pending));
-       while (priority <= MIPS_EXC_MAX) {
-               if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) {
-                       if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE)
-                               break;
-               }
-
-               priority = find_next_bit(pending,
-                                        BITS_PER_BYTE * sizeof(*pending),
-                                        priority + 1);
-       }
-
-}
-
-int kvm_mips_pending_timer(struct kvm_vcpu *vcpu)
-{
-       return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions);
-}
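
The delivery loop above walks the pending-exception bitmask in priority
order. As a rough user-space sketch of that walk (the helpers and the
deliver() stub below are illustrative stand-ins for the kernel's
__ffs()/find_next_bit() and the irq_deliver callback, not its API):

#include <stdio.h>

#define MIPS_EXC_MAX 12

/* Return the lowest set bit at or above 'from', or MIPS_EXC_MAX if none. */
static unsigned int next_pending(unsigned long pending, unsigned int from)
{
        unsigned int i;

        for (i = from; i < MIPS_EXC_MAX; i++)
                if (pending & (1UL << i))
                        return i;
        return MIPS_EXC_MAX;
}

static int deliver(unsigned int priority)
{
        printf("delivering exception at priority %u\n", priority);
        return 1;       /* delivered */
}

int main(void)
{
        unsigned long pending = (1UL << 7) | (1UL << 10); /* timer + IPI_1 */
        unsigned int prio;

        /* Lowest bit number (highest priority) first. */
        for (prio = next_pending(pending, 0); prio < MIPS_EXC_MAX;
             prio = next_pending(pending, prio + 1))
                if (deliver(prio))
                        pending &= ~(1UL << prio);      /* clear_bit() */
        return 0;
}

Unlike this sketch, the kernel loop stops after the first successful
delivery unless KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE is set.
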
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/kvm_mips_int.h
deleted file mode 100644 (file)
index 20da7d2..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Interrupts
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-/* MIPS exception priorities: exceptions (including interrupts) are queued
- * up for the guest in the order specified by their priorities.
- */
-
-#define MIPS_EXC_RESET              0
-#define MIPS_EXC_SRESET             1
-#define MIPS_EXC_DEBUG_ST           2
-#define MIPS_EXC_DEBUG              3
-#define MIPS_EXC_DDB                4
-#define MIPS_EXC_NMI                5
-#define MIPS_EXC_MCHK               6
-#define MIPS_EXC_INT_TIMER          7
-#define MIPS_EXC_INT_IO             8
-#define MIPS_EXC_EXECUTE            9
-#define MIPS_EXC_INT_IPI_1          10
-#define MIPS_EXC_INT_IPI_2          11
-#define MIPS_EXC_MAX                12
-/* XXXSL More to follow */
-
-#define C_TI        (_ULCAST_(1) << 30)
-
-#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
-#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE   (0)
-
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
-int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
-
-void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu);
-void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
-                             struct kvm_mips_interrupt *irq);
-void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
-                               struct kvm_mips_interrupt *irq);
-int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-                           uint32_t cause);
-int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
-                         uint32_t cause);
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
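
Queuing and dequeuing in this interface amount to setting and clearing the
priority's bit in the VCPU's pending mask. A minimal stand-alone sketch of
that idea (plain globals stand in for the kvm_vcpu state):

#include <stdio.h>

#define MIPS_EXC_INT_TIMER 7

static unsigned long pending_exceptions;

static void queue_irq(unsigned int priority)
{
        pending_exceptions |= 1UL << priority;
}

static void dequeue_irq(unsigned int priority)
{
        pending_exceptions &= ~(1UL << priority);
}

static int pending_timer(void)
{
        return !!(pending_exceptions & (1UL << MIPS_EXC_INT_TIMER));
}

int main(void)
{
        queue_irq(MIPS_EXC_INT_TIMER);
        printf("timer pending: %d\n", pending_timer());
        dequeue_irq(MIPS_EXC_INT_TIMER);
        printf("timer pending: %d\n", pending_timer());
        return 0;
}
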
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h
deleted file mode 100644 (file)
index 86d3b4c..0000000
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-/*
- * Define opcode values not defined in <asm/inst.h>
- */
-
-#ifndef __KVM_MIPS_OPCODE_H__
-#define __KVM_MIPS_OPCODE_H__
-
-/* COP0 Ops */
-#define     mfmcz_op         0x0b      /*  01011  */
-#define     wrpgpr_op        0x0e      /*  01110  */
-
-/*  COP0 opcodes (only if COP0 and CO=1):  */
-#define     wait_op               0x20 /*  100000  */
-
-#endif /* __KVM_MIPS_OPCODE_H__ */
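
These values are matched against fields of the raw instruction word: the
major opcode lives in bits 31:26, the rs field in bits 25:21, and the
function field in bits 5:0. A small sketch of that decoding (the field
extraction and the sample encoding follow the MIPS32 manual; the helper
layout is illustrative):

#include <stdint.h>
#include <stdio.h>

#define cop0_op         0x10    /* major opcode, bits 31:26 */
#define mfmcz_op        0x0b    /* rs field, bits 25:21 */
#define wait_op         0x20    /* function field, bits 5:0, CO = 1 */

int main(void)
{
        uint32_t inst = 0x41606020;     /* "ei": mfmc0 with the sc bit set */

        uint32_t major = (inst >> 26) & 0x3f;
        uint32_t rs    = (inst >> 21) & 0x1f;
        uint32_t co    = (inst >> 25) & 0x1;
        uint32_t func  = inst & 0x3f;

        if (major == cop0_op && rs == mfmcz_op)
                printf("mfmc0 (di/ei) instruction\n");
        else if (major == cop0_op && co && func == wait_op)
                printf("wait instruction\n");
        return 0;
}
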
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/kvm_mips_stats.c
deleted file mode 100644 (file)
index 075904b..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: COP0 access histogram
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/kvm_host.h>
-
-char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
-       "WAIT",
-       "CACHE",
-       "Signal",
-       "Interrupt",
-       "COP0/1 Unusable",
-       "TLB Mod",
-       "TLB Miss (LD)",
-       "TLB Miss (ST)",
-       "Address Err (ST)",
-       "Address Error (LD)",
-       "System Call",
-       "Reserved Inst",
-       "Break Inst",
-       "D-Cache Flushes",
-};
-
-char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
-       "Index",
-       "Random",
-       "EntryLo0",
-       "EntryLo1",
-       "Context",
-       "PG Mask",
-       "Wired",
-       "HWREna",
-       "BadVAddr",
-       "Count",
-       "EntryHI",
-       "Compare",
-       "Status",
-       "Cause",
-       "EXC PC",
-       "PRID",
-       "Config",
-       "LLAddr",
-       "Watch Lo",
-       "Watch Hi",
-       "X Context",
-       "Reserved",
-       "Impl Dep",
-       "Debug",
-       "DEPC",
-       "PerfCnt",
-       "ErrCtl",
-       "CacheErr",
-       "TagLo",
-       "TagHi",
-       "ErrorEPC",
-       "DESAVE"
-};
-
-int kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
-       int i, j;
-
-       printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
-       for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
-               for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
-                       if (vcpu->arch.cop0->stat[i][j])
-                               printk("%s[%d]: %lu\n", kvm_cop0_str[i], j,
-                                      vcpu->arch.cop0->stat[i][j]);
-               }
-       }
-#endif
-
-       return 0;
-}
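
The histogram itself is just a counter per (register, select) pair that is
bumped on each emulated COP0 access and printed when non-zero. A trimmed
user-space sketch (sizes and names abbreviated for illustration):

#include <stdio.h>

#define NREGS   4
#define NSELS   4

static const char *reg_name[NREGS] = {
        "Index", "Random", "EntryLo0", "EntryLo1"
};
static unsigned long stat[NREGS][NSELS];

static void count_access(int reg, int sel)
{
        stat[reg][sel]++;
}

int main(void)
{
        int i, j;

        count_access(0, 0);
        count_access(3, 1);
        count_access(3, 1);

        for (i = 0; i < NREGS; i++)
                for (j = 0; j < NSELS; j++)
                        if (stat[i][j])
                                printf("%s[%d]: %lu\n", reg_name[i], j,
                                       stat[i][j]);
        return 0;
}
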
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
deleted file mode 100644 (file)
index 8a5a700..0000000
+++ /dev/null
@@ -1,819 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS TLB handling: this file is part of the Linux host kernel so that
-* the TLB handlers run from KSEG0.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/sched.h>
-#include <linux/smp.h>
-#include <linux/mm.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/kvm_host.h>
-#include <linux/srcu.h>
-
-
-#include <asm/cpu.h>
-#include <asm/bootinfo.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/cacheflush.h>
-#include <asm/tlb.h>
-
-#undef CONFIG_MIPS_MT
-#include <asm/r4kcache.h>
-#define CONFIG_MIPS_MT
-
-#define KVM_GUEST_PC_TLB    0
-#define KVM_GUEST_SP_TLB    1
-
-#define PRIx64 "llx"
-
-atomic_t kvm_mips_instance;
-EXPORT_SYMBOL(kvm_mips_instance);
-
-/* These function pointers are initialized once the KVM module is loaded */
-pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
-EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
-
-void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
-EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
-
-bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
-EXPORT_SYMBOL(kvm_mips_is_error_pfn);
-
-uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
-}
-
-
-uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
-}
-
-inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
-{
-       return vcpu->kvm->arch.commpage_tlb;
-}
-
-
-/*
- * Structure defining a TLB entry data set.
- */
-
-void kvm_mips_dump_host_tlbs(void)
-{
-       unsigned long old_entryhi;
-       unsigned long old_pagemask;
-       struct kvm_mips_tlb tlb;
-       unsigned long flags;
-       int i;
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-       old_pagemask = read_c0_pagemask();
-
-       printk("HOST TLBs:\n");
-       printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
-
-       for (i = 0; i < current_cpu_data.tlbsize; i++) {
-               write_c0_index(i);
-               mtc0_tlbw_hazard();
-
-               tlb_read();
-               tlbw_use_hazard();
-
-               tlb.tlb_hi = read_c0_entryhi();
-               tlb.tlb_lo0 = read_c0_entrylo0();
-               tlb.tlb_lo1 = read_c0_entrylo1();
-               tlb.tlb_mask = read_c0_pagemask();
-
-               printk("TLB%c%3d Hi 0x%08lx ",
-                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-                      i, tlb.tlb_hi);
-               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-                      (tlb.tlb_lo0 >> 3) & 7);
-               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
-       }
-       write_c0_entryhi(old_entryhi);
-       write_c0_pagemask(old_pagemask);
-       mtc0_tlbw_hazard();
-       local_irq_restore(flags);
-}
-
-void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       struct kvm_mips_tlb tlb;
-       int i;
-
-       printk("Guest TLBs:\n");
-       printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
-
-       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
-               tlb = vcpu->arch.guest_tlb[i];
-               printk("TLB%c%3d Hi 0x%08lx ",
-                      (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
-                      i, tlb.tlb_hi);
-               printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
-                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
-                      (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
-                      (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
-                      (tlb.tlb_lo0 >> 3) & 7);
-               printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
-                      (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
-                      (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
-                      (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
-                      (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
-       }
-}
-
-static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
-{
-       int srcu_idx, err = 0;
-       pfn_t pfn;
-
-       if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
-               return 0;
-
-       srcu_idx = srcu_read_lock(&kvm->srcu);
-       pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
-
-       if (kvm_mips_is_error_pfn(pfn)) {
-               kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
-               err = -EFAULT;
-               goto out;
-       }
-
-       kvm->arch.guest_pmap[gfn] = pfn;
-out:
-       srcu_read_unlock(&kvm->srcu, srcu_idx);
-       return err;
-}
-
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
-       unsigned long gva)
-{
-       gfn_t gfn;
-       uint32_t offset = gva & ~PAGE_MASK;
-       struct kvm *kvm = vcpu->kvm;
-
-       if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
-               kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
-                       __builtin_return_address(0), gva);
-               return KVM_INVALID_PAGE;
-       }
-
-       gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
-
-       if (gfn >= kvm->arch.guest_pmap_npages) {
-               kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
-                       gva);
-               return KVM_INVALID_PAGE;
-       }
-
-       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
-               return KVM_INVALID_ADDR;
-
-       return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
-}
-
-/* XXXKYMA: Must be called with interrupts disabled */
-/* Set flush_dcache_mask == 0 if no D-cache flush is required */
-int
-kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
-       unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
-{
-       unsigned long flags;
-       unsigned long old_entryhi;
-       volatile int idx;
-
-       local_irq_save(flags);
-
-
-       old_entryhi = read_c0_entryhi();
-       write_c0_entryhi(entryhi);
-       mtc0_tlbw_hazard();
-
-       tlb_probe();
-       tlb_probe_hazard();
-       idx = read_c0_index();
-
-       if (idx > current_cpu_data.tlbsize) {
-               kvm_err("%s: Invalid Index: %d\n", __func__, idx);
-               kvm_mips_dump_host_tlbs();
-               return -1;
-       }
-
-       write_c0_entrylo0(entrylo0);
-       write_c0_entrylo1(entrylo1);
-       mtc0_tlbw_hazard();
-
-       if (idx < 0)
-               tlb_write_random();
-       else
-               tlb_write_indexed();
-       tlbw_use_hazard();
-
-       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
-                 vcpu->arch.pc, idx, read_c0_entryhi(),
-                 read_c0_entrylo0(), read_c0_entrylo1());
-
-       /* Flush D-cache */
-       if (flush_dcache_mask) {
-               if (entrylo0 & MIPS3_PG_V) {
-                       ++vcpu->stat.flush_dcache_exits;
-                       flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
-               }
-               if (entrylo1 & MIPS3_PG_V) {
-                       ++vcpu->stat.flush_dcache_exits;
-                       flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
-                               (0x1 << PAGE_SHIFT));
-               }
-       }
-
-       /* Restore old ASID */
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-       local_irq_restore(flags);
-       return 0;
-}
-
-
-/* XXXKYMA: Must be called with interrupts disabled */
-int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
-       struct kvm_vcpu *vcpu)
-{
-       gfn_t gfn;
-       pfn_t pfn0, pfn1;
-       unsigned long vaddr = 0;
-       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
-       int even;
-       struct kvm *kvm = vcpu->kvm;
-       const int flush_dcache_mask = 0;
-
-
-       if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
-               kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               return -1;
-       }
-
-       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
-       if (gfn >= kvm->arch.guest_pmap_npages) {
-               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
-                       gfn, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               return -1;
-       }
-       even = !(gfn & 0x1);
-       vaddr = badvaddr & (PAGE_MASK << 1);
-
-       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
-               return -1;
-
-       if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
-               return -1;
-
-       if (even) {
-               pfn0 = kvm->arch.guest_pmap[gfn];
-               pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
-       } else {
-               pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
-               pfn1 = kvm->arch.guest_pmap[gfn];
-       }
-
-       entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
-       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
-                       (0x1 << 1);
-       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
-                       (0x1 << 1);
-
-       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-                                      flush_dcache_mask);
-}
-
-int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
-       struct kvm_vcpu *vcpu)
-{
-       pfn_t pfn0, pfn1;
-       unsigned long flags, old_entryhi = 0, vaddr = 0;
-       unsigned long entrylo0 = 0, entrylo1 = 0;
-
-
-       pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
-       pfn1 = 0;
-       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
-                       (0x1 << 1);
-       entrylo1 = 0;
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-       vaddr = badvaddr & (PAGE_MASK << 1);
-       write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
-       mtc0_tlbw_hazard();
-       write_c0_entrylo0(entrylo0);
-       mtc0_tlbw_hazard();
-       write_c0_entrylo1(entrylo1);
-       mtc0_tlbw_hazard();
-       write_c0_index(kvm_mips_get_commpage_asid(vcpu));
-       mtc0_tlbw_hazard();
-       tlb_write_indexed();
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
-            vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
-            read_c0_entrylo0(), read_c0_entrylo1());
-
-       /* Restore old ASID */
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-       local_irq_restore(flags);
-
-       return 0;
-}
-
-int
-kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
-       struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
-{
-       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
-       struct kvm *kvm = vcpu->kvm;
-       pfn_t pfn0, pfn1;
-
-
-       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
-               pfn0 = 0;
-               pfn1 = 0;
-       } else {
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
-                       return -1;
-
-               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
-               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
-       }
-
-       if (hpa0)
-               *hpa0 = pfn0 << PAGE_SHIFT;
-
-       if (hpa1)
-               *hpa1 = pfn1 << PAGE_SHIFT;
-
-       /* Get attributes from the Guest TLB */
-       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
-                       kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
-       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
-                       (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
-       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
-                       (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
-
-       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
-                 tlb->tlb_lo0, tlb->tlb_lo1);
-
-       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
-                                      tlb->tlb_mask);
-}
-
-int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
-{
-       int i;
-       int index = -1;
-       struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
-
-
-       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
-               if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-                       (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
-                       index = i;
-                       break;
-               }
-       }
-
-       kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
-                 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
-
-       return index;
-}
-
-int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
-{
-       unsigned long old_entryhi, flags;
-       volatile int idx;
-
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-
-       if (KVM_GUEST_KERNEL_MODE(vcpu))
-               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
-       else {
-               write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
-       }
-
-       mtc0_tlbw_hazard();
-
-       tlb_probe();
-       tlb_probe_hazard();
-       idx = read_c0_index();
-
-       /* Restore old ASID */
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-
-       kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
-
-       return idx;
-}
-
-int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
-{
-       int idx;
-       unsigned long flags, old_entryhi;
-
-       local_irq_save(flags);
-
-
-       old_entryhi = read_c0_entryhi();
-
-       write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
-       mtc0_tlbw_hazard();
-
-       tlb_probe();
-       tlb_probe_hazard();
-       idx = read_c0_index();
-
-       if (idx >= current_cpu_data.tlbsize)
-               BUG();
-
-       if (idx > 0) {
-               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
-               mtc0_tlbw_hazard();
-
-               write_c0_entrylo0(0);
-               mtc0_tlbw_hazard();
-
-               write_c0_entrylo1(0);
-               mtc0_tlbw_hazard();
-
-               tlb_write_indexed();
-               mtc0_tlbw_hazard();
-       }
-
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-
-       if (idx > 0)
-               kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
-                         (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
-
-       return 0;
-}
-
-/* XXXKYMA: Fix: Guest USER/KERNEL no longer share the same ASID */
-int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
-{
-       unsigned long flags, old_entryhi;
-
-       if (index >= current_cpu_data.tlbsize)
-               BUG();
-
-       local_irq_save(flags);
-
-
-       old_entryhi = read_c0_entryhi();
-
-       write_c0_entryhi(UNIQUE_ENTRYHI(index));
-       mtc0_tlbw_hazard();
-
-       write_c0_index(index);
-       mtc0_tlbw_hazard();
-
-       write_c0_entrylo0(0);
-       mtc0_tlbw_hazard();
-
-       write_c0_entrylo1(0);
-       mtc0_tlbw_hazard();
-
-       tlb_write_indexed();
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       write_c0_entryhi(old_entryhi);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-
-       return 0;
-}
-
-void kvm_mips_flush_host_tlb(int skip_kseg0)
-{
-       unsigned long flags;
-       unsigned long old_entryhi, entryhi;
-       unsigned long old_pagemask;
-       int entry = 0;
-       int maxentry = current_cpu_data.tlbsize;
-
-
-       local_irq_save(flags);
-
-       old_entryhi = read_c0_entryhi();
-       old_pagemask = read_c0_pagemask();
-
-       /* Blast 'em all away. */
-       for (entry = 0; entry < maxentry; entry++) {
-
-               write_c0_index(entry);
-               mtc0_tlbw_hazard();
-
-               if (skip_kseg0) {
-                       tlb_read();
-                       tlbw_use_hazard();
-
-                       entryhi = read_c0_entryhi();
-
-                       /* Don't blow away guest kernel entries */
-                       if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
-                               continue;
-                       }
-               }
-
-               /* Make sure all entries differ. */
-               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-               mtc0_tlbw_hazard();
-               write_c0_entrylo0(0);
-               mtc0_tlbw_hazard();
-               write_c0_entrylo1(0);
-               mtc0_tlbw_hazard();
-
-               tlb_write_indexed();
-               mtc0_tlbw_hazard();
-       }
-
-       tlbw_use_hazard();
-
-       write_c0_entryhi(old_entryhi);
-       write_c0_pagemask(old_pagemask);
-       mtc0_tlbw_hazard();
-       tlbw_use_hazard();
-
-       local_irq_restore(flags);
-}
-
-void
-kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
-                       struct kvm_vcpu *vcpu)
-{
-       unsigned long asid = asid_cache(cpu);
-
-       if (!((asid += ASID_INC) & ASID_MASK)) {
-               if (cpu_has_vtag_icache) {
-                       flush_icache_all();
-               }
-
-               kvm_local_flush_tlb_all();      /* start new asid cycle */
-
-               if (!asid)      /* fix version if needed */
-                       asid = ASID_FIRST_VERSION;
-       }
-
-       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
-void kvm_local_flush_tlb_all(void)
-{
-       unsigned long flags;
-       unsigned long old_ctx;
-       int entry = 0;
-
-       local_irq_save(flags);
-       /* Save old context and create impossible VPN2 value */
-       old_ctx = read_c0_entryhi();
-       write_c0_entrylo0(0);
-       write_c0_entrylo1(0);
-
-       /* Blast 'em all away. */
-       while (entry < current_cpu_data.tlbsize) {
-               /* Make sure all entries differ. */
-               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
-               write_c0_index(entry);
-               mtc0_tlbw_hazard();
-               tlb_write_indexed();
-               entry++;
-       }
-       tlbw_use_hazard();
-       write_c0_entryhi(old_ctx);
-       mtc0_tlbw_hazard();
-
-       local_irq_restore(flags);
-}
-
-/**
- * kvm_mips_migrate_count() - Migrate timer.
- * @vcpu:      Virtual CPU.
- *
- * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
- * if it was running prior to being cancelled.
- *
- * Must be called when the VCPU is migrated to a different CPU to ensure that
- * timer expiry during guest execution interrupts the guest and causes the
- * interrupt to be delivered in a timely manner.
- */
-static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
-{
-       if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
-               hrtimer_restart(&vcpu->arch.comparecount_timer);
-}
-
-/* Restore ASID once we are scheduled back after preemption */
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
-       unsigned long flags;
-       int newasid = 0;
-
-       kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-
-       /* Allocate new kernel and user ASIDs if needed */
-
-       local_irq_save(flags);
-
-       if (((vcpu->arch.
-             guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
-               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
-               vcpu->arch.guest_kernel_asid[cpu] =
-                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
-               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
-               vcpu->arch.guest_user_asid[cpu] =
-                   vcpu->arch.guest_user_mm.context.asid[cpu];
-               newasid++;
-
-               kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
-                         cpu_context(cpu, current->mm));
-               kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
-               kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
-                         vcpu->arch.guest_user_asid[cpu]);
-       }
-
-       if (vcpu->arch.last_sched_cpu != cpu) {
-               kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
-                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
-               /*
-                * Migrate the timer interrupt to the current CPU so that it
-                * always interrupts the guest and synchronously triggers a
-                * guest timer interrupt.
-                */
-               kvm_mips_migrate_count(vcpu);
-       }
-
-       if (!newasid) {
-               /* If we were preempted while the guest was executing, reload the preempted ASID */
-               if (current->flags & PF_VCPU) {
-                       write_c0_entryhi(vcpu->arch.
-                                        preempt_entryhi & ASID_MASK);
-                       ehb();
-               }
-       } else {
-               /* New ASIDs were allocated for the VM */
-
-               /* Were we in guest context? If so, the preempted ASID is no
-                * longer valid; we need to set it to what it should be based
-                * on the mode of the guest (kernel/user).
-                */
-               if (current->flags & PF_VCPU) {
-                       if (KVM_GUEST_KERNEL_MODE(vcpu))
-                               write_c0_entryhi(vcpu->arch.
-                                                guest_kernel_asid[cpu] &
-                                                ASID_MASK);
-                       else
-                               write_c0_entryhi(vcpu->arch.
-                                                guest_user_asid[cpu] &
-                                                ASID_MASK);
-                       ehb();
-               }
-       }
-
-       local_irq_restore(flags);
-
-}
-
-/* ASID can change if another task is scheduled during preemption */
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
-       unsigned long flags;
-       uint32_t cpu;
-
-       local_irq_save(flags);
-
-       cpu = smp_processor_id();
-
-
-       vcpu->arch.preempt_entryhi = read_c0_entryhi();
-       vcpu->arch.last_sched_cpu = cpu;
-
-       if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
-            ASID_VERSION_MASK)) {
-               kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
-                         cpu_context(cpu, current->mm));
-               drop_mmu_context(current->mm, cpu);
-       }
-       write_c0_entryhi(cpu_asid(cpu, current->mm));
-       ehb();
-
-       local_irq_restore(flags);
-}
-
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       unsigned long paddr, flags;
-       uint32_t inst;
-       int index;
-
-       if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
-           KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
-               local_irq_save(flags);
-               index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
-               if (index >= 0) {
-                       inst = *(opc);
-               } else {
-                       index =
-                           kvm_mips_guest_tlb_lookup(vcpu,
-                                                     ((unsigned long) opc & VPN2_MASK)
-                                                     |
-                                                     (kvm_read_c0_guest_entryhi
-                                                      (cop0) & ASID_MASK));
-                       if (index < 0) {
-                               kvm_err
-                                   ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
-                                    __func__, opc, vcpu, read_c0_entryhi());
-                               kvm_mips_dump_host_tlbs();
-                               local_irq_restore(flags);
-                               return KVM_INVALID_INST;
-                       }
-                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
-                                                            &vcpu->arch.
-                                                            guest_tlb[index],
-                                                            NULL, NULL);
-                       inst = *(opc);
-               }
-               local_irq_restore(flags);
-       } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
-               paddr =
-                   kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
-                                                        (unsigned long) opc);
-               inst = *(uint32_t *) CKSEG0ADDR(paddr);
-       } else {
-               kvm_err("%s: illegal address: %p\n", __func__, opc);
-               return KVM_INVALID_INST;
-       }
-
-       return inst;
-}
-
-EXPORT_SYMBOL(kvm_local_flush_tlb_all);
-EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
-EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
-EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
-EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
-EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
-EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
-EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
-EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
-EXPORT_SYMBOL(kvm_get_inst);
-EXPORT_SYMBOL(kvm_arch_vcpu_load);
-EXPORT_SYMBOL(kvm_arch_vcpu_put);
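
A recurring pattern in the TLB code above is the even/odd page pairing of a
MIPS TLB entry: one EntryHi covers a double-page, with EntryLo0 mapping the
even page and EntryLo1 the odd one, so the faulting gfn is always paired
with gfn ^ 1. A small sketch of just that pairing (an identity gfn->pfn
mapping stands in for kvm->arch.guest_pmap[]):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_MASK       (~((1UL << PAGE_SHIFT) - 1))

int main(void)
{
        unsigned long badvaddr = 0x40005000;    /* faulting guest address */
        unsigned long gfn = (badvaddr & PAGE_MASK) >> PAGE_SHIFT;
        int even = !(gfn & 0x1);

        unsigned long vaddr = badvaddr & (PAGE_MASK << 1); /* double-page base */
        unsigned long pfn_even = even ? gfn : (gfn ^ 0x1);
        unsigned long pfn_odd  = even ? (gfn ^ 0x1) : gfn;

        printf("EntryHi VPN2 base: %#lx\n", vaddr);
        printf("EntryLo0 -> pfn %#lx (even page)\n", pfn_even);
        printf("EntryLo1 -> pfn %#lx (odd page)\n", pfn_odd);
        return 0;
}
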
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
deleted file mode 100644 (file)
index 693f952..0000000
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
-
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-
-#include <linux/kvm_host.h>
-
-#include "kvm_mips_opcode.h"
-#include "kvm_mips_int.h"
-
-static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
-{
-       gpa_t gpa;
-       uint32_t kseg = KSEGX(gva);
-
-       if ((kseg == CKSEG0) || (kseg == CKSEG1))
-               gpa = CPHYSADDR(gva);
-       else {
-               printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
-               kvm_mips_dump_host_tlbs();
-               gpa = KVM_INVALID_ADDR;
-       }
-
-       kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
-
-       return gpa;
-}
-
-
-static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
-               er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
-       } else
-               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
-
-       switch (er) {
-       case EMULATE_DONE:
-               ret = RESUME_GUEST;
-               break;
-
-       case EMULATE_FAIL:
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-               break;
-
-       case EMULATE_WAIT:
-               run->exit_reason = KVM_EXIT_INTR;
-               ret = RESUME_HOST;
-               break;
-
-       default:
-               BUG();
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
-           || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-               kvm_debug
-                   ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, opc, badvaddr);
-               er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
-
-               if (er == EMULATE_DONE)
-                       ret = RESUME_GUEST;
-               else {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-               /* XXXKYMA: The guest kernel does not expect to get this fault when we are not
-                * using HIGHMEM. Need to address this in a HIGHMEM kernel
-                */
-               printk
-                   ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, opc, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       } else {
-               printk
-                   ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, opc, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
-           && KVM_GUEST_KERNEL_MODE(vcpu)) {
-               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
-                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-               kvm_debug
-                   ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, opc, badvaddr);
-               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
-               if (er == EMULATE_DONE)
-                       ret = RESUME_GUEST;
-               else {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-               /* All KSEG0 faults are handled by KVM, as the guest kernel does not
-                * expect to ever get them
-                */
-               if (kvm_mips_handle_kseg0_tlb_fault
-                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else {
-               kvm_err
-                   ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, opc, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
-           && KVM_GUEST_KERNEL_MODE(vcpu)) {
-               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
-                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
-               kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
-                         vcpu->arch.pc, badvaddr);
-
-               /* A User Address (UA) fault can happen if:
-                * (1) the TLB entry is not present/valid in both the Guest and the
-                *     shadow host TLBs; we then pass the fault on to the guest
-                *     kernel and let it handle it;
-                * (2) the TLB entry is present in the Guest TLB but not in the
-                *     shadow host TLB; we then inject the entry from the Guest TLB
-                *     into the shadow host TLB.
-                */
-
-               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
-               if (er == EMULATE_DONE)
-                       ret = RESUME_GUEST;
-               else {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
-               if (kvm_mips_handle_kseg0_tlb_fault
-                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               }
-       } else {
-               printk
-                   ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, opc, badvaddr);
-               kvm_mips_dump_host_tlbs();
-               kvm_arch_vcpu_dump_regs(vcpu);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (KVM_GUEST_KERNEL_MODE(vcpu)
-           && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
-               kvm_debug("Emulate Store to MMIO space\n");
-               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
-               if (er == EMULATE_FAIL) {
-                       printk("Emulate Store to MMIO space failed\n");
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               } else {
-                       run->exit_reason = KVM_EXIT_MMIO;
-                       ret = RESUME_HOST;
-               }
-       } else {
-               printk
-                   ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, opc, badvaddr);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
-               kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
-               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
-               if (er == EMULATE_FAIL) {
-                       printk("Emulate Load from MMIO space failed\n");
-                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-                       ret = RESUME_HOST;
-               } else {
-                       run->exit_reason = KVM_EXIT_MMIO;
-                       ret = RESUME_HOST;
-               }
-       } else {
-               printk
-                   ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
-                    cause, opc, badvaddr);
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-               er = EMULATE_FAIL;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
-       if (er == EMULATE_DONE)
-               ret = RESUME_GUEST;
-       else {
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       er = kvm_mips_handle_ri(cause, opc, run, vcpu);
-       if (er == EMULATE_DONE)
-               ret = RESUME_GUEST;
-       else {
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
-{
-       struct kvm_run *run = vcpu->run;
-       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
-       unsigned long cause = vcpu->arch.host_cp0_cause;
-       enum emulation_result er = EMULATE_DONE;
-       int ret = RESUME_GUEST;
-
-       er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
-       if (er == EMULATE_DONE)
-               ret = RESUME_GUEST;
-       else {
-               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
-               ret = RESUME_HOST;
-       }
-       return ret;
-}
-
-static int kvm_trap_emul_vm_init(struct kvm *kvm)
-{
-       return 0;
-}
-
-static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
-static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       uint32_t config1;
-       int vcpu_id = vcpu->vcpu_id;
-
-       /* Arch specific stuff: set up the config registers properly so that
-        * the guest will come up as expected; for now we simulate a
-        * MIPS 24kc.
-        */
-       kvm_write_c0_guest_prid(cop0, 0x00019300);
-       kvm_write_c0_guest_config(cop0,
-                                 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
-                                 (MMU_TYPE_R4000 << CP0C0_MT));
-
-       /* Read the cache characteristics from the host Config1 Register */
-       config1 = (read_c0_config1() & ~0x7f);
-
-       /* Set up MMU size */
-       config1 &= ~(0x3f << 25);
-       config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
-
-       /* We unset some bits that we aren't emulating */
-       config1 &=
-           ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
-             (1 << CP0C1_WR) | (1 << CP0C1_CA));
-       kvm_write_c0_guest_config1(cop0, config1);
-
-       kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
-       /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
-       kvm_write_c0_guest_config3(cop0,
-                                  MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 <<
-                                                                      CP0C3_ULRI));
-
-       /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
-       kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
-
-       /* Set up IntCtl defaults, compatibility mode for timer interrupts (HW5) */
-       kvm_write_c0_guest_intctl(cop0, 0xFC000000);
-
-       /* Put the vcpu id into the CPUNum field of the EBase register to handle SMP guests */
-       kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
-
-       return 0;
-}
-
-static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
-                                    const struct kvm_one_reg *reg,
-                                    s64 *v)
-{
-       switch (reg->id) {
-       case KVM_REG_MIPS_CP0_COUNT:
-               *v = kvm_mips_read_count(vcpu);
-               break;
-       case KVM_REG_MIPS_COUNT_CTL:
-               *v = vcpu->arch.count_ctl;
-               break;
-       case KVM_REG_MIPS_COUNT_RESUME:
-               *v = ktime_to_ns(vcpu->arch.count_resume);
-               break;
-       case KVM_REG_MIPS_COUNT_HZ:
-               *v = vcpu->arch.count_hz;
-               break;
-       default:
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
-                                    const struct kvm_one_reg *reg,
-                                    s64 v)
-{
-       struct mips_coproc *cop0 = vcpu->arch.cop0;
-       int ret = 0;
-
-       switch (reg->id) {
-       case KVM_REG_MIPS_CP0_COUNT:
-               kvm_mips_write_count(vcpu, v);
-               break;
-       case KVM_REG_MIPS_CP0_COMPARE:
-               kvm_mips_write_compare(vcpu, v);
-               break;
-       case KVM_REG_MIPS_CP0_CAUSE:
-               /*
-                * If the timer is stopped or started (DC bit) it must look
-                * atomic with changes to the interrupt pending bits (TI, IRQ5).
-                * A timer interrupt should not happen in between.
-                */
-               if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
-                       if (v & CAUSEF_DC) {
-                               /* disable timer first */
-                               kvm_mips_count_disable_cause(vcpu);
-                               kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
-                       } else {
-                               /* enable timer last */
-                               kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
-                               kvm_mips_count_enable_cause(vcpu);
-                       }
-               } else {
-                       kvm_write_c0_guest_cause(cop0, v);
-               }
-               break;
-       case KVM_REG_MIPS_COUNT_CTL:
-               ret = kvm_mips_set_count_ctl(vcpu, v);
-               break;
-       case KVM_REG_MIPS_COUNT_RESUME:
-               ret = kvm_mips_set_count_resume(vcpu, v);
-               break;
-       case KVM_REG_MIPS_COUNT_HZ:
-               ret = kvm_mips_set_count_hz(vcpu, v);
-               break;
-       default:
-               return -EINVAL;
-       }
-       return ret;
-}
-
-static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
-       /* exit handlers */
-       .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
-       .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
-       .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
-       .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
-       .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
-       .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
-       .handle_syscall = kvm_trap_emul_handle_syscall,
-       .handle_res_inst = kvm_trap_emul_handle_res_inst,
-       .handle_break = kvm_trap_emul_handle_break,
-
-       .vm_init = kvm_trap_emul_vm_init,
-       .vcpu_init = kvm_trap_emul_vcpu_init,
-       .vcpu_setup = kvm_trap_emul_vcpu_setup,
-       .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
-       .queue_timer_int = kvm_mips_queue_timer_int_cb,
-       .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
-       .queue_io_int = kvm_mips_queue_io_int_cb,
-       .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
-       .irq_deliver = kvm_mips_irq_deliver_cb,
-       .irq_clear = kvm_mips_irq_clear_cb,
-       .get_one_reg = kvm_trap_emul_get_one_reg,
-       .set_one_reg = kvm_trap_emul_set_one_reg,
-};
-
-int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
-{
-       *install_callbacks = &kvm_trap_emul_callbacks;
-       return 0;
-}
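
The registration at the end is the usual ops-table pattern: the trap and
emulate flavour fills in one static struct of function pointers and hands
it to the core. A bare-bones sketch of that pattern (the struct and names
here are illustrative, not the kernel's):

#include <stdio.h>

struct em_callbacks {
        int (*handle_syscall)(int vcpu_id);
};

static int trap_emul_handle_syscall(int vcpu_id)
{
        printf("vcpu %d: syscall emulated\n", vcpu_id);
        return 0;
}

static struct em_callbacks trap_emul_callbacks = {
        .handle_syscall = trap_emul_handle_syscall,
};

static int emulation_init(struct em_callbacks **install)
{
        *install = &trap_emul_callbacks;
        return 0;
}

int main(void)
{
        struct em_callbacks *ops;

        emulation_init(&ops);
        return ops->handle_syscall(0);
}
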
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
new file mode 100644 (file)
index 0000000..d7279c0
--- /dev/null
@@ -0,0 +1,620 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Main entry point for the guest, exception handling.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+
+#define _C_LABEL(x)     x
+#define MIPSX(name)     mips32_ ## name
+#define CALLFRAME_SIZ   32
+
+/*
+ * VECTOR
+ *  exception vector entrypoint
+ */
+#define VECTOR(x, regmask)      \
+    .ent    _C_LABEL(x),0;      \
+    EXPORT(x);
+
+#define VECTOR_END(x)      \
+    EXPORT(x);
+
+/* Overload, Danger Will Robinson!! */
+#define PT_HOST_ASID        PT_BVADDR
+#define PT_HOST_USERLOCAL   PT_EPC
+
+#define CP0_DDATA_LO        $28,3
+#define CP0_EBASE           $15,1
+
+#define CP0_INTCTL          $12,1
+#define CP0_SRSCTL          $12,2
+#define CP0_SRSMAP          $12,3
+#define CP0_HWRENA          $7,0
+
+/* Resume Flags */
+#define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
+
+#define RESUME_GUEST            0
+#define RESUME_HOST             RESUME_FLAG_HOST
+
+/*
+ * __kvm_mips_vcpu_run: entry point to the guest
+ * a0: run
+ * a1: vcpu
+ */
+       .set    noreorder
+       .set    noat
+
+FEXPORT(__kvm_mips_vcpu_run)
+       /* k0/k1 not being used in host kernel context */
+       INT_ADDIU k1, sp, -PT_SIZE
+       LONG_S  $0, PT_R0(k1)
+       LONG_S  $1, PT_R1(k1)
+       LONG_S  $2, PT_R2(k1)
+       LONG_S  $3, PT_R3(k1)
+
+       LONG_S  $4, PT_R4(k1)
+       LONG_S  $5, PT_R5(k1)
+       LONG_S  $6, PT_R6(k1)
+       LONG_S  $7, PT_R7(k1)
+
+       LONG_S  $8,  PT_R8(k1)
+       LONG_S  $9,  PT_R9(k1)
+       LONG_S  $10, PT_R10(k1)
+       LONG_S  $11, PT_R11(k1)
+       LONG_S  $12, PT_R12(k1)
+       LONG_S  $13, PT_R13(k1)
+       LONG_S  $14, PT_R14(k1)
+       LONG_S  $15, PT_R15(k1)
+       LONG_S  $16, PT_R16(k1)
+       LONG_S  $17, PT_R17(k1)
+
+       LONG_S  $18, PT_R18(k1)
+       LONG_S  $19, PT_R19(k1)
+       LONG_S  $20, PT_R20(k1)
+       LONG_S  $21, PT_R21(k1)
+       LONG_S  $22, PT_R22(k1)
+       LONG_S  $23, PT_R23(k1)
+       LONG_S  $24, PT_R24(k1)
+       LONG_S  $25, PT_R25(k1)
+
+       /*
+        * XXXKYMA k0/k1 not saved, not being used if we got here through
+        * an ioctl()
+        */
+
+       LONG_S  $28, PT_R28(k1)
+       LONG_S  $29, PT_R29(k1)
+       LONG_S  $30, PT_R30(k1)
+       LONG_S  $31, PT_R31(k1)
+
+       /* Save hi/lo */
+       mflo    v0
+       LONG_S  v0, PT_LO(k1)
+       mfhi    v1
+       LONG_S  v1, PT_HI(k1)
+
+       /* Save host status */
+       mfc0    v0, CP0_STATUS
+       LONG_S  v0, PT_STATUS(k1)
+
+       /* Save host ASID, shove it into the BVADDR location */
+       mfc0    v1, CP0_ENTRYHI
+       andi    v1, 0xff
+       LONG_S  v1, PT_HOST_ASID(k1)
+
+       /* Save DDATA_LO, will be used to store pointer to vcpu */
+       mfc0    v1, CP0_DDATA_LO
+       LONG_S  v1, PT_HOST_USERLOCAL(k1)
+
+       /* DDATA_LO has pointer to vcpu */
+       mtc0    a1, CP0_DDATA_LO
+
+       /* Offset into vcpu->arch */
+       INT_ADDIU k1, a1, VCPU_HOST_ARCH
+
+       /*
+        * Save the host stack to VCPU, used for exception processing
+        * when we exit from the Guest
+        */
+       LONG_S  sp, VCPU_HOST_STACK(k1)
+
+       /* Save the kernel gp as well */
+       LONG_S  gp, VCPU_HOST_GP(k1)
+
+       /*
+        * Set up the status register for running the guest in UM; interrupts
+        * are disabled.
+        */
+       li      k0, (ST0_EXL | KSU_USER | ST0_BEV)
+       mtc0    k0, CP0_STATUS
+       ehb
+
+       /* load up the new EBASE */
+       LONG_L  k0, VCPU_GUEST_EBASE(k1)
+       mtc0    k0, CP0_EBASE
+
+       /*
+        * Now that the new EBASE has been loaded, unset BEV, set
+        * interrupt mask as it was but make sure that timer interrupts
+        * are enabled
+        */
+       li      k0, (ST0_EXL | KSU_USER | ST0_IE)
+       andi    v0, v0, ST0_IM
+       or      k0, k0, v0
+       mtc0    k0, CP0_STATUS
+       ehb
+
+       /* Set Guest EPC */
+       LONG_L  t0, VCPU_PC(k1)
+       mtc0    t0, CP0_EPC
+
+FEXPORT(__kvm_mips_load_asid)
+       /* Set the ASID for the Guest Kernel */
+       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
+                               /* addresses shift to 0x80000000 */
+       bltz    t0, 1f          /* If kernel */
+        INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+       INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+       /* t1: contains the base of the ASID array, need to get the cpu id */
+       LONG_L  t2, TI_CPU($28)             /* smp_processor_id */
+       INT_SLL t2, t2, 2                   /* x4 */
+       REG_ADDU t3, t1, t2
+       LONG_L  k0, (t3)
+       andi    k0, k0, 0xff
+       mtc0    k0, CP0_ENTRYHI
+       ehb
+
+       /* Disable RDHWR access */
+       mtc0    zero, CP0_HWRENA
+
+       /* Now load up the Guest Context from VCPU */
+       LONG_L  $1, VCPU_R1(k1)
+       LONG_L  $2, VCPU_R2(k1)
+       LONG_L  $3, VCPU_R3(k1)
+
+       LONG_L  $4, VCPU_R4(k1)
+       LONG_L  $5, VCPU_R5(k1)
+       LONG_L  $6, VCPU_R6(k1)
+       LONG_L  $7, VCPU_R7(k1)
+
+       LONG_L  $8, VCPU_R8(k1)
+       LONG_L  $9, VCPU_R9(k1)
+       LONG_L  $10, VCPU_R10(k1)
+       LONG_L  $11, VCPU_R11(k1)
+       LONG_L  $12, VCPU_R12(k1)
+       LONG_L  $13, VCPU_R13(k1)
+       LONG_L  $14, VCPU_R14(k1)
+       LONG_L  $15, VCPU_R15(k1)
+       LONG_L  $16, VCPU_R16(k1)
+       LONG_L  $17, VCPU_R17(k1)
+       LONG_L  $18, VCPU_R18(k1)
+       LONG_L  $19, VCPU_R19(k1)
+       LONG_L  $20, VCPU_R20(k1)
+       LONG_L  $21, VCPU_R21(k1)
+       LONG_L  $22, VCPU_R22(k1)
+       LONG_L  $23, VCPU_R23(k1)
+       LONG_L  $24, VCPU_R24(k1)
+       LONG_L  $25, VCPU_R25(k1)
+
+       /* k0/k1 loaded up later */
+
+       LONG_L  $28, VCPU_R28(k1)
+       LONG_L  $29, VCPU_R29(k1)
+       LONG_L  $30, VCPU_R30(k1)
+       LONG_L  $31, VCPU_R31(k1)
+
+       /* Restore hi/lo */
+       LONG_L  k0, VCPU_LO(k1)
+       mtlo    k0
+
+       LONG_L  k0, VCPU_HI(k1)
+       mthi    k0
+
+FEXPORT(__kvm_mips_load_k0k1)
+       /* Restore the guest's k0/k1 registers */
+       LONG_L  k0, VCPU_R26(k1)
+       LONG_L  k1, VCPU_R27(k1)
+
+       /* Jump to guest */
+       eret
+
+VECTOR(MIPSX(exception), unknown)
+/* Find out what mode we came from and jump to the proper handler. */
+       mtc0    k0, CP0_ERROREPC        #01: Save guest k0
+       ehb                             #02:
+
+       mfc0    k0, CP0_EBASE           #03: Get EBASE
+       INT_SRL k0, k0, 10              #04: Get rid of CPUNum
+       INT_SLL k0, k0, 10              #05
+       LONG_S  k1, 0x3000(k0)          #06: Save k1 @ offset 0x3000
+       INT_ADDIU k0, k0, 0x2000        #07: Exception handler is
+                                       #    installed @ offset 0x2000
+       j       k0                      #08: jump to the function
+        nop                            #09: branch delay slot
+VECTOR_END(MIPSX(exceptionEnd))
+.end MIPSX(exception)
+
+/*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ */
+NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
+       /* Get the VCPU pointer from DDATA_LO */
+       mfc0    k1, CP0_DDATA_LO
+       INT_ADDIU k1, k1, VCPU_HOST_ARCH
+
+       /* Start saving Guest context to VCPU */
+       LONG_S  $0, VCPU_R0(k1)
+       LONG_S  $1, VCPU_R1(k1)
+       LONG_S  $2, VCPU_R2(k1)
+       LONG_S  $3, VCPU_R3(k1)
+       LONG_S  $4, VCPU_R4(k1)
+       LONG_S  $5, VCPU_R5(k1)
+       LONG_S  $6, VCPU_R6(k1)
+       LONG_S  $7, VCPU_R7(k1)
+       LONG_S  $8, VCPU_R8(k1)
+       LONG_S  $9, VCPU_R9(k1)
+       LONG_S  $10, VCPU_R10(k1)
+       LONG_S  $11, VCPU_R11(k1)
+       LONG_S  $12, VCPU_R12(k1)
+       LONG_S  $13, VCPU_R13(k1)
+       LONG_S  $14, VCPU_R14(k1)
+       LONG_S  $15, VCPU_R15(k1)
+       LONG_S  $16, VCPU_R16(k1)
+       LONG_S  $17, VCPU_R17(k1)
+       LONG_S  $18, VCPU_R18(k1)
+       LONG_S  $19, VCPU_R19(k1)
+       LONG_S  $20, VCPU_R20(k1)
+       LONG_S  $21, VCPU_R21(k1)
+       LONG_S  $22, VCPU_R22(k1)
+       LONG_S  $23, VCPU_R23(k1)
+       LONG_S  $24, VCPU_R24(k1)
+       LONG_S  $25, VCPU_R25(k1)
+
+       /* Guest k0/k1 saved later */
+
+       LONG_S  $28, VCPU_R28(k1)
+       LONG_S  $29, VCPU_R29(k1)
+       LONG_S  $30, VCPU_R30(k1)
+       LONG_S  $31, VCPU_R31(k1)
+
+       /* We need to save hi/lo and restore them on the way out */
+       mfhi    t0
+       LONG_S  t0, VCPU_HI(k1)
+
+       mflo    t0
+       LONG_S  t0, VCPU_LO(k1)
+
+       /* Finally save guest k0/k1 to VCPU */
+       mfc0    t0, CP0_ERROREPC
+       LONG_S  t0, VCPU_R26(k1)
+
+       /* Get GUEST k1 and save it in VCPU */
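+       /* (the first-level vector stashed guest k1 at EBASE + 0x3000) */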
+       PTR_LI  t1, ~0x2ff
+       mfc0    t0, CP0_EBASE
+       and     t0, t0, t1
+       LONG_L  t0, 0x3000(t0)
+       LONG_S  t0, VCPU_R27(k1)
+
+       /* Now that context has been saved, we can use other registers */
+
+       /* Restore vcpu */
+       mfc0    a1, CP0_DDATA_LO
+       move    s1, a1
+
+       /* Restore run (vcpu->run) */
+       LONG_L  a0, VCPU_RUN(a1)
+       /* Save pointer to run in s0, will be saved by the compiler */
+       move    s0, a0
+
+       /*
+        * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
+        * process the exception
+        */
+       mfc0    k0, CP0_EPC
+       LONG_S  k0, VCPU_PC(k1)
+
+       mfc0    k0, CP0_BADVADDR
+       LONG_S  k0, VCPU_HOST_CP0_BADVADDR(k1)
+
+       mfc0    k0, CP0_CAUSE
+       LONG_S  k0, VCPU_HOST_CP0_CAUSE(k1)
+
+       mfc0    k0, CP0_ENTRYHI
+       LONG_S  k0, VCPU_HOST_ENTRYHI(k1)
+
+       /* Now restore the host state just enough to run the handlers */
+
+       /* Switch EBASE to the one used by Linux; load up the host EBASE */
+       mfc0    v0, CP0_STATUS
+
+       .set    at
+       or      k0, v0, ST0_BEV
+       .set    noat
+
+       mtc0    k0, CP0_STATUS
+       ehb
+
+       LONG_L  k0, VCPU_HOST_EBASE(k1)
+       mtc0    k0, CP0_EBASE
+
+       /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+       .set    at
+       and     v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
+       or      v0, v0, ST0_CU0
+       .set    noat
+       mtc0    v0, CP0_STATUS
+       ehb
+
+       /* Load up host GP */
+       LONG_L  gp, VCPU_HOST_GP(k1)
+
+       /* Need a stack before we can jump to "C" */
+       LONG_L  sp, VCPU_HOST_STACK(k1)
+
+       /* Make room for the saved host state */
+       INT_ADDIU sp, sp, -PT_SIZE
+
+       /*
+        * XXXKYMA: do we need to load the host ASID? Maybe not, because
+        * the kernel entries are marked GLOBAL; need to verify
+        */
+
+       /* Restore host DDATA_LO */
+       LONG_L  k0, PT_HOST_USERLOCAL(sp)
+       mtc0    k0, CP0_DDATA_LO
+
+       /* Restore RDHWR access */
+       PTR_LI  k0, 0x2000000F
+       mtc0    k0, CP0_HWRENA
+
+       /* Jump to handler */
+FEXPORT(__kvm_mips_jump_to_handler)
+       /*
+        * XXXKYMA: not sure if this is safe, how large is the stack??
+        * Now jump to the kvm_mips_handle_exit() to see if we can deal
+        * with this in the kernel
+        */
+       PTR_LA  t9, kvm_mips_handle_exit
+       jalr.hb t9
+        INT_ADDIU sp, sp, -CALLFRAME_SIZ           /* BD Slot */
+
+       /* Return from the handler; make sure interrupts are disabled */
+       di
+       ehb
+
+       /*
+        * XXXKYMA: k0/k1 could have been blown away if we processed
+        * an exception while we were handling the exception from the
+        * guest, reload k1
+        */
+
+       move    k1, s1
+       INT_ADDIU k1, k1, VCPU_HOST_ARCH
+
+       /*
+        * Check the return value; it should tell us if we are returning
+        * to the host (to handle I/O etc.) or resuming the guest
+        */
+       andi    t0, v0, RESUME_HOST
+       bnez    t0, __kvm_mips_return_to_host
+        nop
+
+__kvm_mips_return_to_guest:
+       /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
+       mtc0    s1, CP0_DDATA_LO
+
+       /* Load up the Guest EBASE to minimize the window where BEV is set */
+       LONG_L  t0, VCPU_GUEST_EBASE(k1)
+
+       /* Switch EBASE back to the one used by KVM */
+       mfc0    v1, CP0_STATUS
+       .set    at
+       or      k0, v1, ST0_BEV
+       .set    noat
+       mtc0    k0, CP0_STATUS
+       ehb
+       mtc0    t0, CP0_EBASE
+
+       /* Set up the status register for running the guest in UM */
+       .set    at
+       or      v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
+       and     v1, v1, ~ST0_CU0
+       .set    noat
+       mtc0    v1, CP0_STATUS
+       ehb
+
+       /* Set Guest EPC */
+       LONG_L  t0, VCPU_PC(k1)
+       mtc0    t0, CP0_EPC
+
+       /* Set the ASID for the Guest Kernel */
+       INT_SLL t0, t0, 1       /* with kseg0 @ 0x40000000, kernel */
+                               /* addresses shift to 0x80000000 */
+       bltz    t0, 1f          /* If kernel */
+        INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID  /* (BD)  */
+       INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID    /* else user */
+1:
+       /* t1: contains the base of the ASID array, need to get the cpu id  */
+       LONG_L  t2, TI_CPU($28)         /* smp_processor_id */
+       INT_SLL t2, t2, 2               /* x4 */
+       REG_ADDU t3, t1, t2
+       LONG_L  k0, (t3)
+       andi    k0, k0, 0xff
+       mtc0    k0, CP0_ENTRYHI
+       ehb
+
+       /* Disable RDHWR access */
+       mtc0    zero, CP0_HWRENA
+
+       /* load the guest context from VCPU and return */
+       LONG_L  $0, VCPU_R0(k1)
+       LONG_L  $1, VCPU_R1(k1)
+       LONG_L  $2, VCPU_R2(k1)
+       LONG_L  $3, VCPU_R3(k1)
+       LONG_L  $4, VCPU_R4(k1)
+       LONG_L  $5, VCPU_R5(k1)
+       LONG_L  $6, VCPU_R6(k1)
+       LONG_L  $7, VCPU_R7(k1)
+       LONG_L  $8, VCPU_R8(k1)
+       LONG_L  $9, VCPU_R9(k1)
+       LONG_L  $10, VCPU_R10(k1)
+       LONG_L  $11, VCPU_R11(k1)
+       LONG_L  $12, VCPU_R12(k1)
+       LONG_L  $13, VCPU_R13(k1)
+       LONG_L  $14, VCPU_R14(k1)
+       LONG_L  $15, VCPU_R15(k1)
+       LONG_L  $16, VCPU_R16(k1)
+       LONG_L  $17, VCPU_R17(k1)
+       LONG_L  $18, VCPU_R18(k1)
+       LONG_L  $19, VCPU_R19(k1)
+       LONG_L  $20, VCPU_R20(k1)
+       LONG_L  $21, VCPU_R21(k1)
+       LONG_L  $22, VCPU_R22(k1)
+       LONG_L  $23, VCPU_R23(k1)
+       LONG_L  $24, VCPU_R24(k1)
+       LONG_L  $25, VCPU_R25(k1)
+
+       /* k0/k1 loaded later */
+       LONG_L  $28, VCPU_R28(k1)
+       LONG_L  $29, VCPU_R29(k1)
+       LONG_L  $30, VCPU_R30(k1)
+       LONG_L  $31, VCPU_R31(k1)
+
+FEXPORT(__kvm_mips_skip_guest_restore)
+       LONG_L  k0, VCPU_HI(k1)
+       mthi    k0
+
+       LONG_L  k0, VCPU_LO(k1)
+       mtlo    k0
+
+       LONG_L  k0, VCPU_R26(k1)
+       LONG_L  k1, VCPU_R27(k1)
+
+       eret
+
+__kvm_mips_return_to_host:
+       /* EBASE is already pointing to Linux */
+       LONG_L  k1, VCPU_HOST_STACK(k1)
+       INT_ADDIU k1, k1, -PT_SIZE
+
+       /* Restore host DDATA_LO */
+       LONG_L  k0, PT_HOST_USERLOCAL(k1)
+       mtc0    k0, CP0_DDATA_LO
+
+       /* Restore host ASID */
+       LONG_L  k0, PT_HOST_ASID(sp)
+       andi    k0, 0xff
+       mtc0    k0, CP0_ENTRYHI
+       ehb
+
+       /* Load context saved on the host stack */
+       LONG_L  $0, PT_R0(k1)
+       LONG_L  $1, PT_R1(k1)
+
+       /*
+        * r2/v0 is the return code, shift it down by 2 (arithmetic)
+        * to recover the err code
+        */
+       INT_SRA k0, v0, 2
+       move    $2, k0
+
+       LONG_L  $3, PT_R3(k1)
+       LONG_L  $4, PT_R4(k1)
+       LONG_L  $5, PT_R5(k1)
+       LONG_L  $6, PT_R6(k1)
+       LONG_L  $7, PT_R7(k1)
+       LONG_L  $8, PT_R8(k1)
+       LONG_L  $9, PT_R9(k1)
+       LONG_L  $10, PT_R10(k1)
+       LONG_L  $11, PT_R11(k1)
+       LONG_L  $12, PT_R12(k1)
+       LONG_L  $13, PT_R13(k1)
+       LONG_L  $14, PT_R14(k1)
+       LONG_L  $15, PT_R15(k1)
+       LONG_L  $16, PT_R16(k1)
+       LONG_L  $17, PT_R17(k1)
+       LONG_L  $18, PT_R18(k1)
+       LONG_L  $19, PT_R19(k1)
+       LONG_L  $20, PT_R20(k1)
+       LONG_L  $21, PT_R21(k1)
+       LONG_L  $22, PT_R22(k1)
+       LONG_L  $23, PT_R23(k1)
+       LONG_L  $24, PT_R24(k1)
+       LONG_L  $25, PT_R25(k1)
+
+       /* Host k0/k1 were not saved */
+
+       LONG_L  $28, PT_R28(k1)
+       LONG_L  $29, PT_R29(k1)
+       LONG_L  $30, PT_R30(k1)
+
+       LONG_L  k0, PT_HI(k1)
+       mthi    k0
+
+       LONG_L  k0, PT_LO(k1)
+       mtlo    k0
+
+       /* Restore RDHWR access */
+       PTR_LI  k0, 0x2000000F
+       mtc0    k0, CP0_HWRENA
+
+       /* Restore RA, which is the address we will return to */
+       LONG_L  ra, PT_R31(k1)
+       j       ra
+        nop
+
+VECTOR_END(MIPSX(GuestExceptionEnd))
+.end MIPSX(GuestException)
+
+MIPSX(exceptions):
+       #####
+       ##### The exception handlers.
+       #####
+       .word _C_LABEL(MIPSX(GuestException))   #  0
+       .word _C_LABEL(MIPSX(GuestException))   #  1
+       .word _C_LABEL(MIPSX(GuestException))   #  2
+       .word _C_LABEL(MIPSX(GuestException))   #  3
+       .word _C_LABEL(MIPSX(GuestException))   #  4
+       .word _C_LABEL(MIPSX(GuestException))   #  5
+       .word _C_LABEL(MIPSX(GuestException))   #  6
+       .word _C_LABEL(MIPSX(GuestException))   #  7
+       .word _C_LABEL(MIPSX(GuestException))   #  8
+       .word _C_LABEL(MIPSX(GuestException))   #  9
+       .word _C_LABEL(MIPSX(GuestException))   # 10
+       .word _C_LABEL(MIPSX(GuestException))   # 11
+       .word _C_LABEL(MIPSX(GuestException))   # 12
+       .word _C_LABEL(MIPSX(GuestException))   # 13
+       .word _C_LABEL(MIPSX(GuestException))   # 14
+       .word _C_LABEL(MIPSX(GuestException))   # 15
+       .word _C_LABEL(MIPSX(GuestException))   # 16
+       .word _C_LABEL(MIPSX(GuestException))   # 17
+       .word _C_LABEL(MIPSX(GuestException))   # 18
+       .word _C_LABEL(MIPSX(GuestException))   # 19
+       .word _C_LABEL(MIPSX(GuestException))   # 20
+       .word _C_LABEL(MIPSX(GuestException))   # 21
+       .word _C_LABEL(MIPSX(GuestException))   # 22
+       .word _C_LABEL(MIPSX(GuestException))   # 23
+       .word _C_LABEL(MIPSX(GuestException))   # 24
+       .word _C_LABEL(MIPSX(GuestException))   # 25
+       .word _C_LABEL(MIPSX(GuestException))   # 26
+       .word _C_LABEL(MIPSX(GuestException))   # 27
+       .word _C_LABEL(MIPSX(GuestException))   # 28
+       .word _C_LABEL(MIPSX(GuestException))   # 29
+       .word _C_LABEL(MIPSX(GuestException))   # 30
+       .word _C_LABEL(MIPSX(GuestException))   # 31
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
new file mode 100644 (file)
index 0000000..4fda672
--- /dev/null
@@ -0,0 +1,1219 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: MIPS specific KVM APIs
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/bootmem.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "interrupt.h"
+#include "commpage.h"
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#ifndef VECTORSPACING
+#define VECTORSPACING 0x100    /* for EI/VI mode */
+#endif
+
+#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
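+/* Per-VCPU exit/event counters, exported to userspace via debugfs */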
+struct kvm_stats_debugfs_item debugfs_entries[] = {
+       { "wait",         VCPU_STAT(wait_exits),         KVM_STAT_VCPU },
+       { "cache",        VCPU_STAT(cache_exits),        KVM_STAT_VCPU },
+       { "signal",       VCPU_STAT(signal_exits),       KVM_STAT_VCPU },
+       { "interrupt",    VCPU_STAT(int_exits),          KVM_STAT_VCPU },
+       { "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
+       { "tlbmod",       VCPU_STAT(tlbmod_exits),       KVM_STAT_VCPU },
+       { "tlbmiss_ld",   VCPU_STAT(tlbmiss_ld_exits),   KVM_STAT_VCPU },
+       { "tlbmiss_st",   VCPU_STAT(tlbmiss_st_exits),   KVM_STAT_VCPU },
+       { "addrerr_st",   VCPU_STAT(addrerr_st_exits),   KVM_STAT_VCPU },
+       { "addrerr_ld",   VCPU_STAT(addrerr_ld_exits),   KVM_STAT_VCPU },
+       { "syscall",      VCPU_STAT(syscall_exits),      KVM_STAT_VCPU },
+       { "resvd_inst",   VCPU_STAT(resvd_inst_exits),   KVM_STAT_VCPU },
+       { "break_inst",   VCPU_STAT(break_inst_exits),   KVM_STAT_VCPU },
+       { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
+       { "halt_wakeup",  VCPU_STAT(halt_wakeup),        KVM_STAT_VCPU },
+       {NULL}
+};
+
+static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+       int i;
+
+       for_each_possible_cpu(i) {
+               vcpu->arch.guest_kernel_asid[i] = 0;
+               vcpu->arch.guest_user_asid[i] = 0;
+       }
+
+       return 0;
+}
+
+/*
+ * XXXKYMA: We are simulating a processor that has the WII bit set in
+ * Config7, so we are "runnable" if interrupts are pending
+ */
+int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
+{
+       return !!(vcpu->arch.pending_exceptions);
+}
+
+int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
+{
+       return 1;
+}
+
+int kvm_arch_hardware_enable(void *garbage)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_disable(void *garbage)
+{
+}
+
+int kvm_arch_hardware_setup(void)
+{
+       return 0;
+}
+
+void kvm_arch_hardware_unsetup(void)
+{
+}
+
+void kvm_arch_check_processor_compat(void *rtn)
+{
+       *(int *)rtn = 0;
+}
+
+static void kvm_mips_init_tlbs(struct kvm *kvm)
+{
+       unsigned long wired;
+
+       /*
+        * Add a wired entry to the TLB; it is used to map the commpage to
+        * the Guest kernel
+        */
+       wired = read_c0_wired();
+       write_c0_wired(wired + 1);
+       mtc0_tlbw_hazard();
+       kvm->arch.commpage_tlb = wired;
+
+       kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(),
+                 kvm->arch.commpage_tlb);
+}
+
+static void kvm_mips_init_vm_percpu(void *arg)
+{
+       struct kvm *kvm = (struct kvm *)arg;
+
+       kvm_mips_init_tlbs(kvm);
+       kvm_mips_callbacks->vm_init(kvm);
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+       if (atomic_inc_return(&kvm_mips_instance) == 1) {
+               kvm_debug("%s: 1st KVM instance, setup host TLB parameters\n",
+                         __func__);
+               on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1);
+       }
+
+       return 0;
+}
+
+void kvm_mips_free_vcpus(struct kvm *kvm)
+{
+       unsigned int i;
+       struct kvm_vcpu *vcpu;
+
+       /* Put the pages we reserved for the guest pmap */
+       for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
+               if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
+                       kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+       }
+       kfree(kvm->arch.guest_pmap);
+
+       kvm_for_each_vcpu(i, vcpu, kvm) {
+               kvm_arch_vcpu_free(vcpu);
+       }
+
+       mutex_lock(&kvm->lock);
+
+       for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
+               kvm->vcpus[i] = NULL;
+
+       atomic_set(&kvm->online_vcpus, 0);
+
+       mutex_unlock(&kvm->lock);
+}
+
+void kvm_arch_sync_events(struct kvm *kvm)
+{
+}
+
+static void kvm_mips_uninit_tlbs(void *arg)
+{
+       /* Restore wired count */
+       write_c0_wired(0);
+       mtc0_tlbw_hazard();
+       /* Clear out all the TLBs */
+       kvm_local_flush_tlb_all();
+}
+
+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+       kvm_mips_free_vcpus(kvm);
+
+       /* If this is the last instance, restore wired count */
+       if (atomic_dec_return(&kvm_mips_instance) == 0) {
+               kvm_debug("%s: last KVM instance, restoring TLB parameters\n",
+                         __func__);
+               on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1);
+       }
+}
+
+long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
+                       unsigned long arg)
+{
+       return -ENOIOCTLCMD;
+}
+
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+                          struct kvm_memory_slot *dont)
+{
+}
+
+int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+                           unsigned long npages)
+{
+       return 0;
+}
+
+void kvm_arch_memslots_updated(struct kvm *kvm)
+{
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+                                  struct kvm_memory_slot *memslot,
+                                  struct kvm_userspace_memory_region *mem,
+                                  enum kvm_mr_change change)
+{
+       return 0;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+                                  struct kvm_userspace_memory_region *mem,
+                                  const struct kvm_memory_slot *old,
+                                  enum kvm_mr_change change)
+{
+       unsigned long npages = 0;
+       int i;
+
+       kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
+                 __func__, kvm, mem->slot, mem->guest_phys_addr,
+                 mem->memory_size, mem->userspace_addr);
+
+       /* Setup Guest PMAP table */
+       if (!kvm->arch.guest_pmap) {
+               if (mem->slot == 0)
+                       npages = mem->memory_size >> PAGE_SHIFT;
+
+               if (npages) {
+                       kvm->arch.guest_pmap_npages = npages;
+                       kvm->arch.guest_pmap =
+                           kzalloc(npages * sizeof(unsigned long), GFP_KERNEL);
+
+                       if (!kvm->arch.guest_pmap) {
+                               kvm_err("Failed to allocate guest PMAP\n");
+                               return;
+                       }
+
+                       kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n",
+                                 npages, kvm->arch.guest_pmap);
+
+                       /* Now setup the page table */
+                       for (i = 0; i < npages; i++)
+                               kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE;
+               }
+       }
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+                                  struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow(struct kvm *kvm)
+{
+}
+
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+       int err, size, offset;
+       void *gebase;
+       int i;
+
+       struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
+
+       if (!vcpu) {
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = kvm_vcpu_init(vcpu, kvm, id);
+
+       if (err)
+               goto out_free_cpu;
+
+       kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);
+
+       /*
+        * Allocate space for host mode exception handlers that handle
+        * guest mode exits
+        */
+       if (cpu_has_veic || cpu_has_vint)
+               size = 0x200 + VECTORSPACING * 64;
+       else
+               size = 0x4000;
+
+       /* Save Linux EBASE */
+       vcpu->arch.host_ebase = (void *)read_c0_ebase();
+
+       gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
+
+       if (!gebase) {
+               err = -ENOMEM;
+               goto out_free_cpu;
+       }
+       kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
+                 ALIGN(size, PAGE_SIZE), gebase);
+
+       /* Save new ebase */
+       vcpu->arch.guest_ebase = gebase;
+
+       /* Copy L1 Guest Exception handler to correct offset */
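+       /*
+        * Resulting layout (matching the offsets used in locore.S):
+        * 0x000 TLB refill, 0x180 general exception, 0x200+ vectored
+        * interrupts, 0x2000 the guest exit handler, 0x3000 scratch
+        * space for the guest k1.
+        */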
+
+       /* TLB Refill, EXL = 0 */
+       memcpy(gebase, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* General Exception Entry point */
+       memcpy(gebase + 0x180, mips32_exception,
+              mips32_exceptionEnd - mips32_exception);
+
+       /* For vectored interrupts poke the exception code @ all offsets 0-7 */
+       for (i = 0; i < 8; i++) {
+               kvm_debug("L1 Vectored handler @ %p\n",
+                         gebase + 0x200 + (i * VECTORSPACING));
+               memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
+                      mips32_exceptionEnd - mips32_exception);
+       }
+
+       /* General handler, relocate to unmapped space for sanity's sake */
+       offset = 0x2000;
+       kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
+                 gebase + offset,
+                 mips32_GuestExceptionEnd - mips32_GuestException);
+
+       memcpy(gebase + offset, mips32_GuestException,
+              mips32_GuestExceptionEnd - mips32_GuestException);
+
+       /* Invalidate the icache for these ranges */
+       local_flush_icache_range((unsigned long)gebase,
+                               (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
+
+       /*
+        * Allocate comm page for guest kernel; a TLB entry will be reserved
+        * for mapping GVA @ 0xFFFF8000 to this page
+        */
+       vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
+
+       if (!vcpu->arch.kseg0_commpage) {
+               err = -ENOMEM;
+               goto out_free_gebase;
+       }
+
+       kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
+       kvm_mips_commpage_init(vcpu);
+
+       /* Init */
+       vcpu->arch.last_sched_cpu = -1;
+
+       /* Start off the timer */
+       kvm_mips_init_count(vcpu);
+
+       return vcpu;
+
+out_free_gebase:
+       kfree(gebase);
+
+out_free_cpu:
+       kfree(vcpu);
+
+out:
+       return ERR_PTR(err);
+}
+
+void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+{
+       hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+       kvm_vcpu_uninit(vcpu);
+
+       kvm_mips_dump_stats(vcpu);
+
+       kfree(vcpu->arch.guest_ebase);
+       kfree(vcpu->arch.kseg0_commpage);
+       kfree(vcpu);
+}
+
+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+       kvm_arch_vcpu_free(vcpu);
+}
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                       struct kvm_guest_debug *dbg)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       int r = 0;
+       sigset_t sigsaved;
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+
+       if (vcpu->mmio_needed) {
+               if (!vcpu->mmio_is_write)
+                       kvm_mips_complete_mmio_load(vcpu, run);
+               vcpu->mmio_needed = 0;
+       }
+
+       local_irq_disable();
+       /* Check if we have any exceptions/interrupts pending */
+       kvm_mips_deliver_interrupts(vcpu,
+                                   kvm_read_c0_guest_cause(vcpu->arch.cop0));
+
+       kvm_guest_enter();
+
+       r = __kvm_mips_vcpu_run(run, vcpu);
+
+       kvm_guest_exit();
+       local_irq_enable();
+
+       if (vcpu->sigset_active)
+               sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+
+       return r;
+}
+
+int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
+                            struct kvm_mips_interrupt *irq)
+{
+       int intr = (int)irq->irq;
+       struct kvm_vcpu *dvcpu = NULL;
+
+       if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
+               kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
+                         (int)intr);
+
+       if (irq->cpu == -1)
+               dvcpu = vcpu;
+       else
+               dvcpu = vcpu->kvm->vcpus[irq->cpu];
+
+       if (intr == 2 || intr == 3 || intr == 4) {
+               kvm_mips_callbacks->queue_io_int(dvcpu, irq);
+
+       } else if (intr == -2 || intr == -3 || intr == -4) {
+               kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
+       } else {
+               kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
+                       irq->cpu, irq->irq);
+               return -EINVAL;
+       }
+
+       dvcpu->arch.wait = 0;
+
+       if (waitqueue_active(&dvcpu->wq))
+               wake_up_interruptible(&dvcpu->wq);
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
+                                   struct kvm_mp_state *mp_state)
+{
+       return -ENOIOCTLCMD;
+}
+
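+/* Register IDs copied out to userspace by the KVM_GET_REG_LIST ioctl */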
+static u64 kvm_mips_get_one_regs[] = {
+       KVM_REG_MIPS_R0,
+       KVM_REG_MIPS_R1,
+       KVM_REG_MIPS_R2,
+       KVM_REG_MIPS_R3,
+       KVM_REG_MIPS_R4,
+       KVM_REG_MIPS_R5,
+       KVM_REG_MIPS_R6,
+       KVM_REG_MIPS_R7,
+       KVM_REG_MIPS_R8,
+       KVM_REG_MIPS_R9,
+       KVM_REG_MIPS_R10,
+       KVM_REG_MIPS_R11,
+       KVM_REG_MIPS_R12,
+       KVM_REG_MIPS_R13,
+       KVM_REG_MIPS_R14,
+       KVM_REG_MIPS_R15,
+       KVM_REG_MIPS_R16,
+       KVM_REG_MIPS_R17,
+       KVM_REG_MIPS_R18,
+       KVM_REG_MIPS_R19,
+       KVM_REG_MIPS_R20,
+       KVM_REG_MIPS_R21,
+       KVM_REG_MIPS_R22,
+       KVM_REG_MIPS_R23,
+       KVM_REG_MIPS_R24,
+       KVM_REG_MIPS_R25,
+       KVM_REG_MIPS_R26,
+       KVM_REG_MIPS_R27,
+       KVM_REG_MIPS_R28,
+       KVM_REG_MIPS_R29,
+       KVM_REG_MIPS_R30,
+       KVM_REG_MIPS_R31,
+
+       KVM_REG_MIPS_HI,
+       KVM_REG_MIPS_LO,
+       KVM_REG_MIPS_PC,
+
+       KVM_REG_MIPS_CP0_INDEX,
+       KVM_REG_MIPS_CP0_CONTEXT,
+       KVM_REG_MIPS_CP0_USERLOCAL,
+       KVM_REG_MIPS_CP0_PAGEMASK,
+       KVM_REG_MIPS_CP0_WIRED,
+       KVM_REG_MIPS_CP0_HWRENA,
+       KVM_REG_MIPS_CP0_BADVADDR,
+       KVM_REG_MIPS_CP0_COUNT,
+       KVM_REG_MIPS_CP0_ENTRYHI,
+       KVM_REG_MIPS_CP0_COMPARE,
+       KVM_REG_MIPS_CP0_STATUS,
+       KVM_REG_MIPS_CP0_CAUSE,
+       KVM_REG_MIPS_CP0_EPC,
+       KVM_REG_MIPS_CP0_CONFIG,
+       KVM_REG_MIPS_CP0_CONFIG1,
+       KVM_REG_MIPS_CP0_CONFIG2,
+       KVM_REG_MIPS_CP0_CONFIG3,
+       KVM_REG_MIPS_CP0_CONFIG7,
+       KVM_REG_MIPS_CP0_ERROREPC,
+
+       KVM_REG_MIPS_COUNT_CTL,
+       KVM_REG_MIPS_COUNT_RESUME,
+       KVM_REG_MIPS_COUNT_HZ,
+};
+
+static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int ret;
+       s64 v;
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
+               v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
+               break;
+       case KVM_REG_MIPS_HI:
+               v = (long)vcpu->arch.hi;
+               break;
+       case KVM_REG_MIPS_LO:
+               v = (long)vcpu->arch.lo;
+               break;
+       case KVM_REG_MIPS_PC:
+               v = (long)vcpu->arch.pc;
+               break;
+
+       case KVM_REG_MIPS_CP0_INDEX:
+               v = (long)kvm_read_c0_guest_index(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONTEXT:
+               v = (long)kvm_read_c0_guest_context(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_USERLOCAL:
+               v = (long)kvm_read_c0_guest_userlocal(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_PAGEMASK:
+               v = (long)kvm_read_c0_guest_pagemask(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_WIRED:
+               v = (long)kvm_read_c0_guest_wired(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_HWRENA:
+               v = (long)kvm_read_c0_guest_hwrena(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_BADVADDR:
+               v = (long)kvm_read_c0_guest_badvaddr(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_ENTRYHI:
+               v = (long)kvm_read_c0_guest_entryhi(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_COMPARE:
+               v = (long)kvm_read_c0_guest_compare(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_STATUS:
+               v = (long)kvm_read_c0_guest_status(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CAUSE:
+               v = (long)kvm_read_c0_guest_cause(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_EPC:
+               v = (long)kvm_read_c0_guest_epc(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_ERROREPC:
+               v = (long)kvm_read_c0_guest_errorepc(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG:
+               v = (long)kvm_read_c0_guest_config(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG1:
+               v = (long)kvm_read_c0_guest_config1(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG2:
+               v = (long)kvm_read_c0_guest_config2(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG3:
+               v = (long)kvm_read_c0_guest_config3(cop0);
+               break;
+       case KVM_REG_MIPS_CP0_CONFIG7:
+               v = (long)kvm_read_c0_guest_config7(cop0);
+               break;
+       /* registers to be handled specially */
+       case KVM_REG_MIPS_CP0_COUNT:
+       case KVM_REG_MIPS_COUNT_CTL:
+       case KVM_REG_MIPS_COUNT_RESUME:
+       case KVM_REG_MIPS_COUNT_HZ:
+               ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               return -EINVAL;
+       }
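+       /* Copy the value out using the access size encoded in reg->id */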
+       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+               return put_user(v, uaddr64);
+       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+               u32 v32 = (u32)v;
+
+               return put_user(v32, uaddr32);
+       } else {
+               return -EINVAL;
+       }
+}
+
+static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
+                           const struct kvm_one_reg *reg)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       u64 v;
+
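+       /* Read the new value using the access size encoded in reg->id */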
+       if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
+               u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
+
+               if (get_user(v, uaddr64) != 0)
+                       return -EFAULT;
+       } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
+               u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
+               s32 v32;
+
+               if (get_user(v32, uaddr32) != 0)
+                       return -EFAULT;
+               v = (s64)v32;
+       } else {
+               return -EINVAL;
+       }
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_R0:
+               /* Silently ignore requests to set $0 */
+               break;
+       case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
+               vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
+               break;
+       case KVM_REG_MIPS_HI:
+               vcpu->arch.hi = v;
+               break;
+       case KVM_REG_MIPS_LO:
+               vcpu->arch.lo = v;
+               break;
+       case KVM_REG_MIPS_PC:
+               vcpu->arch.pc = v;
+               break;
+
+       case KVM_REG_MIPS_CP0_INDEX:
+               kvm_write_c0_guest_index(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_CONTEXT:
+               kvm_write_c0_guest_context(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_USERLOCAL:
+               kvm_write_c0_guest_userlocal(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_PAGEMASK:
+               kvm_write_c0_guest_pagemask(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_WIRED:
+               kvm_write_c0_guest_wired(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_HWRENA:
+               kvm_write_c0_guest_hwrena(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_BADVADDR:
+               kvm_write_c0_guest_badvaddr(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_ENTRYHI:
+               kvm_write_c0_guest_entryhi(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_STATUS:
+               kvm_write_c0_guest_status(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_EPC:
+               kvm_write_c0_guest_epc(cop0, v);
+               break;
+       case KVM_REG_MIPS_CP0_ERROREPC:
+               kvm_write_c0_guest_errorepc(cop0, v);
+               break;
+       /* registers to be handled specially */
+       case KVM_REG_MIPS_CP0_COUNT:
+       case KVM_REG_MIPS_CP0_COMPARE:
+       case KVM_REG_MIPS_CP0_CAUSE:
+       case KVM_REG_MIPS_COUNT_CTL:
+       case KVM_REG_MIPS_COUNT_RESUME:
+       case KVM_REG_MIPS_COUNT_HZ:
+               return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
+                        unsigned long arg)
+{
+       struct kvm_vcpu *vcpu = filp->private_data;
+       void __user *argp = (void __user *)arg;
+       long r;
+
+       switch (ioctl) {
+       case KVM_SET_ONE_REG:
+       case KVM_GET_ONE_REG: {
+               struct kvm_one_reg reg;
+
+               if (copy_from_user(&reg, argp, sizeof(reg)))
+                       return -EFAULT;
+               if (ioctl == KVM_SET_ONE_REG)
+                       return kvm_mips_set_reg(vcpu, &reg);
+               else
+                       return kvm_mips_get_reg(vcpu, &reg);
+       }
+       case KVM_GET_REG_LIST: {
+               struct kvm_reg_list __user *user_list = argp;
+               u64 __user *reg_dest;
+               struct kvm_reg_list reg_list;
+               unsigned n;
+
+               if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+                       return -EFAULT;
+               n = reg_list.n;
+               reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+               if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+                       return -EFAULT;
+               if (n < reg_list.n)
+                       return -E2BIG;
+               reg_dest = user_list->reg;
+               if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
+                                sizeof(kvm_mips_get_one_regs)))
+                       return -EFAULT;
+               return 0;
+       }
+       case KVM_NMI:
+               /* Treat the NMI as a CPU reset */
+               r = kvm_mips_reset_vcpu(vcpu);
+               break;
+       case KVM_INTERRUPT:
+               {
+                       struct kvm_mips_interrupt irq;
+
+                       r = -EFAULT;
+                       if (copy_from_user(&irq, argp, sizeof(irq)))
+                               goto out;
+
+                       kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
+                                 irq.irq);
+
+                       r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
+                       break;
+               }
+       default:
+               r = -ENOIOCTLCMD;
+       }
+
+out:
+       return r;
+}
+
+/* Get (and clear) the dirty memory log for a memory slot. */
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+       struct kvm_memory_slot *memslot;
+       unsigned long ga, ga_end;
+       int is_dirty = 0;
+       int r;
+       unsigned long n;
+
+       mutex_lock(&kvm->slots_lock);
+
+       r = kvm_get_dirty_log(kvm, log, &is_dirty);
+       if (r)
+               goto out;
+
+       /* If nothing is dirty, don't bother messing with page tables. */
+       if (is_dirty) {
+               memslot = &kvm->memslots->memslots[log->slot];
+
+               ga = memslot->base_gfn << PAGE_SHIFT;
+               ga_end = ga + (memslot->npages << PAGE_SHIFT);
+
+               kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga,
+                        ga_end);
+
+               n = kvm_dirty_bitmap_bytes(memslot);
+               memset(memslot->dirty_bitmap, 0, n);
+       }
+
+       r = 0;
+out:
+       mutex_unlock(&kvm->slots_lock);
+       return r;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+       long r;
+
+       switch (ioctl) {
+       default:
+               r = -ENOIOCTLCMD;
+       }
+
+       return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+       if (kvm_mips_callbacks) {
+               kvm_err("kvm: module already exists\n");
+               return -EEXIST;
+       }
+
+       return kvm_mips_emulation_init(&kvm_mips_callbacks);
+}
+
+void kvm_arch_exit(void)
+{
+       kvm_mips_callbacks = NULL;
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+                                 struct kvm_sregs *sregs)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+       return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+       int r;
+
+       switch (ext) {
+       case KVM_CAP_ONE_REG:
+               r = 1;
+               break;
+       case KVM_CAP_COALESCED_MMIO:
+               r = KVM_COALESCED_MMIO_PAGE_OFFSET;
+               break;
+       default:
+               r = 0;
+               break;
+       }
+       return r;
+}
+
+int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_pending_timer(vcpu);
+}
+
+int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
+{
+       int i;
+       struct mips_coproc *cop0;
+
+       if (!vcpu)
+               return -1;
+
+       kvm_debug("VCPU Register Dump:\n");
+       kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
+       kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);
+
+       for (i = 0; i < 32; i += 4) {
+               kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
+                      vcpu->arch.gprs[i],
+                      vcpu->arch.gprs[i + 1],
+                      vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
+       }
+       kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
+       kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
+
+       cop0 = vcpu->arch.cop0;
+       kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n",
+                 kvm_read_c0_guest_status(cop0),
+                 kvm_read_c0_guest_cause(cop0));
+
+       kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+               vcpu->arch.gprs[i] = regs->gpr[i];
+       vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
+       vcpu->arch.hi = regs->hi;
+       vcpu->arch.lo = regs->lo;
+       vcpu->arch.pc = regs->pc;
+
+       return 0;
+}
+
+int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
+               regs->gpr[i] = vcpu->arch.gprs[i];
+
+       regs->hi = vcpu->arch.hi;
+       regs->lo = vcpu->arch.lo;
+       regs->pc = vcpu->arch.pc;
+
+       return 0;
+}
+
+static void kvm_mips_comparecount_func(unsigned long data)
+{
+       struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+
+       kvm_mips_callbacks->queue_timer_int(vcpu);
+
+       vcpu->arch.wait = 0;
+       if (waitqueue_active(&vcpu->wq))
+               wake_up_interruptible(&vcpu->wq);
+}
+
+/* low level hrtimer wake routine */
+static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
+{
+       struct kvm_vcpu *vcpu;
+
+       vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
+       kvm_mips_comparecount_func((unsigned long) vcpu);
+       return kvm_mips_count_timeout(vcpu);
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       kvm_mips_callbacks->vcpu_init(vcpu);
+       hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
+                    HRTIMER_MODE_REL);
+       vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
+       return 0;
+}
+
+void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
+                                 struct kvm_translation *tr)
+{
+       return 0;
+}
+
+/* Initial guest state */
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       return kvm_mips_callbacks->vcpu_setup(vcpu);
+}
+
+static void kvm_mips_set_c0_status(void)
+{
+       uint32_t status = read_c0_status();
+
+       if (cpu_has_fpu)
+               status |= (ST0_CU1);
+
+       if (cpu_has_dsp)
+               status |= (ST0_MX);
+
+       write_c0_status(status);
+       ehb();
+}
+
+/*
+ * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
+ */
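+/* (locore.S recovers errcode from v0 with an arithmetic shift right by 2) */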
+int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
+{
+       uint32_t cause = vcpu->arch.host_cp0_cause;
+       uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       /* Set a default exit reason */
+       run->exit_reason = KVM_EXIT_UNKNOWN;
+       run->ready_for_interrupt_injection = 1;
+
+       /*
+        * Set the appropriate status bits based on host CPU features,
+        * before we hit the scheduler
+        */
+       kvm_mips_set_c0_status();
+
+       local_irq_enable();
+
+       kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
+                       cause, opc, run, vcpu);
+
+       /*
+        * Do a privilege check; if in UM, most of these exit conditions
+        * end up causing an exception to be delivered to the Guest Kernel
+        */
+       er = kvm_mips_check_privilege(cause, opc, run, vcpu);
+       if (er == EMULATE_PRIV_FAIL) {
+               goto skip_emul;
+       } else if (er == EMULATE_FAIL) {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               goto skip_emul;
+       }
+
+       switch (exccode) {
+       case T_INT:
+               kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc);
+
+               ++vcpu->stat.int_exits;
+               trace_kvm_exit(vcpu, INT_EXITS);
+
+               if (need_resched())
+                       cond_resched();
+
+               ret = RESUME_GUEST;
+               break;
+
+       case T_COP_UNUSABLE:
+               kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc);
+
+               ++vcpu->stat.cop_unusable_exits;
+               trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
+               ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
+               /* XXXKYMA: Might need to return to user space */
+               if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
+                       ret = RESUME_HOST;
+               break;
+
+       case T_TLB_MOD:
+               ++vcpu->stat.tlbmod_exits;
+               trace_kvm_exit(vcpu, TLBMOD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
+               break;
+
+       case T_TLB_ST_MISS:
+               kvm_debug("TLB ST fault:  cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n",
+                         cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+                         badvaddr);
+
+               ++vcpu->stat.tlbmiss_st_exits;
+               trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
+               break;
+
+       case T_TLB_LD_MISS:
+               kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+
+               ++vcpu->stat.tlbmiss_ld_exits;
+               trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
+               break;
+
+       case T_ADDR_ERR_ST:
+               ++vcpu->stat.addrerr_st_exits;
+               trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
+               break;
+
+       case T_ADDR_ERR_LD:
+               ++vcpu->stat.addrerr_ld_exits;
+               trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
+               ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
+               break;
+
+       case T_SYSCALL:
+               ++vcpu->stat.syscall_exits;
+               trace_kvm_exit(vcpu, SYSCALL_EXITS);
+               ret = kvm_mips_callbacks->handle_syscall(vcpu);
+               break;
+
+       case T_RES_INST:
+               ++vcpu->stat.resvd_inst_exits;
+               trace_kvm_exit(vcpu, RESVD_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_res_inst(vcpu);
+               break;
+
+       case T_BREAK:
+               ++vcpu->stat.break_inst_exits;
+               trace_kvm_exit(vcpu, BREAK_INST_EXITS);
+               ret = kvm_mips_callbacks->handle_break(vcpu);
+               break;
+
+       default:
+               kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x  BadVaddr: %#lx Status: %#lx\n",
+                       exccode, opc, kvm_get_inst(opc, vcpu), badvaddr,
+                       kvm_read_c0_guest_status(vcpu->arch.cop0));
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+       }
+
+skip_emul:
+       local_irq_disable();
+
+       if (er == EMULATE_DONE && !(ret & RESUME_HOST))
+               kvm_mips_deliver_interrupts(vcpu, cause);
+
+       if (!(ret & RESUME_HOST)) {
+               /* Only check for signals if not already exiting to userspace */
+               if (signal_pending(current)) {
+                       run->exit_reason = KVM_EXIT_INTR;
+                       ret = (-EINTR << 2) | RESUME_HOST;
+                       ++vcpu->stat.signal_exits;
+                       trace_kvm_exit(vcpu, SIGNAL_EXITS);
+               }
+       }
+
+       return ret;
+}
+
+int __init kvm_mips_init(void)
+{
+       int ret;
+
+       ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+       if (ret)
+               return ret;
+
+       /*
+        * On MIPS, kernel modules are executed from "mapped space", which
+        * requires TLBs. The TLB handling code is statically linked with
+        * the rest of the kernel (tlb.c) to avoid the possibility of
+        * double faulting. The issue is that the TLB code references
+ * routines that are part of the KVM module, which are only
+        * available once the module is loaded.
+        */
+       kvm_mips_gfn_to_pfn = gfn_to_pfn;
+       kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
+       kvm_mips_is_error_pfn = is_error_pfn;
+
+       pr_info("KVM/MIPS Initialized\n");
+       return 0;
+}
+
+void __exit kvm_mips_exit(void)
+{
+       kvm_exit();
+
+       kvm_mips_gfn_to_pfn = NULL;
+       kvm_mips_release_pfn_clean = NULL;
+       kvm_mips_is_error_pfn = NULL;
+
+       pr_info("KVM/MIPS unloaded\n");
+}
+
+module_init(kvm_mips_init);
+module_exit(kvm_mips_exit);
+
+EXPORT_TRACEPOINT_SYMBOL(kvm_exit);
diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h
new file mode 100644 (file)
index 0000000..03a6ae8
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+/* Define opcode values not defined in <asm/inst.h> */
+
+#ifndef __KVM_MIPS_OPCODE_H__
+#define __KVM_MIPS_OPCODE_H__
+
+/* COP0 Ops */
+#define mfmcz_op       0x0b    /* 01011 */
+#define wrpgpr_op      0x0e    /* 01110 */
+
+/* COP0 opcodes (only if COP0 and CO=1): */
+#define wait_op                0x20    /* 100000 */
+
+#endif /* __KVM_MIPS_OPCODE_H__ */
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
new file mode 100644 (file)
index 0000000..a74d602
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: COP0 access histogram
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/kvm_host.h>
+
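+/* Printable names for the exit types recorded via trace_kvm_exit() */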
+char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
+       "WAIT",
+       "CACHE",
+       "Signal",
+       "Interrupt",
+       "COP0/1 Unusable",
+       "TLB Mod",
+       "TLB Miss (LD)",
+       "TLB Miss (ST)",
+       "Address Err (ST)",
+       "Address Error (LD)",
+       "System Call",
+       "Reserved Inst",
+       "Break Inst",
+       "D-Cache Flushes",
+};
+
+char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
+       "Index",
+       "Random",
+       "EntryLo0",
+       "EntryLo1",
+       "Context",
+       "PG Mask",
+       "Wired",
+       "HWREna",
+       "BadVAddr",
+       "Count",
+       "EntryHI",
+       "Compare",
+       "Status",
+       "Cause",
+       "EXC PC",
+       "PRID",
+       "Config",
+       "LLAddr",
+       "Watch Lo",
+       "Watch Hi",
+       "X Context",
+       "Reserved",
+       "Impl Dep",
+       "Debug",
+       "DEPC",
+       "PerfCnt",
+       "ErrCtl",
+       "CacheErr",
+       "TagLo",
+       "TagHi",
+       "ErrorEPC",
+       "DESAVE"
+};
+
+void kvm_mips_dump_stats(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
+       int i, j;
+
+       kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id);
+       for (i = 0; i < N_MIPS_COPROC_REGS; i++) {
+               for (j = 0; j < N_MIPS_COPROC_SEL; j++) {
+                       if (vcpu->arch.cop0->stat[i][j])
+                               kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j,
+                                        vcpu->arch.cop0->stat[i][j]);
+               }
+       }
+#endif
+}
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
new file mode 100644 (file)
index 0000000..bbcd822
--- /dev/null
@@ -0,0 +1,809 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS TLB handling: this file is part of the Linux host kernel so that
+ * TLB handlers run from KSEG0
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kvm_host.h>
+#include <linux/srcu.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/cacheflush.h>
+#include <asm/tlb.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#define KVM_GUEST_PC_TLB    0
+#define KVM_GUEST_SP_TLB    1
+
+#define PRIx64 "llx"
+
+atomic_t kvm_mips_instance;
+EXPORT_SYMBOL(kvm_mips_instance);
+
+/* These function pointers are initialized once the KVM module is loaded */
+pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
+EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
+
+void (*kvm_mips_release_pfn_clean)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
+
+bool (*kvm_mips_is_error_pfn)(pfn_t pfn);
+EXPORT_SYMBOL(kvm_mips_is_error_pfn);
+
+uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+}
+
+uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+}
+
+inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+{
+       return vcpu->kvm->arch.commpage_tlb;
+}
+
+/* Structure defining a TLB entry data set. */
+
+void kvm_mips_dump_host_tlbs(void)
+{
+       unsigned long old_entryhi;
+       unsigned long old_pagemask;
+       struct kvm_mips_tlb tlb;
+       unsigned long flags;
+       int i;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       kvm_info("HOST TLBs:\n");
+       kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
+
+       for (i = 0; i < current_cpu_data.tlbsize; i++) {
+               write_c0_index(i);
+               mtc0_tlbw_hazard();
+
+               tlb_read();
+               tlbw_use_hazard();
+
+               tlb.tlb_hi = read_c0_entryhi();
+               tlb.tlb_lo0 = read_c0_entrylo0();
+               tlb.tlb_lo1 = read_c0_entrylo1();
+               tlb.tlb_mask = read_c0_pagemask();
+
+               kvm_info("TLB%c%3d Hi 0x%08lx ",
+                        (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                        i, tlb.tlb_hi);
+               kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                        (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                        (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                        (tlb.tlb_lo0 >> 3) & 7);
+               kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                        (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                        (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                        (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
+
+void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       struct kvm_mips_tlb tlb;
+       int i;
+
+       kvm_info("Guest TLBs:\n");
+       kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               tlb = vcpu->arch.guest_tlb[i];
+               kvm_info("TLB%c%3d Hi 0x%08lx ",
+                        (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+                        i, tlb.tlb_hi);
+               kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
+                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
+                        (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
+                        (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
+                        (tlb.tlb_lo0 >> 3) & 7);
+               kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
+                        (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
+                        (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
+                        (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
+                        (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+       }
+}
+EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
+
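+/*
+ * Map in the host page backing @gfn if it is not already present in the
+ * guest physical page map.  Returns 0 on success or -EFAULT on failure.
+ */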
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+       int srcu_idx, err = 0;
+       pfn_t pfn;
+
+       if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+               return 0;
+
+       srcu_idx = srcu_read_lock(&kvm->srcu);
+       pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
+
+       if (kvm_mips_is_error_pfn(pfn)) {
+               kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
+               err = -EFAULT;
+               goto out;
+       }
+
+       kvm->arch.guest_pmap[gfn] = pfn;
+out:
+       srcu_read_unlock(&kvm->srcu, srcu_idx);
+       return err;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+                                                   unsigned long gva)
+{
+       gfn_t gfn;
+       uint32_t offset = gva & ~PAGE_MASK;
+       struct kvm *kvm = vcpu->kvm;
+
+       if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+               kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+                       __builtin_return_address(0), gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+                       gva);
+               return KVM_INVALID_PAGE;
+       }
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+               return KVM_INVALID_ADDR;
+
+       return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
+
+/*
+ * XXXKYMA: Must be called with interrupts disabled.
+ * Set flush_dcache_mask == 0 if no dcache flush is required.
+ */
+int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+                           unsigned long entrylo0, unsigned long entrylo1,
+                           int flush_dcache_mask)
+{
+       unsigned long flags;
+       unsigned long old_entryhi;
+       int idx;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       write_c0_entryhi(entryhi);
+       mtc0_tlbw_hazard();
+
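+       /* Probe for a matching entry so an existing mapping is updated in place */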
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize) {
+               kvm_err("%s: Invalid Index: %d\n", __func__, idx);
+               kvm_mips_dump_host_tlbs();
+               write_c0_entryhi(old_entryhi);
+               mtc0_tlbw_hazard();
+               local_irq_restore(flags);
+               return -1;
+       }
+
+       write_c0_entrylo0(entrylo0);
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+
+       if (idx < 0)
+               tlb_write_random();
+       else
+               tlb_write_indexed();
+       tlbw_use_hazard();
+
+       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+                 vcpu->arch.pc, idx, read_c0_entryhi(),
+                 read_c0_entrylo0(), read_c0_entrylo1());
+
+       /* Flush D-cache */
+       if (flush_dcache_mask) {
+               if (entrylo0 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page((entryhi & VPN2_MASK) &
+                                             ~flush_dcache_mask);
+               }
+               if (entrylo1 & MIPS3_PG_V) {
+                       ++vcpu->stat.flush_dcache_exits;
+                       flush_data_cache_page(((entryhi & VPN2_MASK) &
+                                              ~flush_dcache_mask) |
+                                             (0x1 << PAGE_SHIFT));
+               }
+       }
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+       return 0;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+                                   struct kvm_vcpu *vcpu)
+{
+       gfn_t gfn;
+       pfn_t pfn0, pfn1;
+       unsigned long vaddr = 0;
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       int even;
+       struct kvm *kvm = vcpu->kvm;
+       const int flush_dcache_mask = 0;
+
+       if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+               kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+
+       gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+       if (gfn >= kvm->arch.guest_pmap_npages) {
+               kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+                       gfn, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               return -1;
+       }
+       even = !(gfn & 0x1);
+       vaddr = badvaddr & (PAGE_MASK << 1);
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+               return -1;
+
+       if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+               return -1;
+
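+       /* A TLB entry maps an even/odd page pair, so order the pfns by gfn parity */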
+       if (even) {
+               pfn0 = kvm->arch.guest_pmap[gfn];
+               pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
+       } else {
+               pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
+               pfn1 = kvm->arch.guest_pmap[gfn];
+       }
+
+       entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
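+       /* EntryLo: CCA = 3 (cacheable), dirty and valid bits set */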
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                  (1 << 2) | (0x1 << 1);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+                  (1 << 2) | (0x1 << 1);
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      flush_dcache_mask);
+}
+EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
+
+int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
+       struct kvm_vcpu *vcpu)
+{
+       pfn_t pfn0, pfn1;
+       unsigned long flags, old_entryhi = 0, vaddr = 0;
+       unsigned long entrylo0 = 0, entrylo1 = 0;
+
+       pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
+       pfn1 = 0;
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                  (1 << 2) | (0x1 << 1);
+       entrylo1 = 0;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       vaddr = badvaddr & (PAGE_MASK << 1);
+       write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
+       mtc0_tlbw_hazard();
+       write_c0_entrylo0(entrylo0);
+       mtc0_tlbw_hazard();
+       write_c0_entrylo1(entrylo1);
+       mtc0_tlbw_hazard();
+       write_c0_index(kvm_mips_get_commpage_asid(vcpu));
+       mtc0_tlbw_hazard();
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
+                 vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
+                 read_c0_entrylo0(), read_c0_entrylo1());
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+       local_irq_restore(flags);
+
+       return 0;
+}
+EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
+
+int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+                                        struct kvm_mips_tlb *tlb,
+                                        unsigned long *hpa0,
+                                        unsigned long *hpa1)
+{
+       unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+       struct kvm *kvm = vcpu->kvm;
+       pfn_t pfn0, pfn1;
+
+       if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+               pfn0 = 0;
+               pfn1 = 0;
+       } else {
+               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+                                          >> PAGE_SHIFT) < 0)
+                       return -1;
+
+               if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+                                          >> PAGE_SHIFT) < 0)
+                       return -1;
+
+               pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
+                                           >> PAGE_SHIFT];
+               pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
+                                           >> PAGE_SHIFT];
+       }
+
+       if (hpa0)
+               *hpa0 = pfn0 << PAGE_SHIFT;
+
+       if (hpa1)
+               *hpa1 = pfn1 << PAGE_SHIFT;
+
+       /* Get attributes from the Guest TLB */
+       entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+                                              kvm_mips_get_kernel_asid(vcpu) :
+                                              kvm_mips_get_user_asid(vcpu));
+       entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
+                  (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
+       entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
+                  (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
+
+       kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+                 tlb->tlb_lo0, tlb->tlb_lo1);
+
+       return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+                                      tlb->tlb_mask);
+}
+EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
+
+int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
+{
+       int i;
+       int index = -1;
+       struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
+
+       for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
+               if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
+                   TLB_HI_ASID_HIT(tlb[i], entryhi)) {
+                       index = i;
+                       break;
+               }
+       }
+
+       kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
+                 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+
+       return index;
+}
+EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
+
+int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
+{
+       unsigned long old_entryhi, flags;
+       int idx;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu))
+               write_c0_entryhi((vaddr & VPN2_MASK) |
+                                kvm_mips_get_kernel_asid(vcpu));
+       else {
+               write_c0_entryhi((vaddr & VPN2_MASK) |
+                                kvm_mips_get_user_asid(vcpu));
+       }
+
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       /* Restore old ASID */
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
+
+       return idx;
+}
+EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
+
+int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
+{
+       int idx;
+       unsigned long flags, old_entryhi;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
+       mtc0_tlbw_hazard();
+
+       tlb_probe();
+       tlb_probe_hazard();
+       idx = read_c0_index();
+
+       if (idx >= current_cpu_data.tlbsize)
+               BUG();
+
+       if (idx >= 0) {
+               write_c0_entryhi(UNIQUE_ENTRYHI(idx));
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       if (idx >= 0)
+               kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
+                         (va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu), idx);
+
+       return 0;
+}
+EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
+
+/* XXXKYMA: Fix: guest USER and KERNEL no longer share the same ASID */
+int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
+{
+       unsigned long flags, old_entryhi;
+
+       if (index >= current_cpu_data.tlbsize)
+               BUG();
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+
+       write_c0_entryhi(UNIQUE_ENTRYHI(index));
+       mtc0_tlbw_hazard();
+
+       write_c0_index(index);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo0(0);
+       mtc0_tlbw_hazard();
+
+       write_c0_entrylo1(0);
+       mtc0_tlbw_hazard();
+
+       tlb_write_indexed();
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+
+       return 0;
+}
+
+void kvm_mips_flush_host_tlb(int skip_kseg0)
+{
+       unsigned long flags;
+       unsigned long old_entryhi, entryhi;
+       unsigned long old_pagemask;
+       int entry = 0;
+       int maxentry = current_cpu_data.tlbsize;
+
+       local_irq_save(flags);
+
+       old_entryhi = read_c0_entryhi();
+       old_pagemask = read_c0_pagemask();
+
+       /* Blast 'em all away. */
+       for (entry = 0; entry < maxentry; entry++) {
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+
+               if (skip_kseg0) {
+                       tlb_read();
+                       tlbw_use_hazard();
+
+                       entryhi = read_c0_entryhi();
+
+                       /* Don't blow away guest kernel entries */
+                       if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
+                               continue;
+               }
+
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               mtc0_tlbw_hazard();
+               write_c0_entrylo0(0);
+               mtc0_tlbw_hazard();
+               write_c0_entrylo1(0);
+               mtc0_tlbw_hazard();
+
+               tlb_write_indexed();
+               mtc0_tlbw_hazard();
+       }
+
+       tlbw_use_hazard();
+
+       write_c0_entryhi(old_entryhi);
+       write_c0_pagemask(old_pagemask);
+       mtc0_tlbw_hazard();
+       tlbw_use_hazard();
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
+
+void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+                            struct kvm_vcpu *vcpu)
+{
+       unsigned long asid = asid_cache(cpu);
+
+       asid += ASID_INC;
+       if (!(asid & ASID_MASK)) {
+               if (cpu_has_vtag_icache)
+                       flush_icache_all();
+
+               kvm_local_flush_tlb_all();      /* start new asid cycle */
+
+               if (!asid)      /* fix version if needed */
+                       asid = ASID_FIRST_VERSION;
+       }
+
+       cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+void kvm_local_flush_tlb_all(void)
+{
+       unsigned long flags;
+       unsigned long old_ctx;
+       int entry = 0;
+
+       local_irq_save(flags);
+       /* Save old context and create impossible VPN2 value */
+       old_ctx = read_c0_entryhi();
+       write_c0_entrylo0(0);
+       write_c0_entrylo1(0);
+
+       /* Blast 'em all away. */
+       while (entry < current_cpu_data.tlbsize) {
+               /* Make sure all entries differ. */
+               write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+               write_c0_index(entry);
+               mtc0_tlbw_hazard();
+               tlb_write_indexed();
+               entry++;
+       }
+       tlbw_use_hazard();
+       write_c0_entryhi(old_ctx);
+       mtc0_tlbw_hazard();
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_local_flush_tlb_all);
+
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu:      Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+       if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+               hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+       unsigned long flags;
+       int newasid = 0;
+
+       kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+
+       /* Allocate new kernel and user ASIDs if needed */
+
+       local_irq_save(flags);
+
+       if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+           ASID_VERSION_MASK) {
+               kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+               vcpu->arch.guest_kernel_asid[cpu] =
+                   vcpu->arch.guest_kernel_mm.context.asid[cpu];
+               kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+               vcpu->arch.guest_user_asid[cpu] =
+                   vcpu->arch.guest_user_mm.context.asid[cpu];
+               newasid++;
+
+               kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+                         cpu_context(cpu, current->mm));
+               kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+                         cpu, vcpu->arch.guest_kernel_asid[cpu]);
+               kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+                         vcpu->arch.guest_user_asid[cpu]);
+       }
+
+       if (vcpu->arch.last_sched_cpu != cpu) {
+               kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+                         vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+               /*
+                * Migrate the timer interrupt to the current CPU so that it
+                * always interrupts the guest and synchronously triggers a
+                * guest timer interrupt.
+                */
+               kvm_mips_migrate_count(vcpu);
+       }
+
+       if (!newasid) {
+               /*
+                * If we preempted while the guest was executing, then reload
+                * the preempted ASID
+                */
+               if (current->flags & PF_VCPU) {
+                       write_c0_entryhi(vcpu->arch.preempt_entryhi &
+                                        ASID_MASK);
+                       ehb();
+               }
+       } else {
+               /* New ASIDs were allocated for the VM */
+
+               /*
+                * Were we in guest context? If so then the preempted ASID is
+                * no longer valid, we need to set it to what it should be based
+                * on the mode of the Guest (Kernel/User)
+                */
+               if (current->flags & PF_VCPU) {
+                       if (KVM_GUEST_KERNEL_MODE(vcpu))
+                               write_c0_entryhi(vcpu->arch.guest_kernel_asid[cpu] &
+                                                ASID_MASK);
+                       else
+                               write_c0_entryhi(vcpu->arch.guest_user_asid[cpu] &
+                                                ASID_MASK);
+                       ehb();
+               }
+       }
+
+       local_irq_restore(flags);
+
+}
+EXPORT_SYMBOL(kvm_arch_vcpu_load);
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       unsigned long flags;
+       uint32_t cpu;
+
+       local_irq_save(flags);
+
+       cpu = smp_processor_id();
+
+       vcpu->arch.preempt_entryhi = read_c0_entryhi();
+       vcpu->arch.last_sched_cpu = cpu;
+
+       if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+            ASID_VERSION_MASK)) {
+               kvm_debug("%s: Dropping MMU Context:  %#lx\n", __func__,
+                         cpu_context(cpu, current->mm));
+               drop_mmu_context(current->mm, cpu);
+       }
+       write_c0_entryhi(cpu_asid(cpu, current->mm));
+       ehb();
+
+       local_irq_restore(flags);
+}
+EXPORT_SYMBOL(kvm_arch_vcpu_put);
+
+uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       unsigned long paddr, flags, vpn2, asid;
+       uint32_t inst;
+       int index;
+
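+       /*
+        * Fetch the instruction from the guest's mapped or unmapped address
+        * space, faulting the mapping in from the guest TLB if required.
+        */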
+       if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
+           KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+               local_irq_save(flags);
+               index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
+               if (index >= 0) {
+                       inst = *(opc);
+               } else {
+                       vpn2 = (unsigned long) opc & VPN2_MASK;
+                       asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
+                       index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
+                       if (index < 0) {
+                               kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+                                       __func__, opc, vcpu, read_c0_entryhi());
+                               kvm_mips_dump_host_tlbs();
+                               local_irq_restore(flags);
+                               return KVM_INVALID_INST;
+                       }
+                       kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+                                       &vcpu->arch.guest_tlb[index],
+                                       NULL, NULL);
+                       inst = *(opc);
+               }
+               local_irq_restore(flags);
+       } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
+               paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+                                               (unsigned long) opc);
+               inst = *(uint32_t *) CKSEG0ADDR(paddr);
+       } else {
+               kvm_err("%s: illegal address: %p\n", __func__, opc);
+               return KVM_INVALID_INST;
+       }
+
+       return inst;
+}
+EXPORT_SYMBOL(kvm_get_inst);
index bc9e0f4..c1388d4 100644 (file)
@@ -1,11 +1,11 @@
 /*
-* This file is subject to the terms and conditions of the GNU General Public
-* License.  See the file "COPYING" in the main directory of this archive
-* for more details.
-*
-* Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
-* Authors: Sanjay Lal <sanjayl@kymasys.com>
-*/
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
 
 #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_KVM_H
@@ -17,9 +17,7 @@
 #define TRACE_INCLUDE_PATH .
 #define TRACE_INCLUDE_FILE trace
 
-/*
- * Tracepoints for VM eists
- */
+/* Tracepoints for VM exits */
 extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
 
 TRACE_EVENT(kvm_exit,
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
new file mode 100644 (file)
index 0000000..fd7257b
--- /dev/null
@@ -0,0 +1,492 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+
+#include <linux/kvm_host.h>
+
+#include "opcode.h"
+#include "interrupt.h"
+
+static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
+{
+       gpa_t gpa;
+       uint32_t kseg = KSEGX(gva);
+
+       if ((kseg == CKSEG0) || (kseg == CKSEG1))
+               gpa = CPHYSADDR(gva);
+       else {
+               kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
+               kvm_mips_dump_host_tlbs();
+               gpa = KVM_INVALID_ADDR;
+       }
+
+       kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
+
+       return gpa;
+}
+
+static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
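+       /* A Coprocessor Error (CE) value of 1 means the FPU (CP1) was the target */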
+       if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1)
+               er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
+       else
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+
+       switch (er) {
+       case EMULATE_DONE:
+               ret = RESUME_GUEST;
+               break;
+
+       case EMULATE_FAIL:
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               break;
+
+       case EMULATE_WAIT:
+               run->exit_reason = KVM_EXIT_INTR;
+               ret = RESUME_HOST;
+               break;
+
+       default:
+               BUG();
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+           || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+               kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+               er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
+
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /*
+                * XXXKYMA: The guest kernel does not expect to get this fault
+                * when we are not using HIGHMEM. Need to address this in a
+                * HIGHMEM kernel
+                */
+               kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       } else {
+               kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+               kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                         cause, opc, badvaddr);
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               /*
+                * All KSEG0 faults are handled by KVM, as the guest kernel does
+                * not expect to ever get them
+                */
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
+           && KVM_GUEST_KERNEL_MODE(vcpu)) {
+               if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
+                  || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
+               kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
+                         vcpu->arch.pc, badvaddr);
+
+               /*
+                * User Address (UA) fault; this can happen if:
+                * (1) TLB entry not present/valid in both Guest and shadow host
+                *     TLBs, in this case we pass on the fault to the guest
+                *     kernel and let it handle it.
+                * (2) TLB entry is present in the Guest TLB but not in the
+                *     shadow, in this case we inject the TLB from the Guest TLB
+                *     into the shadow host TLB
+                */
+
+               er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
+               if (er == EMULATE_DONE)
+                       ret = RESUME_GUEST;
+               else {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+               if (kvm_mips_handle_kseg0_tlb_fault
+                   (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               kvm_mips_dump_host_tlbs();
+               kvm_arch_vcpu_dump_regs(vcpu);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KVM_GUEST_KERNEL_MODE(vcpu)
+           && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+               kvm_debug("Emulate Store to MMIO space\n");
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       kvm_err("Emulate Store to MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
+               kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
+               er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+               if (er == EMULATE_FAIL) {
+                       kvm_err("Emulate Load from MMIO space failed\n");
+                       run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+                       ret = RESUME_HOST;
+               } else {
+                       run->exit_reason = KVM_EXIT_MMIO;
+                       ret = RESUME_HOST;
+               }
+       } else {
+               kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+                       cause, opc, badvaddr);
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+               er = EMULATE_FAIL;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_handle_ri(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
+{
+       struct kvm_run *run = vcpu->run;
+       uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+       unsigned long cause = vcpu->arch.host_cp0_cause;
+       enum emulation_result er = EMULATE_DONE;
+       int ret = RESUME_GUEST;
+
+       er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
+       if (er == EMULATE_DONE)
+               ret = RESUME_GUEST;
+       else {
+               run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+               ret = RESUME_HOST;
+       }
+       return ret;
+}
+
+static int kvm_trap_emul_vm_init(struct kvm *kvm)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
+{
+       return 0;
+}
+
+static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       uint32_t config1;
+       int vcpu_id = vcpu->vcpu_id;
+
+       /*
+        * Arch specific stuff: set up the config registers so that the
+        * guest comes up as expected; for now we simulate a MIPS 24kc
+        */
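+       /* PRId: company ID 1 (MIPS), processor ID 0x93 (24K class), rev 0 */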
+       kvm_write_c0_guest_prid(cop0, 0x00019300);
+       kvm_write_c0_guest_config(cop0,
+                                 MIPS_CONFIG0 | (0x1 << CP0C0_AR) |
+                                 (MMU_TYPE_R4000 << CP0C0_MT));
+
+       /* Read the cache characteristics from the host Config1 Register */
+       config1 = (read_c0_config1() & ~0x7f);
+
+       /* Set up MMU size */
+       config1 &= ~(0x3f << 25);
+       config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
+
+       /* We unset some bits that we aren't emulating */
+       config1 &=
+           ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
+             (1 << CP0C1_WR) | (1 << CP0C1_CA));
+       kvm_write_c0_guest_config1(cop0, config1);
+
+       kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2);
+       /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */
+       kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) |
+                                        (1 << CP0C3_ULRI));
+
+       /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
+       kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
+
+       /*
+        * Set up IntCtl defaults: compatibility mode for timer interrupts (HW5)
+        */
+       kvm_write_c0_guest_intctl(cop0, 0xFC000000);
+
+       /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
+       kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+
+       return 0;
+}
+
+static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
+                                    const struct kvm_one_reg *reg,
+                                    s64 *v)
+{
+       switch (reg->id) {
+       case KVM_REG_MIPS_CP0_COUNT:
+               *v = kvm_mips_read_count(vcpu);
+               break;
+       case KVM_REG_MIPS_COUNT_CTL:
+               *v = vcpu->arch.count_ctl;
+               break;
+       case KVM_REG_MIPS_COUNT_RESUME:
+               *v = ktime_to_ns(vcpu->arch.count_resume);
+               break;
+       case KVM_REG_MIPS_COUNT_HZ:
+               *v = vcpu->arch.count_hz;
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
+                                    const struct kvm_one_reg *reg,
+                                    s64 v)
+{
+       struct mips_coproc *cop0 = vcpu->arch.cop0;
+       int ret = 0;
+
+       switch (reg->id) {
+       case KVM_REG_MIPS_CP0_COUNT:
+               kvm_mips_write_count(vcpu, v);
+               break;
+       case KVM_REG_MIPS_CP0_COMPARE:
+               kvm_mips_write_compare(vcpu, v);
+               break;
+       case KVM_REG_MIPS_CP0_CAUSE:
+               /*
+                * If the timer is stopped or started (DC bit) it must look
+                * atomic with changes to the interrupt pending bits (TI, IRQ5).
+                * A timer interrupt should not happen in between.
+                */
+               if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
+                       if (v & CAUSEF_DC) {
+                               /* disable timer first */
+                               kvm_mips_count_disable_cause(vcpu);
+                               kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+                       } else {
+                               /* enable timer last */
+                               kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
+                               kvm_mips_count_enable_cause(vcpu);
+                       }
+               } else {
+                       kvm_write_c0_guest_cause(cop0, v);
+               }
+               break;
+       case KVM_REG_MIPS_COUNT_CTL:
+               ret = kvm_mips_set_count_ctl(vcpu, v);
+               break;
+       case KVM_REG_MIPS_COUNT_RESUME:
+               ret = kvm_mips_set_count_resume(vcpu, v);
+               break;
+       case KVM_REG_MIPS_COUNT_HZ:
+               ret = kvm_mips_set_count_hz(vcpu, v);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return ret;
+}
+
+static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
+       /* exit handlers */
+       .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
+       .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
+       .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
+       .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
+       .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
+       .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
+       .handle_syscall = kvm_trap_emul_handle_syscall,
+       .handle_res_inst = kvm_trap_emul_handle_res_inst,
+       .handle_break = kvm_trap_emul_handle_break,
+
+       .vm_init = kvm_trap_emul_vm_init,
+       .vcpu_init = kvm_trap_emul_vcpu_init,
+       .vcpu_setup = kvm_trap_emul_vcpu_setup,
+       .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
+       .queue_timer_int = kvm_mips_queue_timer_int_cb,
+       .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
+       .queue_io_int = kvm_mips_queue_io_int_cb,
+       .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
+       .irq_deliver = kvm_mips_irq_deliver_cb,
+       .irq_clear = kvm_mips_irq_clear_cb,
+       .get_one_reg = kvm_trap_emul_get_one_reg,
+       .set_one_reg = kvm_trap_emul_set_one_reg,
+};
+
+int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
+{
+       *install_callbacks = &kvm_trap_emul_callbacks;
+       return 0;
+}
index 53f1d22..8e97acb 100644 (file)
  * Special constants
  */
 
-#define DPCNST(s, b, m)                                                        \
+/*
+ * Older GCC requires the inner braces for initialization of union ieee754dp's
+ * anonymous struct member.  Without them, an error will result.
+ */
+#define xPCNST(s, b, m, ebias)                                         \
 {                                                                      \
-       .sign   = (s),                                                  \
-       .bexp   = (b) + DP_EBIAS,                                       \
-       .mant   = (m)                                                   \
+       {                                                               \
+               .sign   = (s),                                          \
+               .bexp   = (b) + ebias,                                  \
+               .mant   = (m)                                           \
+       }                                                               \
 }
 
+#define DPCNST(s, b, m)                                                        \
+       xPCNST(s, b, m, DP_EBIAS)
+
 const union ieee754dp __ieee754dp_spcvals[] = {
        DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL),     /* + zero   */
        DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL),     /* - zero   */
@@ -62,11 +71,7 @@ const union ieee754dp __ieee754dp_spcvals[] = {
 };
 
 #define SPCNST(s, b, m)                                                        \
-{                                                                      \
-       .sign   = (s),                                                  \
-       .bexp   = (b) + SP_EBIAS,                                       \
-       .mant   = (m)                                                   \
-}
+       xPCNST(s, b, m, SP_EBIAS)
 
 const union ieee754sp __ieee754sp_spcvals[] = {
        SPCNST(0, SP_EMIN - 1, 0x000000),       /* + zero   */
index 775c280..8399ddf 100644 (file)
@@ -102,6 +102,7 @@ static struct insn insn_table_MM[] = {
        { insn_sd, 0, 0 },
        { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
        { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
+       { insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
        { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
        { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
        { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
index 38792c2..6708a2d 100644 (file)
@@ -89,7 +89,7 @@ static struct insn insn_table[] = {
        { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-       { insn_lh,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
@@ -110,6 +110,7 @@ static struct insn insn_table[] = {
        { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
        { insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
+       { insn_slt,  M(spec_op, 0, 0, 0, 0, slt_op),  RS | RT | RD },
        { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
        { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
index 0051580..a01b0d6 100644 (file)
@@ -53,7 +53,7 @@ enum opcode {
        insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
        insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
        insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
-       insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra,
+       insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
        insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
        insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
        insn_xor, insn_xori, insn_yield,
@@ -139,6 +139,13 @@ Ip_u1u2u3(op)                                              \
 }                                                      \
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_s3s1s2(op)                                   \
+Ip_s3s1s2(op)                                          \
+{                                                      \
+       build_insn(buf, insn##op, b, c, a);             \
+}                                                      \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u2u1u3(op)                                   \
 Ip_u2u1u3(op)                                          \
 {                                                      \
@@ -289,6 +296,7 @@ I_u2s3u1(_scd)
 I_u2s3u1(_sd)
 I_u2u1u3(_sll)
 I_u3u2u1(_sllv)
+I_s3s1s2(_slt)
 I_u2u1s3(_sltiu)
 I_u3u1u2(_sltu)
 I_u2u1u3(_sra)
index a67b975..b87390a 100644 (file)
 /* Arguments used by JIT */
 #define ARGS_USED_BY_JIT       2 /* only applicable to 64-bit */
 
-#define FLAG_NEED_X_RESET      (1 << 0)
-
 #define SBIT(x)                        (1 << (x)) /* Signed version of BIT() */
 
 /**
@@ -153,6 +151,8 @@ static inline int optimize_div(u32 *k)
        return 0;
 }
 
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
+
 /* Simply emit the instruction if the JIT memory space has been allocated */
 #define emit_instr(ctx, func, ...)                     \
 do {                                                   \
@@ -166,9 +166,7 @@ do {                                                        \
 /* Determine if immediate is within the 16-bit signed range */
 static inline bool is_range16(s32 imm)
 {
-       if (imm >= SBIT(15) || imm < -SBIT(15))
-               return true;
-       return false;
+       return !(imm >= SBIT(15) || imm < -SBIT(15));
 }
 
 static inline void emit_addu(unsigned int dst, unsigned int src1,
@@ -187,7 +185,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
 {
        if (ctx->target != NULL) {
                /* addiu can only handle s16 */
-               if (is_range16(imm)) {
+               if (!is_range16(imm)) {
                        u32 *p = &ctx->target[ctx->idx];
                        uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
                        p = &ctx->target[ctx->idx + 1];
@@ -199,7 +197,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
        }
        ctx->idx++;
 
-       if (is_range16(imm))
+       if (!is_range16(imm))
                ctx->idx++;
 }
 
@@ -240,7 +238,7 @@ static inline void emit_daddiu(unsigned int dst, unsigned int src,
 static inline void emit_addiu(unsigned int dst, unsigned int src,
                              u32 imm, struct jit_ctx *ctx)
 {
-       if (is_range16(imm)) {
+       if (!is_range16(imm)) {
                emit_load_imm(r_tmp, imm, ctx);
                emit_addu(dst, r_tmp, src, ctx);
        } else {
@@ -313,8 +311,11 @@ static inline void emit_sll(unsigned int dst, unsigned int src,
                            unsigned int sa, struct jit_ctx *ctx)
 {
        /* sa is 5-bits long */
-       BUG_ON(sa >= BIT(5));
-       emit_instr(ctx, sll, dst, src, sa);
+       if (sa >= BIT(5))
+               /* Shifting >= 32 results in zero */
+               emit_jit_reg_move(dst, r_zero, ctx);
+       else
+               emit_instr(ctx, sll, dst, src, sa);
 }
 
 static inline void emit_srlv(unsigned int dst, unsigned int src,
@@ -327,8 +328,17 @@ static inline void emit_srl(unsigned int dst, unsigned int src,
                            unsigned int sa, struct jit_ctx *ctx)
 {
        /* sa is 5-bits long */
-       BUG_ON(sa >= BIT(5));
-       emit_instr(ctx, srl, dst, src, sa);
+       if (sa >= BIT(5))
+               /* Shifting >= 32 results in zero */
+               emit_jit_reg_move(dst, r_zero, ctx);
+       else
+               emit_instr(ctx, srl, dst, src, sa);
+}
+
+static inline void emit_slt(unsigned int dst, unsigned int src1,
+                           unsigned int src2, struct jit_ctx *ctx)
+{
+       emit_instr(ctx, slt, dst, src1, src2);
 }
 
 static inline void emit_sltu(unsigned int dst, unsigned int src1,
@@ -341,7 +351,7 @@ static inline void emit_sltiu(unsigned dst, unsigned int src,
                              unsigned int imm, struct jit_ctx *ctx)
 {
        /* 16 bit immediate */
-       if (is_range16((s32)imm)) {
+       if (!is_range16((s32)imm)) {
                emit_load_imm(r_tmp, imm, ctx);
                emit_sltu(dst, src, r_tmp, ctx);
        } else {
@@ -408,7 +418,7 @@ static inline void emit_div(unsigned int dst, unsigned int src,
                u32 *p = &ctx->target[ctx->idx];
                uasm_i_divu(&p, dst, src);
                p = &ctx->target[ctx->idx + 1];
-               uasm_i_mfhi(&p, dst);
+               uasm_i_mflo(&p, dst);
        }
        ctx->idx += 2; /* 2 insts */
 }
@@ -443,6 +453,17 @@ static inline void emit_wsbh(unsigned int dst, unsigned int src,
        emit_instr(ctx, wsbh, dst, src);
 }
 
+/* load pointer to register */
+static inline void emit_load_ptr(unsigned int dst, unsigned int src,
+                                    int imm, struct jit_ctx *ctx)
+{
+       /* src contains the base addr of the 32/64-pointer */
+       if (config_enabled(CONFIG_64BIT))
+               emit_instr(ctx, ld, dst, imm, src);
+       else
+               emit_instr(ctx, lw, dst, imm, src);
+}
+
 /* load a function pointer to register */
 static inline void emit_load_func(unsigned int reg, ptr imm,
                                  struct jit_ctx *ctx)
@@ -545,29 +566,13 @@ static inline u16 align_sp(unsigned int num)
        return num;
 }
 
-static inline void update_on_xread(struct jit_ctx *ctx)
-{
-       if (!(ctx->flags & SEEN_X))
-               ctx->flags |= FLAG_NEED_X_RESET;
-
-       ctx->flags |= SEEN_X;
-}
-
 static bool is_load_to_a(u16 inst)
 {
        switch (inst) {
-       case BPF_S_LD_W_LEN:
-       case BPF_S_LD_W_ABS:
-       case BPF_S_LD_H_ABS:
-       case BPF_S_LD_B_ABS:
-       case BPF_S_ANC_CPU:
-       case BPF_S_ANC_IFINDEX:
-       case BPF_S_ANC_MARK:
-       case BPF_S_ANC_PROTOCOL:
-       case BPF_S_ANC_RXHASH:
-       case BPF_S_ANC_VLAN_TAG:
-       case BPF_S_ANC_VLAN_TAG_PRESENT:
-       case BPF_S_ANC_QUEUE:
+       case BPF_LD | BPF_W | BPF_LEN:
+       case BPF_LD | BPF_W | BPF_ABS:
+       case BPF_LD | BPF_H | BPF_ABS:
+       case BPF_LD | BPF_B | BPF_ABS:
                return true;
        default:
                return false;
@@ -618,7 +623,10 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
        if (ctx->flags & SEEN_MEM) {
                if (real_off % (RSIZE * 2))
                        real_off += RSIZE;
-               emit_addiu(r_M, r_sp, real_off, ctx);
+               if (config_enabled(CONFIG_64BIT))
+                       emit_daddiu(r_M, r_sp, real_off, ctx);
+               else
+                       emit_addiu(r_M, r_sp, real_off, ctx);
        }
 }
 
@@ -705,11 +713,11 @@ static void build_prologue(struct jit_ctx *ctx)
        if (ctx->flags & SEEN_SKB)
                emit_reg_move(r_skb, MIPS_R_A0, ctx);
 
-       if (ctx->flags & FLAG_NEED_X_RESET)
+       if (ctx->flags & SEEN_X)
                emit_jit_reg_move(r_X, r_zero, ctx);
 
        /* Do not leak kernel data to userspace */
-       if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
+       if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
                emit_jit_reg_move(r_A, r_zero, ctx);
 }
 
@@ -757,13 +765,17 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
        return (u64)err << 32 | ntohl(ret);
 }
 
-#define PKT_TYPE_MAX 7
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX   (7 << 5)
+#else
+#define PKT_TYPE_MAX   7
+#endif
 static int pkt_type_offset(void)
 {
        struct sk_buff skb_probe = {
                .pkt_type = ~0,
        };
-       char *ct = (char *)&skb_probe;
+       u8 *ct = (u8 *)&skb_probe;
        unsigned int off;
 
        for (off = 0; off < sizeof(struct sk_buff); off++) {
@@ -783,46 +795,62 @@ static int build_body(struct jit_ctx *ctx)
        u32 k, b_off __maybe_unused;
 
        for (i = 0; i < prog->len; i++) {
+               u16 code;
+
                inst = &(prog->insns[i]);
                pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
                         __func__, inst->code, inst->jt, inst->jf, inst->k);
                k = inst->k;
+               code = bpf_anc_helper(inst);
 
                if (ctx->target == NULL)
                        ctx->offsets[i] = ctx->idx * 4;
 
-               switch (inst->code) {
-               case BPF_S_LD_IMM:
+               switch (code) {
+               case BPF_LD | BPF_IMM:
                        /* A <- k ==> li r_A, k */
                        ctx->flags |= SEEN_A;
                        emit_load_imm(r_A, k, ctx);
                        break;
-               case BPF_S_LD_W_LEN:
+               case BPF_LD | BPF_W | BPF_LEN:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        /* A <- len ==> lw r_A, offset(skb) */
                        ctx->flags |= SEEN_SKB | SEEN_A;
                        off = offsetof(struct sk_buff, len);
                        emit_load(r_A, r_skb, off, ctx);
                        break;
-               case BPF_S_LD_MEM:
+               case BPF_LD | BPF_MEM:
                        /* A <- M[k] ==> lw r_A, offset(M) */
                        ctx->flags |= SEEN_MEM | SEEN_A;
                        emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
                        break;
-               case BPF_S_LD_W_ABS:
+               case BPF_LD | BPF_W | BPF_ABS:
                        /* A <- P[k:4] */
                        load_order = 2;
                        goto load;
-               case BPF_S_LD_H_ABS:
+               case BPF_LD | BPF_H | BPF_ABS:
                        /* A <- P[k:2] */
                        load_order = 1;
                        goto load;
-               case BPF_S_LD_B_ABS:
+               case BPF_LD | BPF_B | BPF_ABS:
                        /* A <- P[k:1] */
                        load_order = 0;
 load:
+                       /* the interpreter will deal with a negative K */
+                       if ((int)k < 0)
+                               return -ENOTSUPP;
+
                        emit_load_imm(r_off, k, ctx);
 load_common:
+                       /*
+                        * We may get here from an indirect load, so
+                        * return if the offset is negative.
+                        */
+                       emit_slt(r_s0, r_off, r_zero, ctx);
+                       emit_bcond(MIPS_COND_NE, r_s0, r_zero,
+                                  b_imm(prog->len, ctx), ctx);
+                       emit_reg_move(r_ret, r_zero, ctx);
+
                        ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 |
                                SEEN_SKB | SEEN_A;
 
@@ -852,39 +880,42 @@ load_common:
                        emit_b(b_imm(prog->len, ctx), ctx);
                        emit_reg_move(r_ret, r_zero, ctx);
                        break;
-               case BPF_S_LD_W_IND:
+               case BPF_LD | BPF_W | BPF_IND:
                        /* A <- P[X + k:4] */
                        load_order = 2;
                        goto load_ind;
-               case BPF_S_LD_H_IND:
+               case BPF_LD | BPF_H | BPF_IND:
                        /* A <- P[X + k:2] */
                        load_order = 1;
                        goto load_ind;
-               case BPF_S_LD_B_IND:
+               case BPF_LD | BPF_B | BPF_IND:
                        /* A <- P[X + k:1] */
                        load_order = 0;
 load_ind:
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_OFF | SEEN_X;
                        emit_addiu(r_off, r_X, k, ctx);
                        goto load_common;
-               case BPF_S_LDX_IMM:
+               case BPF_LDX | BPF_IMM:
                        /* X <- k */
                        ctx->flags |= SEEN_X;
                        emit_load_imm(r_X, k, ctx);
                        break;
-               case BPF_S_LDX_MEM:
+               case BPF_LDX | BPF_MEM:
                        /* X <- M[k] */
                        ctx->flags |= SEEN_X | SEEN_MEM;
                        emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
                        break;
-               case BPF_S_LDX_W_LEN:
+               case BPF_LDX | BPF_W | BPF_LEN:
                        /* X <- len */
                        ctx->flags |= SEEN_X | SEEN_SKB;
                        off = offsetof(struct sk_buff, len);
                        emit_load(r_X, r_skb, off, ctx);
                        break;
-               case BPF_S_LDX_B_MSH:
+               case BPF_LDX | BPF_B | BPF_MSH:
+                       /* the interpreter will deal with a negative K */
+                       if ((int)k < 0)
+                               return -ENOTSUPP;
+
                        /* X <- 4 * (P[k:1] & 0xf) */
                        ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB;
                        /* Load offset to a1 */
@@ -917,50 +948,49 @@ load_ind:
                        emit_b(b_imm(prog->len, ctx), ctx);
                        emit_load_imm(r_ret, 0, ctx); /* delay slot */
                        break;
-               case BPF_S_ST:
+               case BPF_ST:
                        /* M[k] <- A */
                        ctx->flags |= SEEN_MEM | SEEN_A;
                        emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
                        break;
-               case BPF_S_STX:
+               case BPF_STX:
                        /* M[k] <- X */
                        ctx->flags |= SEEN_MEM | SEEN_X;
                        emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
                        break;
-               case BPF_S_ALU_ADD_K:
+               case BPF_ALU | BPF_ADD | BPF_K:
                        /* A += K */
                        ctx->flags |= SEEN_A;
                        emit_addiu(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_ADD_X:
+               case BPF_ALU | BPF_ADD | BPF_X:
                        /* A += X */
                        ctx->flags |= SEEN_A | SEEN_X;
                        emit_addu(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_SUB_K:
+               case BPF_ALU | BPF_SUB | BPF_K:
                        /* A -= K */
                        ctx->flags |= SEEN_A;
                        emit_addiu(r_A, r_A, -k, ctx);
                        break;
-               case BPF_S_ALU_SUB_X:
+               case BPF_ALU | BPF_SUB | BPF_X:
                        /* A -= X */
                        ctx->flags |= SEEN_A | SEEN_X;
                        emit_subu(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_MUL_K:
+               case BPF_ALU | BPF_MUL | BPF_K:
                        /* A *= K */
                        /* Load K to scratch register before MUL */
                        ctx->flags |= SEEN_A | SEEN_S0;
                        emit_load_imm(r_s0, k, ctx);
                        emit_mul(r_A, r_A, r_s0, ctx);
                        break;
-               case BPF_S_ALU_MUL_X:
+               case BPF_ALU | BPF_MUL | BPF_X:
                        /* A *= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_A | SEEN_X;
                        emit_mul(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_DIV_K:
+               case BPF_ALU | BPF_DIV | BPF_K:
                        /* A /= k */
                        if (k == 1)
                                break;
@@ -973,7 +1003,7 @@ load_ind:
                        emit_load_imm(r_s0, k, ctx);
                        emit_div(r_A, r_s0, ctx);
                        break;
-               case BPF_S_ALU_MOD_K:
+               case BPF_ALU | BPF_MOD | BPF_K:
                        /* A %= k */
                        if (k == 1 || optimize_div(&k)) {
                                ctx->flags |= SEEN_A;
@@ -984,9 +1014,8 @@ load_ind:
                                emit_mod(r_A, r_s0, ctx);
                        }
                        break;
-               case BPF_S_ALU_DIV_X:
+               case BPF_ALU | BPF_DIV | BPF_X:
                        /* A /= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_X | SEEN_A;
                        /* Check if r_X is zero */
                        emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -994,9 +1023,8 @@ load_ind:
                        emit_load_imm(r_val, 0, ctx); /* delay slot */
                        emit_div(r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_MOD_X:
+               case BPF_ALU | BPF_MOD | BPF_X:
                        /* A %= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_X | SEEN_A;
                        /* Check if r_X is zero */
                        emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -1004,94 +1032,89 @@ load_ind:
                        emit_load_imm(r_val, 0, ctx); /* delay slot */
                        emit_mod(r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_OR_K:
+               case BPF_ALU | BPF_OR | BPF_K:
                        /* A |= K */
                        ctx->flags |= SEEN_A;
                        emit_ori(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_OR_X:
+               case BPF_ALU | BPF_OR | BPF_X:
                        /* A |= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_A;
                        emit_ori(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_XOR_K:
+               case BPF_ALU | BPF_XOR | BPF_K:
                        /* A ^= k */
                        ctx->flags |= SEEN_A;
                        emit_xori(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ANC_ALU_XOR_X:
-               case BPF_S_ALU_XOR_X:
+               case BPF_ANC | SKF_AD_ALU_XOR_X:
+               case BPF_ALU | BPF_XOR | BPF_X:
                        /* A ^= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_A;
                        emit_xor(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_AND_K:
+               case BPF_ALU | BPF_AND | BPF_K:
                        /* A &= K */
                        ctx->flags |= SEEN_A;
                        emit_andi(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_AND_X:
+               case BPF_ALU | BPF_AND | BPF_X:
                        /* A &= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_A | SEEN_X;
                        emit_and(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_LSH_K:
+               case BPF_ALU | BPF_LSH | BPF_K:
                        /* A <<= K */
                        ctx->flags |= SEEN_A;
                        emit_sll(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_LSH_X:
+               case BPF_ALU | BPF_LSH | BPF_X:
                        /* A <<= X */
                        ctx->flags |= SEEN_A | SEEN_X;
-                       update_on_xread(ctx);
                        emit_sllv(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_RSH_K:
+               case BPF_ALU | BPF_RSH | BPF_K:
                        /* A >>= K */
                        ctx->flags |= SEEN_A;
                        emit_srl(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_RSH_X:
+               case BPF_ALU | BPF_RSH | BPF_X:
                        ctx->flags |= SEEN_A | SEEN_X;
-                       update_on_xread(ctx);
                        emit_srlv(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_NEG:
+               case BPF_ALU | BPF_NEG:
                        /* A = -A */
                        ctx->flags |= SEEN_A;
                        emit_neg(r_A, ctx);
                        break;
-               case BPF_S_JMP_JA:
+               case BPF_JMP | BPF_JA:
                        /* pc += K */
                        emit_b(b_imm(i + k + 1, ctx), ctx);
                        emit_nop(ctx);
                        break;
-               case BPF_S_JMP_JEQ_K:
+               case BPF_JMP | BPF_JEQ | BPF_K:
                        /* pc += ( A == K ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_EQ | MIPS_COND_K;
                        goto jmp_cmp;
-               case BPF_S_JMP_JEQ_X:
+               case BPF_JMP | BPF_JEQ | BPF_X:
                        ctx->flags |= SEEN_X;
                        /* pc += ( A == X ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_EQ | MIPS_COND_X;
                        goto jmp_cmp;
-               case BPF_S_JMP_JGE_K:
+               case BPF_JMP | BPF_JGE | BPF_K:
                        /* pc += ( A >= K ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_GE | MIPS_COND_K;
                        goto jmp_cmp;
-               case BPF_S_JMP_JGE_X:
+               case BPF_JMP | BPF_JGE | BPF_X:
                        ctx->flags |= SEEN_X;
                        /* pc += ( A >= X ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_GE | MIPS_COND_X;
                        goto jmp_cmp;
-               case BPF_S_JMP_JGT_K:
+               case BPF_JMP | BPF_JGT | BPF_K:
                        /* pc += ( A > K ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_GT | MIPS_COND_K;
                        goto jmp_cmp;
-               case BPF_S_JMP_JGT_X:
+               case BPF_JMP | BPF_JGT | BPF_X:
                        ctx->flags |= SEEN_X;
                        /* pc += ( A > X ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_GT | MIPS_COND_X;
@@ -1109,7 +1132,7 @@ jmp_cmp:
                                }
                                /* A < (K|X) ? r_scratch = 1 */
                                b_off = b_imm(i + inst->jf + 1, ctx);
-                               emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off,
+                               emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
                                           ctx);
                                emit_nop(ctx);
                                /* A > (K|X) ? scratch = 0 */
@@ -1167,7 +1190,7 @@ jmp_cmp:
                                }
                        }
                        break;
-               case BPF_S_JMP_JSET_K:
+               case BPF_JMP | BPF_JSET | BPF_K:
                        ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A;
                        /* pc += (A & K) ? pc -> jt : pc -> jf */
                        emit_load_imm(r_s1, k, ctx);
@@ -1181,7 +1204,7 @@ jmp_cmp:
                        emit_b(b_off, ctx);
                        emit_nop(ctx);
                        break;
-               case BPF_S_JMP_JSET_X:
+               case BPF_JMP | BPF_JSET | BPF_X:
                        ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A;
                        /* pc += (A & X) ? pc -> jt : pc -> jf */
                        emit_and(r_s0, r_A, r_X, ctx);
@@ -1194,7 +1217,7 @@ jmp_cmp:
                        emit_b(b_off, ctx);
                        emit_nop(ctx);
                        break;
-               case BPF_S_RET_A:
+               case BPF_RET | BPF_A:
                        ctx->flags |= SEEN_A;
                        if (i != prog->len - 1)
                                /*
@@ -1204,7 +1227,7 @@ jmp_cmp:
                                emit_b(b_imm(prog->len, ctx), ctx);
                        emit_reg_move(r_ret, r_A, ctx); /* delay slot */
                        break;
-               case BPF_S_RET_K:
+               case BPF_RET | BPF_K:
                        /*
                         * It can emit two instructions so it does not fit on
                         * the delay slot.
@@ -1219,19 +1242,18 @@ jmp_cmp:
                                emit_nop(ctx);
                        }
                        break;
-               case BPF_S_MISC_TAX:
+               case BPF_MISC | BPF_TAX:
                        /* X = A */
                        ctx->flags |= SEEN_X | SEEN_A;
                        emit_jit_reg_move(r_X, r_A, ctx);
                        break;
-               case BPF_S_MISC_TXA:
+               case BPF_MISC | BPF_TXA:
                        /* A = X */
                        ctx->flags |= SEEN_A | SEEN_X;
-                       update_on_xread(ctx);
                        emit_jit_reg_move(r_A, r_X, ctx);
                        break;
                /* AUX */
-               case BPF_S_ANC_PROTOCOL:
+               case BPF_ANC | SKF_AD_PROTOCOL:
                        /* A = ntohs(skb->protocol) */
                        ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -1256,7 +1278,7 @@ jmp_cmp:
                        }
 #endif
                        break;
-               case BPF_S_ANC_CPU:
+               case BPF_ANC | SKF_AD_CPU:
                        ctx->flags |= SEEN_A | SEEN_OFF;
                        /* A = current_thread_info()->cpu */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
@@ -1265,11 +1287,12 @@ jmp_cmp:
                        /* $28/gp points to the thread_info struct */
                        emit_load(r_A, 28, off, ctx);
                        break;
-               case BPF_S_ANC_IFINDEX:
+               case BPF_ANC | SKF_AD_IFINDEX:
                        /* A = skb->dev->ifindex */
                        ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0;
                        off = offsetof(struct sk_buff, dev);
-                       emit_load(r_s0, r_skb, off, ctx);
+                       /* Load *dev pointer */
+                       emit_load_ptr(r_s0, r_skb, off, ctx);
                        /* error (0) in the delay slot */
                        emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
                                   b_imm(prog->len, ctx), ctx);
@@ -1279,31 +1302,36 @@ jmp_cmp:
                        off = offsetof(struct net_device, ifindex);
                        emit_load(r_A, r_s0, off, ctx);
                        break;
-               case BPF_S_ANC_MARK:
+               case BPF_ANC | SKF_AD_MARK:
                        ctx->flags |= SEEN_SKB | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        off = offsetof(struct sk_buff, mark);
                        emit_load(r_A, r_skb, off, ctx);
                        break;
-               case BPF_S_ANC_RXHASH:
+               case BPF_ANC | SKF_AD_RXHASH:
                        ctx->flags |= SEEN_SKB | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        off = offsetof(struct sk_buff, hash);
                        emit_load(r_A, r_skb, off, ctx);
                        break;
-               case BPF_S_ANC_VLAN_TAG:
-               case BPF_S_ANC_VLAN_TAG_PRESENT:
+               case BPF_ANC | SKF_AD_VLAN_TAG:
+               case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  vlan_tci) != 2);
                        off = offsetof(struct sk_buff, vlan_tci);
                        emit_half_load(r_s0, r_skb, off, ctx);
-                       if (inst->code == BPF_S_ANC_VLAN_TAG)
-                               emit_and(r_A, r_s0, VLAN_VID_MASK, ctx);
-                       else
-                               emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
+                       if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+                               emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
+                       } else {
+                               emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
+                               /* return 1 if present */
+                               emit_sltu(r_A, r_zero, r_A, ctx);
+                       }
                        break;
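The vlan_tci word carries the 12-bit VLAN ID, the PCP/DEI bits and, in kernels of this vintage, a VLAN_TAG_PRESENT flag bit. Masking off only that flag (rather than keeping just VLAN_VID_MASK) preserves the priority bits for SKF_AD_VLAN_TAG, and sltu r_A, r_zero, r_A turns any non-zero flag into a clean 0/1 for SKF_AD_VLAN_TAG_PRESENT. A hedged userspace model, assuming the historical VLAN_TAG_PRESENT value of 0x1000:

    #include <stdio.h>

    #define VLAN_TAG_PRESENT 0x1000  /* historical value, assumed here */

    int main(void)
    {
        unsigned short vlan_tci = VLAN_TAG_PRESENT | 0x0fff;

        /* SKF_AD_VLAN_TAG: drop only the present bit */
        unsigned int tag = vlan_tci & (unsigned short)~VLAN_TAG_PRESENT;
        /* SKF_AD_VLAN_TAG_PRESENT: booleanize, as sltu does */
        unsigned int present = (vlan_tci & VLAN_TAG_PRESENT) != 0;

        printf("tag=%#x present=%u\n", tag, present);
        return 0;
    }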
-               case BPF_S_ANC_PKTTYPE:
+               case BPF_ANC | SKF_AD_PKTTYPE:
+                       ctx->flags |= SEEN_SKB;
+
                        off = pkt_type_offset();
 
                        if (off < 0)
@@ -1311,8 +1339,12 @@ jmp_cmp:
                        emit_load_byte(r_tmp, r_skb, off, ctx);
                        /* Keep only the last 3 bits */
                        emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+                       /* Get the actual packet type to the lower 3 bits */
+                       emit_srl(r_A, r_A, 5, ctx);
+#endif
                        break;
-               case BPF_S_ANC_QUEUE:
+               case BPF_ANC | SKF_AD_QUEUE:
                        ctx->flags |= SEEN_SKB | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
@@ -1322,8 +1354,8 @@ jmp_cmp:
                        emit_half_load(r_A, r_skb, off, ctx);
                        break;
                default:
-                       pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__,
-                               inst->code);
+                       pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
+                                inst->code);
                        return -1;
                }
        }
index 8b80b19..769d5ed 100644 (file)
@@ -68,7 +68,9 @@ extern struct mn10300_cpuinfo cpu_data[];
 extern void identify_cpu(struct mn10300_cpuinfo *);
 extern void print_cpu_info(struct mn10300_cpuinfo *);
 extern void dodgy_tsc(void);
+
 #define cpu_relax() barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /*
  * User space process size: 1.75GB (default).
index cab746f..4d235e3 100644 (file)
@@ -101,6 +101,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
 #define init_stack      (init_thread_union.stack)
 
 #define cpu_relax()     barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_OPENRISC_PROCESSOR_H */
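These one-line additions, repeated across several architectures in this series, wire up cpu_relax_lowlatency(), the relax variant the generic mutex/rwsem optimistic-spinning code now calls in tight wait loops; on arches with no cheaper hint it simply aliases cpu_relax(). A hedged userspace model of the intended spin-wait pattern:

    #include <stdatomic.h>

    /* barrier()-style relax: just a compiler barrier, as on these arches */
    static inline void relax_lowlatency(void)
    {
        __asm__ __volatile__("" ::: "memory");
    }

    static void spin_wait(atomic_int *flag)
    {
        while (!atomic_load_explicit(flag, memory_order_acquire))
            relax_lowlatency();
    }

    int main(void)
    {
        atomic_int flag = 1;  /* already set, so the loop exits at once */
        spin_wait(&flag);
        return 0;
    }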
index 108d48e..6e75e20 100644 (file)
@@ -6,7 +6,6 @@ config PARISC
        select HAVE_OPROFILE
        select HAVE_FUNCTION_TRACER if 64BIT
        select HAVE_FUNCTION_GRAPH_TRACER if 64BIT
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST if 64BIT
        select ARCH_WANT_FRAME_POINTERS
        select RTC_CLASS
        select RTC_DRV_GENERIC
index d951c96..689a8ad 100644 (file)
@@ -338,6 +338,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)  ((tsk)->thread.regs.gr[30])
 
 #define cpu_relax()    barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Used as a macro to identify the combined VIPT/PIPT cached
  * CPUs which require a guarantee of coherency (no inequivalent
index a2fa297..f5645d6 100644 (file)
@@ -69,8 +69,6 @@
 #define SA_NOMASK      SA_NODEFER
 #define SA_ONESHOT     SA_RESETHAND
 
-#define SA_RESTORER    0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ    2048
 #define SIGSTKSZ       8192
 
index 5beb97b..559d400 100644 (file)
@@ -112,6 +112,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
        unsigned long long calltime;
        struct ftrace_graph_ent trace;
 
+       if (unlikely(ftrace_graph_is_dead()))
+               return;
+
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
@@ -152,9 +155,6 @@ void ftrace_function_trampoline(unsigned long parent,
 {
        extern ftrace_func_t ftrace_trace_function;
 
-       if (function_trace_stop)
-               return;
-
        if (ftrace_trace_function != ftrace_stub) {
                ftrace_trace_function(parent, self_addr);
                return;
index 608716f..af3bc35 100644 (file)
@@ -1210,7 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = {
        {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 
        {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 
        {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 
-       {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
+       {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
+       {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
        {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 
        {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 
        {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 
index bb9f3b6..93c1963 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2000-2001 Hewlett Packard Company
  * Copyright (C) 2000 John Marvin
  * Copyright (C) 2001 Matthew Wilcox
+ * Copyright (C) 2014 Helge Deller <deller@gmx.de>
  *
  * These routines maintain argument size conversion between 32bit and 64bit
  * environment. Based heavily on sys_ia32.c and sys_sparc32.c.
 
 #include <linux/compat.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h> 
-#include <linux/mm.h> 
-#include <linux/file.h> 
-#include <linux/signal.h>
-#include <linux/resource.h>
-#include <linux/times.h>
-#include <linux/time.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/shm.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/ncp_fs.h>
-#include <linux/poll.h>
-#include <linux/personality.h>
-#include <linux/stat.h>
-#include <linux/highmem.h>
-#include <linux/highuid.h>
-#include <linux/mman.h>
-#include <linux/binfmts.h>
-#include <linux/namei.h>
-#include <linux/vfs.h>
-#include <linux/ptrace.h>
-#include <linux/swap.h>
 #include <linux/syscalls.h>
 
-#include <asm/types.h>
-#include <asm/uaccess.h>
-#include <asm/mmu_context.h>
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x) printk x
-#else
-#define DBG(x)
-#endif
 
 asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
        int r22, int r21, int r20)
@@ -57,3 +22,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
        current->comm, current->pid, r20);
     return -ENOSYS;
 }
+
+asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+       compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+       const char  __user * pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags,
+                       ((__u64)mask1 << 32) | mask0,
+                        dfd, pathname);
+}
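fanotify_mark() takes a 64-bit mask, which a 32-bit compat caller must pass as two 32-bit register halves; the generic compat wrapper assumed an argument order that does not match parisc's calling convention, so the new arch wrapper reassembles the mask itself (and the syscall table entry below switches from ENTRY_COMP to ENTRY_DIFF to reach it). A minimal model of the merge:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t merge_mask(uint32_t mask0, uint32_t mask1)
    {
        return ((uint64_t)mask1 << 32) | mask0;  /* high half, then low */
    }

    int main(void)
    {
        /* e.g. one flag in the low word and one in the high word */
        printf("%#llx\n", (unsigned long long)merge_mask(0x20, 0x1));
        return 0;
    }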
index c5fa7a6..84c5d3a 100644 (file)
        ENTRY_SAME(accept4)             /* 320 */
        ENTRY_SAME(prlimit64)
        ENTRY_SAME(fanotify_init)
-       ENTRY_COMP(fanotify_mark)
+       ENTRY_DIFF(fanotify_mark)
        ENTRY_COMP(clock_adjtime)
        ENTRY_SAME(name_to_handle_at)   /* 325 */
        ENTRY_COMP(open_by_handle_at)
index ae085ad..0bef864 100644 (file)
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
 #endif
 
        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-       memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
index bd6dd6e..80b94b0 100644 (file)
@@ -145,6 +145,7 @@ config PPC
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select ARCH_USE_CMPXCHG_LOCKREF if PPC64
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
@@ -414,7 +415,7 @@ config KEXEC
 config CRASH_DUMP
        bool "Build a kdump crash kernel"
        depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
-       select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
+       select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
        help
          Build a kernel suitable for use as a kdump capture kernel.
          The same kernel binary can be used as production kernel and dump
@@ -1017,6 +1018,7 @@ endmenu
 if PPC64
 config RELOCATABLE
        bool "Build a relocatable kernel"
+       depends on !COMPILE_TEST
        select NONSTATIC_KERNEL
        help
          This builds a kernel image that is capable of running anywhere
index 790352f..35d16bd 100644 (file)
@@ -303,7 +303,6 @@ config PPC_EARLY_DEBUG_OPAL_VTERMNO
          This corresponds to which /dev/hvcN you want to use for early
          debug.
 
-         On OPAL v1 (takeover) this should always be 0
          On OPAL v2, this will be 0 for network console and 1 or 2 for
          the machine built-in serial ports.
 
index f75b4f8..7d4a6a2 100644 (file)
@@ -32,7 +32,8 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-       compatible = "fsl,sec-v6.0";
+       compatible = "fsl,sec-v6.0", "fsl,sec-v5.0",
+                    "fsl,sec-v4.0";
        fsl,sec-era = <6>;
        #address-cells = <1>;
        #size-cells = <1>;
index 37991e1..840a550 100644 (file)
@@ -88,4 +88,15 @@ static inline unsigned long ppc_function_entry(void *func)
 #endif
 }
 
+static inline unsigned long ppc_global_function_entry(void *func)
+{
+#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
+       /* On PPC64 ABIv2 the global entry point is at the symbol address */
+       return (unsigned long)func;
+#else
+       /* All other cases there is no change vs ppc_function_entry() */
+       return ppc_function_entry(func);
+#endif
+}
+
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
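Under the ELFv1 ABI a function symbol points at a descriptor and there is a single entry point; under ELFv2 the symbol address is the global entry (which derives the TOC pointer from r12) and a local entry sits a few instructions in. ppc_function_entry() yields the local entry on ABIv2, so callers that reach a function by absolute address, like the ftrace code later in this series, need this new helper. A hedged userspace model of the two conventions (the descriptor layout is illustrative only):

    #include <stdio.h>

    struct func_desc {          /* ELFv1-style descriptor, illustrative */
        unsigned long entry;    /* code address */
        unsigned long toc;      /* callee's TOC base */
    };

    static unsigned long global_entry_elfv1(void *func)
    {
        return ((struct func_desc *)func)->entry;  /* dereference it */
    }

    static unsigned long global_entry_elfv2(void *func)
    {
        return (unsigned long)func;  /* the symbol address is the entry */
    }

    int main(void)
    {
        struct func_desc d = { 0x1000, 0x2000 };
        printf("v1=%#lx v2=%#lx\n",
               global_entry_elfv1(&d), global_entry_elfv2((void *)0x1000));
        return 0;
    }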
index bc23477..0fdd7ee 100644 (file)
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
            CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_CELL  (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
index fddb72b..d645428 100644 (file)
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
        return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+                                            bool is_base_size)
 {
+
        int size, a_psize;
        /* Look at the 8 bit LP value */
        unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
                                continue;
 
                        a_psize = __hpte_actual_psize(lp, size);
-                       if (a_psize != -1)
+                       if (a_psize != -1) {
+                               if (is_base_size)
+                                       return 1ul << mmu_psize_defs[size].shift;
                                return 1ul << mmu_psize_defs[a_psize].shift;
+                       }
                }
 
        }
        return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+       return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+       return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
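With multiple page sizes per segment, the segment's base page size encoded in an HPTE can differ from the actual page size backing the mapping; KVM needs the base size for TLB-invalidate matching and the actual size elsewhere, so the LP decoder grows an is_base_size flag and two thin wrappers share it. A hedged userspace model of the split the wrappers select between (struct hpte_sizes is a stand-in, not a kernel type):

    #include <stdio.h>

    struct hpte_sizes {
        unsigned int base_shift;    /* segment base page size */
        unsigned int actual_shift;  /* actual backing page size */
    };

    static unsigned long hpte_size(const struct hpte_sizes *s, int is_base)
    {
        return 1ul << (is_base ? s->base_shift : s->actual_shift);
    }

    int main(void)
    {
        struct hpte_sizes s = { 16, 24 };  /* e.g. 64K base, 16M actual */
        printf("base=%lu actual=%lu\n",
               hpte_size(&s, 1), hpte_size(&s, 0));
        return 0;
    }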
index 807014d..c2b4dcf 100644 (file)
@@ -22,6 +22,7 @@
  */
 #include <asm/pgtable-ppc64.h>
 #include <asm/bug.h>
+#include <asm/processor.h>
 
 /*
  * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
  */
 struct subpage_prot_table {
        unsigned long maxaddr;  /* only addresses < this are protected */
-       unsigned int **protptrs[2];
+       unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
        unsigned int *low_prot[4];
 };
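The protptrs array is indexed by the high bits of the user address, one slot per 2^43 bytes; hardcoding two slots covered only 16TB, so sizing it as TASK_SIZE_USER64 >> 43 keeps it in step with the configured address-space limit. The arithmetic, assuming the 64TB (2^46) TASK_SIZE_USER64 of this era:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long task_size = 1ULL << 46;   /* 64TB user space */
        printf("slots = %llu\n", task_size >> 43);   /* 8, was hardcoded 2 */
        return 0;
    }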
 
index f8d1d6d..e61f24e 100644 (file)
@@ -19,8 +19,7 @@
 #define MMU_FTR_TYPE_40x               ASM_CONST(0x00000004)
 #define MMU_FTR_TYPE_44x               ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E             ASM_CONST(0x00000010)
-#define MMU_FTR_TYPE_3E                        ASM_CONST(0x00000020)
-#define MMU_FTR_TYPE_47x               ASM_CONST(0x00000040)
+#define MMU_FTR_TYPE_47x               ASM_CONST(0x00000020)
 
 /*
  * This is individual features
                                MMU_FTR_CI_LARGE_PAGE
 #define MMU_FTRS_PA6T          MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
                                MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
-#define MMU_FTRS_A2            MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
-                               MMU_FTR_USE_TLBIVAX_BCAST | \
-                               MMU_FTR_LOCK_BCAST_INVAL | \
-                               MMU_FTR_USE_TLBRSRV | \
-                               MMU_FTR_USE_PAIRED_MAS | \
-                               MMU_FTR_TLBIEL | \
-                               MMU_FTR_16M_PAGE
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
 
index 4600188..0da1dbd 100644 (file)
 #ifndef __OPAL_H
 #define __OPAL_H
 
-/****** Takeover interface ********/
-
-/* PAPR H-Call used to querty the HAL existence and/or instanciate
- * it from within pHyp (tech preview only).
- *
- * This is exclusively used in prom_init.c
- */
-
 #ifndef __ASSEMBLY__
-
-struct opal_takeover_args {
-       u64     k_image;                /* r4 */
-       u64     k_size;                 /* r5 */
-       u64     k_entry;                /* r6 */
-       u64     k_entry2;               /* r7 */
-       u64     hal_addr;               /* r8 */
-       u64     rd_image;               /* r9 */
-       u64     rd_size;                /* r10 */
-       u64     rd_loc;                 /* r11 */
-};
-
 /*
  * SG entry
  *
@@ -55,15 +35,6 @@ struct opal_sg_list {
 /* We calculate number of sg entries based on PAGE_SIZE */
 #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
 
-extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
-
-extern long opal_do_takeover(struct opal_takeover_args *args);
-
-struct rtas_args;
-extern int opal_enter_rtas(struct rtas_args *args,
-                          unsigned long data,
-                          unsigned long entry);
-
 #endif /* __ASSEMBLY__ */
 
 /****** OPAL APIs ******/
index 9ed7371..b3e9360 100644 (file)
@@ -61,8 +61,7 @@ struct power_pmu {
 #define PPMU_SIAR_VALID                0x00000010 /* Processor has SIAR Valid bit */
 #define PPMU_HAS_SSLOT         0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER          0x00000040 /* Has SIER */
-#define PPMU_BHRB              0x00000080 /* has BHRB feature enabled */
-#define PPMU_EBB               0x00000100 /* supports event based branch */
+#define PPMU_ARCH_207S         0x00000080 /* PMC is architecture v2.07S */
 
 /*
  * Values for flags to get_alternatives()
index 9ea266e..7e46125 100644 (file)
@@ -277,6 +277,8 @@ n:
        .globl n;       \
 n:
 
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
 #define _KPROBE(n)     \
        .section ".kprobes.text","a";   \
        .globl  n;      \
index 6d59072..dda7ac4 100644 (file)
@@ -400,6 +400,8 @@ static inline unsigned long __pack_fe01(unsigned int fpmode)
 #define cpu_relax()    barrier()
 #endif
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 /* Check that a certain kernel stack pointer is valid in task_struct p */
 int validate_sp(unsigned long sp, struct task_struct *p,
                        unsigned long nbytes);
index b9bd1ca..96f59de 100644 (file)
@@ -9,10 +9,6 @@
 
 #include <uapi/asm/swab.h>
 
-#ifdef __GNUC__
-#ifndef __powerpc64__
-#endif /* __powerpc64__ */
-
 static __inline__ __u16 ld_le16(const volatile __u16 *addr)
 {
        __u16 val;
@@ -20,19 +16,12 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
        __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
        return val;
 }
-#define __arch_swab16p ld_le16
 
 static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
 {
        __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static inline void __arch_swab16s(__u16 *addr)
-{
-       st_le16(addr, *addr);
-}
-#define __arch_swab16s __arch_swab16s
-
 static __inline__ __u32 ld_le32(const volatile __u32 *addr)
 {
        __u32 val;
@@ -40,42 +29,10 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
        __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
        return val;
 }
-#define __arch_swab32p ld_le32
 
 static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
 {
        __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static inline void __arch_swab32s(__u32 *addr)
-{
-       st_le32(addr, *addr);
-}
-#define __arch_swab32s __arch_swab32s
-
-static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
-{
-       __u16 result;
-
-       __asm__("rlwimi %0,%1,8,16,23"
-           : "=r" (result)
-           : "r" (value), "0" (value >> 8));
-       return result;
-}
-#define __arch_swab16 __arch_swab16
-
-static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
-{
-       __u32 result;
-
-       __asm__("rlwimi %0,%1,24,16,23\n\t"
-           "rlwimi %0,%1,8,8,15\n\t"
-           "rlwimi %0,%1,24,0,7"
-           : "=r" (result)
-           : "r" (value), "0" (value >> 24));
-       return result;
-}
-#define __arch_swab32 __arch_swab32
-
-#endif /* __GNUC__ */
 #endif /* _ASM_POWERPC_SWAB_H */
index 965291b..0c15764 100644 (file)
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
+       {       /* Power8 DD1: Does not support doorbell IPIs */
+               .pvr_mask               = 0xffffff00,
+               .pvr_value              = 0x004d0100,
+               .cpu_name               = "POWER8 (raw)",
+               .cpu_features           = CPU_FTRS_POWER8_DD1,
+               .cpu_user_features      = COMMON_USER_POWER8,
+               .cpu_user_features2     = COMMON_USER2_POWER8,
+               .mmu_features           = MMU_FTRS_POWER8,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .pmc_type               = PPC_PMC_IBM,
+               .oprofile_cpu_type      = "ppc64/power8",
+               .oprofile_type          = PPC_OPROFILE_INVALID,
+               .cpu_setup              = __setup_cpu_power8,
+               .cpu_restore            = __restore_cpu_power8,
+               .flush_tlb              = __flush_tlb_power8,
+               .machine_check_early    = __machine_check_early_realmode_p8,
+               .platform               = "power8",
+       },
        {       /* Power8 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x004d0000,
index f202d07..390311c 100644 (file)
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) "ftrace-powerpc: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
@@ -105,7 +107,7 @@ __ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
 {
        unsigned int op;
-       unsigned long ptr;
+       unsigned long entry, ptr;
        unsigned long ip = rec->ip;
        void *tramp;
 
@@ -115,7 +117,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Make sure that this is still a 24-bit jump */
        if (!is_bl_op(op)) {
-               printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+               pr_err("Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }
 
@@ -125,21 +127,21 @@ __ftrace_make_nop(struct module *mod,
        pr_devel("ip:%lx jumps to %p", ip, tramp);
 
        if (!is_module_trampoline(tramp)) {
-               printk(KERN_ERR "Not a trampoline\n");
+               pr_err("Not a trampoline\n");
                return -EINVAL;
        }
 
        if (module_trampoline_target(mod, tramp, &ptr)) {
-               printk(KERN_ERR "Failed to get trampoline target\n");
+               pr_err("Failed to get trampoline target\n");
                return -EFAULT;
        }
 
        pr_devel("trampoline target %lx", ptr);
 
+       entry = ppc_global_function_entry((void *)addr);
        /* This should match what was called */
-       if (ptr != ppc_function_entry((void *)addr)) {
-               printk(KERN_ERR "addr %lx does not match expected %lx\n",
-                       ptr, ppc_function_entry((void *)addr));
+       if (ptr != entry) {
+               pr_err("addr %lx does not match expected %lx\n", ptr, entry);
                return -EINVAL;
        }
 
@@ -179,7 +181,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Make sure that this is still a 24-bit jump */
        if (!is_bl_op(op)) {
-               printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+               pr_err("Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }
 
@@ -198,7 +200,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Find where the trampoline jumps to */
        if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
-               printk(KERN_ERR "Failed to read %lx\n", tramp);
+               pr_err("Failed to read %lx\n", tramp);
                return -EFAULT;
        }
 
@@ -209,7 +211,7 @@ __ftrace_make_nop(struct module *mod,
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0x7d8903a6) ||
            (jmp[3] != 0x4e800420)) {
-               printk(KERN_ERR "Not a trampoline\n");
+               pr_err("Not a trampoline\n");
                return -EINVAL;
        }
 
@@ -221,8 +223,7 @@ __ftrace_make_nop(struct module *mod,
        pr_devel(" %lx ", tramp);
 
        if (tramp != addr) {
-               printk(KERN_ERR
-                      "Trampoline location %08lx does not match addr\n",
+               pr_err("Trampoline location %08lx does not match addr\n",
                       tramp);
                return -EINVAL;
        }
@@ -263,15 +264,13 @@ int ftrace_make_nop(struct module *mod,
         */
        if (!rec->arch.mod) {
                if (!mod) {
-                       printk(KERN_ERR "No module loaded addr=%lx\n",
-                              addr);
+                       pr_err("No module loaded addr=%lx\n", addr);
                        return -EFAULT;
                }
                rec->arch.mod = mod;
        } else if (mod) {
                if (mod != rec->arch.mod) {
-                       printk(KERN_ERR
-                              "Record mod %p not equal to passed in mod %p\n",
+                       pr_err("Record mod %p not equal to passed in mod %p\n",
                               rec->arch.mod, mod);
                        return -EINVAL;
                }
@@ -307,26 +306,25 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
         * The load offset is different depending on the ABI. For simplicity
         * just mask it out when doing the compare.
         */
-       if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) {
-               printk(KERN_ERR "Unexpected call sequence: %x %x\n",
-                       op[0], op[1]);
+       if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
+               pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
                return -EINVAL;
        }
 
        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
-               printk(KERN_ERR "No ftrace trampoline\n");
+               pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }
 
        /* Ensure branch is within 24 bits */
-       if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
-               printk(KERN_ERR "Branch out of range");
+       if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+               pr_err("Branch out of range\n");
                return -EINVAL;
        }
 
        if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
-               printk(KERN_ERR "REL24 out of range!\n");
+               pr_err("REL24 out of range!\n");
                return -EINVAL;
        }
 
@@ -345,13 +343,13 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        /* It should be pointing to a nop */
        if (op != PPC_INST_NOP) {
-               printk(KERN_ERR "Expected NOP but have %x\n", op);
+               pr_err("Expected NOP but have %x\n", op);
                return -EINVAL;
        }
 
        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
-               printk(KERN_ERR "No ftrace trampoline\n");
+               pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }
 
@@ -359,7 +357,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        op = create_branch((unsigned int *)ip,
                           rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (!op) {
-               printk(KERN_ERR "REL24 out of range!\n");
+               pr_err("REL24 out of range!\n");
                return -EINVAL;
        }
 
@@ -397,7 +395,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
         * already have a module defined.
         */
        if (!rec->arch.mod) {
-               printk(KERN_ERR "No module loaded\n");
+               pr_err("No module loaded\n");
                return -EINVAL;
        }
 
@@ -527,6 +525,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)&return_to_handler;
 
+       if (unlikely(ftrace_graph_is_dead()))
+               return;
+
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
index 2480256..5cf3d36 100644 (file)
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
 
 _GLOBAL(power7_sleep)
        li      r3,1
-       li      r4,0
+       li      r4,1
        b       power7_powersave_common
        /* No return */
 
index b82227e..12e48d5 100644 (file)
@@ -23,7 +23,7 @@ unsigned int ioread16(void __iomem *addr)
 }
 unsigned int ioread16be(void __iomem *addr)
 {
-       return in_be16(addr);
+       return readw_be(addr);
 }
 unsigned int ioread32(void __iomem *addr)
 {
@@ -31,7 +31,7 @@ unsigned int ioread32(void __iomem *addr)
 }
 unsigned int ioread32be(void __iomem *addr)
 {
-       return in_be32(addr);
+       return readl_be(addr);
 }
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -49,7 +49,7 @@ void iowrite16(u16 val, void __iomem *addr)
 }
 void iowrite16be(u16 val, void __iomem *addr)
 {
-       out_be16(addr, val);
+       writew_be(val, addr);
 }
 void iowrite32(u32 val, void __iomem *addr)
 {
@@ -57,7 +57,7 @@ void iowrite32(u32 val, void __iomem *addr)
 }
 void iowrite32be(u32 val, void __iomem *addr)
 {
-       out_be32(addr, val);
+       writel_be(val, addr);
 }
 EXPORT_SYMBOL(iowrite8);
 EXPORT_SYMBOL(iowrite16);
@@ -75,15 +75,15 @@ EXPORT_SYMBOL(iowrite32be);
  */
 void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insb((u8 __iomem *) addr, dst, count);
+       readsb(addr, dst, count);
 }
 void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insw_ns((u16 __iomem *) addr, dst, count);
+       readsw(addr, dst, count);
 }
 void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insl_ns((u32 __iomem *) addr, dst, count);
+       readsl(addr, dst, count);
 }
 EXPORT_SYMBOL(ioread8_rep);
 EXPORT_SYMBOL(ioread16_rep);
@@ -91,15 +91,15 @@ EXPORT_SYMBOL(ioread32_rep);
 
 void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsb((u8 __iomem *) addr, src, count);
+       writesb(addr, src, count);
 }
 void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsw_ns((u16 __iomem *) addr, src, count);
+       writesw(addr, src, count);
 }
 void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsl_ns((u32 __iomem *) addr, src, count);
+       writesl(addr, src, count);
 }
 EXPORT_SYMBOL(iowrite8_rep);
 EXPORT_SYMBOL(iowrite16_rep);
index 90fab64..2f72af8 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
+#include <asm/code-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/sstep.h>
 #include <asm/uaccess.h>
@@ -491,12 +492,10 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-#ifdef CONFIG_PPC64
 unsigned long arch_deref_entry_point(void *entry)
 {
-       return ((func_descr_t *)entry)->entry;
+       return ppc_global_function_entry(entry);
 }
-#endif
 
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
@@ -508,7 +507,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        /* setup return addr to the jprobe handler routine */
        regs->nip = arch_deref_entry_point(jp->entry);
 #ifdef CONFIG_PPC64
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+       regs->gpr[12] = (unsigned long)jp->entry;
+#else
        regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
+#endif
 #endif
 
        return 1;
index 077d2ce..d807ee6 100644 (file)
@@ -315,8 +315,17 @@ static void dedotify_versions(struct modversion_info *vers,
        struct modversion_info *end;
 
        for (end = (void *)vers + size; vers < end; vers++)
-               if (vers->name[0] == '.')
+               if (vers->name[0] == '.') {
                        memmove(vers->name, vers->name+1, strlen(vers->name));
+#ifdef ARCH_RELOCATES_KCRCTAB
+                       /* The TOC symbol has no CRC computed. To avoid CRC
+                        * check failing, we must force it to the expected
+                        * value (see CRC check in module.c).
+                        */
+                       if (!strcmp(vers->name, "TOC."))
+                               vers->crc = -(unsigned long)reloc_start;
+#endif
+               }
 }
 
 /* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
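dedotify strips the ELFv1 leading dot in place; since strlen() of the original name counts the dot, the memmove copies the trailing NUL along with the shifted characters. On relocatable kernels the modversion CRC slots are adjusted by reloc_start at check time, and "TOC." never has a real CRC computed, so the hunk above plants the one value that survives that adjustment. A userspace model of the in-place strip:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char name[16] = ".printk";
        if (name[0] == '.')  /* shift left by one; the NUL comes along */
            memmove(name, name + 1, strlen(name));
        printf("%s\n", name);
        return 0;
    }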
index b49c72f..b2814e2 100644 (file)
@@ -123,21 +123,12 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
 
 void pcibios_reset_secondary_bus(struct pci_dev *dev)
 {
-       u16 ctrl;
-
        if (ppc_md.pcibios_reset_secondary_bus) {
                ppc_md.pcibios_reset_secondary_bus(dev);
                return;
        }
 
-       pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
-       ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
-       msleep(2);
-
-       ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
-       pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
-       ssleep(1);
+       pci_reset_secondary_bus(dev);
 }
 
 static resource_size_t pcibios_io_size(const struct pci_controller *hose)
index 613a860..b694b07 100644 (file)
@@ -662,13 +662,6 @@ void __init early_init_devtree(void *params)
        of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
 #endif
 
-       /* Pre-initialize the cmd_line with the content of boot_commmand_line,
-        * which will be empty except when the content of the variable has
-        * been overriden by a bootloading mechanism. This happens typically
-        * with HAL takeover
-        */
-       strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
-
        /* Retrieve various information from the /chosen node of the
         * device-tree, including the platform type, initrd location and
         * size, TCE reserve, and more ...
index 078145a..1a85d8f 100644 (file)
@@ -1268,201 +1268,6 @@ static u64 __initdata prom_opal_base;
 static u64 __initdata prom_opal_entry;
 #endif
 
-#ifdef __BIG_ENDIAN__
-/* XXX Don't change this structure without updating opal-takeover.S */
-static struct opal_secondary_data {
-       s64                             ack;    /*  0 */
-       u64                             go;     /*  8 */
-       struct opal_takeover_args       args;   /* 16 */
-} opal_secondary_data;
-
-static u64 __initdata prom_opal_align;
-static u64 __initdata prom_opal_size;
-static int __initdata prom_rtas_start_cpu;
-static u64 __initdata prom_rtas_data;
-static u64 __initdata prom_rtas_entry;
-
-extern char opal_secondary_entry;
-
-static void __init prom_query_opal(void)
-{
-       long rc;
-
-       /* We must not query for OPAL presence on a machine that
-        * supports TNK takeover (970 blades), as this uses the same
-        * h-call with different arguments and will crash
-        */
-       if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
-                                   ADDR("/tnk-memory-map")))) {
-               prom_printf("TNK takeover detected, skipping OPAL check\n");
-               return;
-       }
-
-       prom_printf("Querying for OPAL presence... ");
-
-       rc = opal_query_takeover(&prom_opal_size,
-                                &prom_opal_align);
-       prom_debug("(rc = %ld) ", rc);
-       if (rc != 0) {
-               prom_printf("not there.\n");
-               return;
-       }
-       of_platform = PLATFORM_OPAL;
-       prom_printf(" there !\n");
-       prom_debug("  opal_size  = 0x%lx\n", prom_opal_size);
-       prom_debug("  opal_align = 0x%lx\n", prom_opal_align);
-       if (prom_opal_align < 0x10000)
-               prom_opal_align = 0x10000;
-}
-
-static int __init prom_rtas_call(int token, int nargs, int nret,
-                                int *outputs, ...)
-{
-       struct rtas_args rtas_args;
-       va_list list;
-       int i;
-
-       rtas_args.token = token;
-       rtas_args.nargs = nargs;
-       rtas_args.nret  = nret;
-       rtas_args.rets  = (rtas_arg_t *)&(rtas_args.args[nargs]);
-       va_start(list, outputs);
-       for (i = 0; i < nargs; ++i)
-               rtas_args.args[i] = va_arg(list, rtas_arg_t);
-       va_end(list);
-
-       for (i = 0; i < nret; ++i)
-               rtas_args.rets[i] = 0;
-
-       opal_enter_rtas(&rtas_args, prom_rtas_data,
-                       prom_rtas_entry);
-
-       if (nret > 1 && outputs != NULL)
-               for (i = 0; i < nret-1; ++i)
-                       outputs[i] = rtas_args.rets[i+1];
-       return (nret > 0)? rtas_args.rets[0]: 0;
-}
-
-static void __init prom_opal_hold_cpus(void)
-{
-       int i, cnt, cpu, rc;
-       long j;
-       phandle node;
-       char type[64];
-       u32 servers[8];
-       void *entry = (unsigned long *)&opal_secondary_entry;
-       struct opal_secondary_data *data = &opal_secondary_data;
-
-       prom_debug("prom_opal_hold_cpus: start...\n");
-       prom_debug("    - entry       = 0x%x\n", entry);
-       prom_debug("    - data        = 0x%x\n", data);
-
-       data->ack = -1;
-       data->go = 0;
-
-       /* look for cpus */
-       for (node = 0; prom_next_node(&node); ) {
-               type[0] = 0;
-               prom_getprop(node, "device_type", type, sizeof(type));
-               if (strcmp(type, "cpu") != 0)
-                       continue;
-
-               /* Skip non-configured cpus. */
-               if (prom_getprop(node, "status", type, sizeof(type)) > 0)
-                       if (strcmp(type, "okay") != 0)
-                               continue;
-
-               cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
-                            sizeof(servers));
-               if (cnt == PROM_ERROR)
-                       break;
-               cnt >>= 2;
-               for (i = 0; i < cnt; i++) {
-                       cpu = servers[i];
-                       prom_debug("CPU %d ... ", cpu);
-                       if (cpu == prom.cpu) {
-                               prom_debug("booted !\n");
-                               continue;
-                       }
-                       prom_debug("starting ... ");
-
-                       /* Init the acknowledge var which will be reset by
-                        * the secondary cpu when it awakens from its OF
-                        * spinloop.
-                        */
-                       data->ack = -1;
-                       rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
-                                           NULL, cpu, entry, data);
-                       prom_debug("rtas rc=%d ...", rc);
-
-                       for (j = 0; j < 100000000 && data->ack == -1; j++) {
-                               HMT_low();
-                               mb();
-                       }
-                       HMT_medium();
-                       if (data->ack != -1)
-                               prom_debug("done, PIR=0x%x\n", data->ack);
-                       else
-                               prom_debug("timeout !\n");
-               }
-       }
-       prom_debug("prom_opal_hold_cpus: end...\n");
-}
-
-static void __init prom_opal_takeover(void)
-{
-       struct opal_secondary_data *data = &opal_secondary_data;
-       struct opal_takeover_args *args = &data->args;
-       u64 align = prom_opal_align;
-       u64 top_addr, opal_addr;
-
-       args->k_image   = (u64)_stext;
-       args->k_size    = _end - _stext;
-       args->k_entry   = 0;
-       args->k_entry2  = 0x60;
-
-       top_addr = _ALIGN_UP(args->k_size, align);
-
-       if (prom_initrd_start != 0) {
-               args->rd_image = prom_initrd_start;
-               args->rd_size = prom_initrd_end - args->rd_image;
-               args->rd_loc = top_addr;
-               top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
-       }
-
-       /* Pickup an address for the HAL. We want to go really high
-        * up to avoid problem with future kexecs. On the other hand
-        * we don't want to be all over the TCEs on P5IOC2 machines
-        * which are going to be up there too. We assume the machine
-        * has plenty of memory, and we ask for the HAL for now to
-        * be just below the 1G point, or above the initrd
-        */
-       opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
-       if (opal_addr < top_addr)
-               opal_addr = top_addr;
-       args->hal_addr = opal_addr;
-
-       /* Copy the command line to the kernel image */
-       strlcpy(boot_command_line, prom_cmd_line,
-               COMMAND_LINE_SIZE);
-
-       prom_debug("  k_image    = 0x%lx\n", args->k_image);
-       prom_debug("  k_size     = 0x%lx\n", args->k_size);
-       prom_debug("  k_entry    = 0x%lx\n", args->k_entry);
-       prom_debug("  k_entry2   = 0x%lx\n", args->k_entry2);
-       prom_debug("  hal_addr   = 0x%lx\n", args->hal_addr);
-       prom_debug("  rd_image   = 0x%lx\n", args->rd_image);
-       prom_debug("  rd_size    = 0x%lx\n", args->rd_size);
-       prom_debug("  rd_loc     = 0x%lx\n", args->rd_loc);
-       prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
-       prom_close_stdin();
-       mb();
-       data->go = 1;
-       for (;;)
-               opal_do_takeover(args);
-}
-#endif /* __BIG_ENDIAN__ */
-
 /*
  * Allocate room for and instantiate OPAL
  */
@@ -1597,12 +1402,6 @@ static void __init prom_instantiate_rtas(void)
                         &val, sizeof(val)) != PROM_ERROR)
                rtas_has_query_cpu_stopped = true;
 
-#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
-       /* PowerVM takeover hack */
-       prom_rtas_data = base;
-       prom_rtas_entry = entry;
-       prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
-#endif
        prom_debug("rtas base     = 0x%x\n", base);
        prom_debug("rtas entry    = 0x%x\n", entry);
        prom_debug("rtas size     = 0x%x\n", (long)size);
@@ -3027,16 +2826,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
                prom_instantiate_rtas();
 
 #ifdef CONFIG_PPC_POWERNV
-#ifdef __BIG_ENDIAN__
-       /* Detect HAL and try instantiating it & doing takeover */
-       if (of_platform == PLATFORM_PSERIES_LPAR) {
-               prom_query_opal();
-               if (of_platform == PLATFORM_OPAL) {
-                       prom_opal_hold_cpus();
-                       prom_opal_takeover();
-               }
-       } else
-#endif /* __BIG_ENDIAN__ */
        if (of_platform == PLATFORM_OPAL)
                prom_instantiate_opal();
 #endif /* CONFIG_PPC_POWERNV */
index 77aa1e9..fe8e54b 100644 (file)
@@ -21,9 +21,7 @@ _end enter_prom memcpy memset reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
 strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
-boot_command_line __prom_init_toc_start __prom_init_toc_end
-btext_setup_display TOC."
+__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
 
 NM="$1"
 OBJ="$2"
index 658e89d..db2b482 100644 (file)
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
        for (f = flist; f; f = next) {
                /* Translate data addrs to absolute */
                for (i = 0; i < f->num_blocks; i++) {
-                       f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+                       f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
                        image_size += f->blocks[i].length;
+                       f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
                }
                next = f->next;
                /* Don't translate NULL pointer for last entry */
                if (f->next)
-                       f->next = (struct flash_block_list *)__pa(f->next);
+                       f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
                else
                        f->next = NULL;
                /* make num_blocks into the version/length field */
                f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+               f->num_blocks = cpu_to_be64(f->num_blocks);
        }
 
        printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
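RTAS interprets the flash block list as big-endian 64-bit values, so a little-endian kernel must byte-swap every address and length before handing the list over, which is what the cpu_to_be64() calls above add. A minimal standalone sketch of the conversion (the helper is an illustrative stand-in, not the kernel's cpu_to_be64()):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's cpu_to_be64(). */
static uint64_t cpu_to_be64_sketch(uint64_t v)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return __builtin_bswap64(v);
#else
	return v;
#endif
}

int main(void)
{
	uint64_t phys = 0x12345678ULL;            /* pretend __pa() result */
	uint64_t wire = cpu_to_be64_sketch(phys); /* what RTAS will read   */

	printf("cpu: %#018llx  rtas: %#018llx\n",
	       (unsigned long long)phys, (unsigned long long)wire);
	return 0;
}
```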
index e239df3..e5b022c 100644 (file)
@@ -469,9 +469,17 @@ void __init smp_setup_cpu_maps(void)
                }
 
                for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
+                       bool avail;
+
                        DBG("    thread %d -> cpu %d (hard id %d)\n",
                            j, cpu, be32_to_cpu(intserv[j]));
-                       set_cpu_present(cpu, of_device_is_available(dn));
+
+                       avail = of_device_is_available(dn);
+                       if (!avail)
+                               avail = !of_property_match_string(dn,
+                                               "enable-method", "spin-table");
+
+                       set_cpu_present(cpu, avail);
                        set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
                        set_cpu_possible(cpu, true);
                        cpu++;
index 4e47db6..1bc5a17 100644 (file)
@@ -54,7 +54,6 @@
 
 #include "signal.h"
 
-#undef DEBUG_SIG
 
 #ifdef CONFIG_PPC64
 #define sys_rt_sigreturn       compat_sys_rt_sigreturn
@@ -1063,10 +1062,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
        return 1;
 
 badframe:
-#ifdef DEBUG_SIG
-       printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_rt_signal32: "
@@ -1484,10 +1479,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        return 1;
 
 badframe:
-#ifdef DEBUG_SIG
-       printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_signal32: "
index d501dc4..97c1e4b 100644 (file)
@@ -38,7 +38,6 @@
 
 #include "signal.h"
 
-#define DEBUG_SIG 0
 
 #define GP_REGS_SIZE   min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
 #define FP_REGS_SIZE   sizeof(elf_fpregset_t)
@@ -700,10 +699,6 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
        return 0;
 
 badframe:
-#if DEBUG_SIG
-       printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
-              regs, uc, &uc->uc_mcontext);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, "rt_sigreturn",
@@ -809,10 +804,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
        return 1;
 
 badframe:
-#if DEBUG_SIG
-       printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, "setup_rt_frame",
index 51a3ff7..1007fb8 100644 (file)
@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
 
 #ifdef CONFIG_SCHED_SMT
 /* cpumask of CPUs with asymmetric SMT dependency */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
 {
        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 
index 8056107..68468d6 100644 (file)
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                goto out;
                        }
                        if (!rma_setup && is_vrma_hpte(v)) {
-                               unsigned long psize = hpte_page_size(v, r);
+                               unsigned long psize = hpte_base_page_size(v, r);
                                unsigned long senc = slb_pgsize_encoding(psize);
                                unsigned long lpcr;
 
index 8c86422..731be74 100644 (file)
@@ -127,11 +127,6 @@ BEGIN_FTR_SECTION
        stw     r10, HSTATE_PMC + 24(r13)
        stw     r11, HSTATE_PMC + 28(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
-       mfspr   r9, SPRN_SIER
-       std     r8, HSTATE_MMCR + 40(r13)
-       std     r9, HSTATE_MMCR + 48(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 31:
 
        /*
index 6e62243..5a24d3c 100644 (file)
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                        r = hpte[i+1];
 
                        /*
-                        * Check the HPTE again, including large page size
-                        * Since we don't currently allow any MPSS (mixed
-                        * page-size segment) page sizes, it is sufficient
-                        * to check against the actual page size.
+                        * Check the HPTE again, including base page size
                         */
                        if ((v & valid) && (v & mask) == val &&
-                           hpte_page_size(v, r) == (1ul << pshift))
+                           hpte_base_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);
 
index 868347e..558a67d 100644 (file)
@@ -48,7 +48,7 @@
  *
  * LR = return address to continue at after eventually re-enabling MMU
  */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
        stdu    r1, -112(r1)
index e2c29e3..d044b8b 100644 (file)
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)             name
+#else
 #define FUNC(name)             GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg)    addi   reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
index 9eec675..16c4d88 100644 (file)
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)             name
+#else
 #define FUNC(name)             GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
  * On entry, r4 contains the guest shadow MSR
  * MSR.EE has to be 0 when calling this function
  */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
        mfmsr   r5
        LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
        toreal(r7)
index edb14ba..ef27fbd 100644 (file)
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq, server, priority;
        int rc;
 
-       if (args->nargs != 3 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
-       server = args->args[1];
-       priority = args->args[2];
+       irq = be32_to_cpu(args->args[0]);
+       server = be32_to_cpu(args->args[1]);
+       priority = be32_to_cpu(args->args[2]);
 
        rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq, server, priority;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 3) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        server = priority = 0;
        rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
                goto out;
        }
 
-       args->rets[1] = server;
-       args->rets[2] = priority;
+       args->rets[1] = cpu_to_be32(server);
+       args->rets[2] = cpu_to_be32(priority);
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        rc = kvmppc_xics_int_off(vcpu->kvm, irq);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        rc = kvmppc_xics_int_on(vcpu->kvm, irq);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
 
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
        return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-       int i;
-
-       args->token = be32_to_cpu(args->token);
-       args->nargs = be32_to_cpu(args->nargs);
-       args->nret = be32_to_cpu(args->nret);
-       for (i = 0; i < args->nargs; i++)
-               args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-       int i;
-
-       for (i = 0; i < args->nret; i++)
-               args->args[i] = cpu_to_be32(args->args[i]);
-       args->token = cpu_to_be32(args->token);
-       args->nargs = cpu_to_be32(args->nargs);
-       args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
        struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
        if (rc)
                goto fail;
 
-       kvmppc_rtas_swap_endian_in(&args);
-
        /*
         * args->rets is a pointer into args->args. Now that we've
         * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
         * value so we can restore it on the way out.
         */
        orig_rets = args.rets;
-       args.rets = &args.args[args.nargs];
+       args.rets = &args.args[be32_to_cpu(args.nargs)];
 
        mutex_lock(&vcpu->kvm->lock);
 
        rc = -ENOENT;
        list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-               if (d->token == args.token) {
+               if (d->token == be32_to_cpu(args.token)) {
                        d->handler->handler(vcpu, &args);
                        rc = 0;
                        break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
        if (rc == 0) {
                args.rets = orig_rets;
-               kvmppc_rtas_swap_endian_out(&args);
                rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
                if (rc)
                        goto fail;
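The dropped swap_endian helpers used to convert the whole rtas_args buffer in place on little-endian hosts; the replacement converts each field at the point of use, so the buffer stays in guest (big-endian) byte order all the way from kvm_read_guest() to kvm_write_guest(). A standalone sketch of that accessor pattern (simplified struct and helpers, not the kernel's rtas_args):

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's be32 accessors. */
static uint32_t be32_to_cpu_sketch(uint32_t v)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	return __builtin_bswap32(v);
#else
	return v;
#endif
}
#define cpu_to_be32_sketch(v) be32_to_cpu_sketch(v) /* the swap is its own inverse */

struct rtas_args_sketch {	/* simplified stand-in */
	uint32_t token, nargs, nret;
	uint32_t args[4];
};

int main(void)
{
	struct rtas_args_sketch a;

	/* The "guest" stores big-endian values... */
	a.nargs   = cpu_to_be32_sketch(3);
	a.args[0] = cpu_to_be32_sketch(42);

	/* ...and the host converts on every access instead of in bulk. */
	printf("nargs=%u args[0]=%u\n",
	       be32_to_cpu_sketch(a.nargs), be32_to_cpu_sketch(a.args[0]));
	return 0;
}
```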
index dd2cc03..86903d3 100644 (file)
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                if (printk_ratelimit())
                        pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
                                __func__, (long)gfn, pfn);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
        kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
index 0738f96..43435c6 100644 (file)
@@ -77,7 +77,7 @@ _GLOBAL(memset)
        stb     r4,0(r6)
        blr
 
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
        cmplw   0,r3,r4
        bgt     backwards_memcpy
        b       memcpy
index 412dd46..5c09f36 100644 (file)
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = regs->gpr[rb] & 0x3f;
                        ival = (signed int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
-                       if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+                       if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = rb;
                        ival = (signed int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> sh;
-                       if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+                       if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef __powerpc64__
                case 27:        /* sld */
-                       sh = regs->gpr[rd] & 0x7f;
+                       sh = regs->gpr[rb] & 0x7f;
                        if (sh < 64)
                                regs->gpr[ra] = regs->gpr[rd] << sh;
                        else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = regs->gpr[rb] & 0x7f;
                        ival = (signed long int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
-                       if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+                       if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = rb | ((instr & 2) << 4);
                        ival = (signed long int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> sh;
-                       if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+                       if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
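Four of the hunks above fix the same carry (XER.CA) bug: `1 << sh` builds the mask in a 32-bit int, so for shift counts of 32 or more the behaviour is undefined and the mask of shifted-out bits is wrong; `1ul << sh` builds it at register width. (The sld hunk is separate: it corrects which register supplies the shift count.) A plain C demonstration, assuming a 64-bit `long` as on ppc64:

```c
#include <stdio.h>

int main(void)
{
	unsigned int sh = 40;

	/* Correct: a 64-bit mask of the bits shifted out. */
	unsigned long mask = (1ul << sh) - 1;

	/* (1 << sh) - 1 would shift a 32-bit int by more than its
	 * width: undefined behaviour, never a 40-bit mask. */
	printf("mask = %#lx\n", mask);
	return 0;
}
```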
index af3d78e..928ebe7 100644 (file)
@@ -410,17 +410,7 @@ void __init mmu_context_init(void)
        } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
                first_context = 1;
                last_context = 65535;
-       } else
-#ifdef CONFIG_PPC_BOOK3E_MMU
-       if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
-               u32 mmucfg = mfspr(SPRN_MMUCFG);
-               u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
-                               >> MMUCFG_PIDSIZE_SHIFT;
-               first_context = 1;
-               last_context = (1UL << (pid_bits + 1)) - 1;
-       } else
-#endif
-       {
+       } else {
                first_context = 1;
                last_context = 255;
        }
index 6dcdade..82e82ca 100644 (file)
@@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+                       BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
-                       if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-                               PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
-                       else
+                       if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+                               PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
+                       } else {
                                PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
+                               PPC_SRWI(r_A, r_A, 12);
+                       }
                        break;
                case BPF_ANC | SKF_AD_QUEUE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
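The reworked JIT output matches the interpreter's semantics for the two VLAN ancillary loads: SKF_AD_VLAN_TAG returns the tci with only the present bit cleared (keeping the PCP bits, not just the 12-bit VID), and SKF_AD_VLAN_TAG_PRESENT returns the present bit normalised to 0/1 via the new shift. Roughly, in plain C (VLAN_TAG_PRESENT is 0x1000, as the new BUILD_BUG_ON pins down):

```c
#include <stdio.h>

#define VLAN_TAG_PRESENT 0x1000

int main(void)
{
	unsigned int vlan_tci = 0x1abc;	/* present bit set, tag 0x0abc */

	unsigned int tag     = vlan_tci & ~VLAN_TAG_PRESENT;
	unsigned int present = (vlan_tci & VLAN_TAG_PRESENT) >> 12;

	printf("tag=%#x present=%u\n", tag, present);	/* tag=0xabc present=1 */
	return 0;
}
```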
index 4520c93..fe52db2 100644 (file)
@@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event)
         * check that the PMU supports EBB, meaning those that don't can still
         * use bit 63 of the event code for something else if they wish.
         */
-       return (ppmu->flags & PPMU_EBB) &&
+       return (ppmu->flags & PPMU_ARCH_207S) &&
               ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
 }
 
@@ -777,7 +777,7 @@ void perf_event_print_debug(void)
        if (ppmu->flags & PPMU_HAS_SIER)
                sier = mfspr(SPRN_SIER);
 
-       if (ppmu->flags & PPMU_EBB) {
+       if (ppmu->flags & PPMU_ARCH_207S) {
                pr_info("MMCR2: %016lx EBBHR: %016lx\n",
                        mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
                pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
        local64_add(delta, &event->count);
-       local64_sub(delta, &event->hw.period_left);
+
+       /*
+        * A number of places program the PMC with (0x80000000 - period_left).
+        * We never want period_left to be less than 1 because we will program
+        * the PMC with a value >= 0x80000000 and an edge-detected PMC will
+        * roll around to 0 before taking an exception. We have seen this
+        * on POWER8.
+        *
+        * To fix this, clamp the minimum value of period_left to 1.
+        */
+       do {
+               prev = local64_read(&event->hw.period_left);
+               val = prev - delta;
+               if (val < 1)
+                       val = 1;
+       } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
 }
 
 /*
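The replacement for the plain local64_sub() above publishes the clamped value with a compare-and-exchange retry loop, so a concurrent reader never sees period_left drop below 1. A standalone sketch of that lock-free clamp, using C11 atomics as a stand-in for local64_t:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic int64_t period_left = 5;

/* Subtract delta but never let the published value fall below 1. */
static void account_delta(int64_t delta)
{
	int64_t prev, val;

	do {
		prev = atomic_load(&period_left);
		val = prev - delta;
		if (val < 1)
			val = 1;	/* the clamp added above */
	} while (!atomic_compare_exchange_weak(&period_left, &prev, val));
}

int main(void)
{
	account_delta(100);
	printf("period_left = %lld\n",
	       (long long)atomic_load(&period_left));	/* prints 1 */
	return 0;
}
```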
@@ -1292,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
  out_enable:
        pmao_restore_workaround(ebb);
 
+       if (ppmu->flags & PPMU_ARCH_207S)
+               mtspr(SPRN_MMCR2, 0);
+
        mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
        mb();
@@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event)
 
        if (has_branch_stack(event)) {
                /* PMU has BHRB enabled */
-               if (!(ppmu->flags & PPMU_BHRB))
+               if (!(ppmu->flags & PPMU_ARCH_207S))
                        return -EOPNOTSUPP;
        }
 
index e0766b8..66d0f17 100644 (file)
@@ -387,8 +387,7 @@ static int h_24x7_event_init(struct perf_event *event)
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
-           event->attr.exclude_guest  ||
-           is_sampling_event(event)) /* no sampling */
+           event->attr.exclude_guest)
                return -EINVAL;
 
        /* no branch sampling */
@@ -513,6 +512,9 @@ static int hv_24x7_init(void)
        if (!hv_page_cache)
                return -ENOMEM;
 
+       /* sampling not supported */
+       h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
        r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
        if (r)
                return r;
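Both this hunk and the hv_gpci one below drop the open-coded is_sampling_event() rejection and instead advertise PERF_PMU_CAP_NO_INTERRUPT, so the generic perf layer refuses sampling events for any PMU that cannot raise an overflow interrupt. A toy model of that division of labour (simplified types; the real gate lives in the core perf code):

```c
#include <stdbool.h>
#include <stdio.h>

#define PERF_PMU_CAP_NO_INTERRUPT 0x01	/* stand-in for the kernel flag */

struct pmu   { unsigned int capabilities; };
struct event { bool sampling; };

/* Core-side gate: no overflow interrupts means no sampling events. */
static int event_init(const struct pmu *pmu, const struct event *ev)
{
	if ((pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) && ev->sampling)
		return -1;	/* the kernel returns -EOPNOTSUPP here */
	return 0;
}

int main(void)
{
	struct pmu h24x7 = { .capabilities = PERF_PMU_CAP_NO_INTERRUPT };
	struct event sampled = { .sampling = true };
	struct event counted = { .sampling = false };

	printf("sampling rc=%d, counting rc=%d\n",
	       event_init(&h24x7, &sampled), event_init(&h24x7, &counted));
	return 0;
}
```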
index c9d399a..15fc76c 100644 (file)
@@ -210,8 +210,7 @@ static int h_gpci_event_init(struct perf_event *event)
            event->attr.exclude_hv     ||
            event->attr.exclude_idle   ||
            event->attr.exclude_host   ||
-           event->attr.exclude_guest  ||
-           is_sampling_event(event)) /* no sampling */
+           event->attr.exclude_guest)
                return -EINVAL;
 
        /* no branch sampling */
@@ -284,6 +283,9 @@ static int hv_gpci_init(void)
                return -ENODEV;
        }
 
+       /* sampling not supported */
+       h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
        r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1);
        if (r)
                return r;
index fe2763b..639cd91 100644 (file)
@@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = {
        .get_constraint         = power8_get_constraint,
        .get_alternatives       = power8_get_alternatives,
        .disable_pmc            = power8_disable_pmc,
-       .flags                  = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
+       .flags                  = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power8_generic_events),
        .generic_events         = power8_generic_events,
        .cache_events           = &power8_cache_events,
index 94560db..2c15ff0 100644 (file)
@@ -125,7 +125,7 @@ static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, i
 static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
 {
        u64 reg_value;
-       int temp;
+       unsigned int temp;
        u64 new_value;
        int ret;
 
index 38e0a1a..5e6e0ba 100644 (file)
@@ -111,6 +111,7 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
        return ret;
 }
 
+#ifdef CONFIG_COREDUMP
 int elf_coredump_extra_notes_size(void)
 {
        struct spufs_calls *calls;
@@ -142,6 +143,7 @@ int elf_coredump_extra_notes_write(struct coredump_params *cprm)
 
        return ret;
 }
+#endif
 
 void notify_spus_active(void)
 {
index b9d5d67..52a7d25 100644 (file)
@@ -1,8 +1,9 @@
 
 obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o coredump.o
+spufs-y += inode.o file.o context.o syscalls.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 spufs-y += switch.o fault.o lscsa_alloc.o
+spufs-$(CONFIG_COREDUMP) += coredump.o
 
 # magic for the trace events
 CFLAGS_sched.o := -I$(src)
index b045fdd..a87200a 100644 (file)
@@ -79,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
 struct spufs_calls spufs_calls = {
        .create_thread = do_spu_create,
        .spu_run = do_spu_run,
-       .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
-       .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
        .notify_spus_active = do_notify_spus_active,
        .owner = THIS_MODULE,
+#ifdef CONFIG_COREDUMP
+       .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
+       .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
+#endif
 };
index d55891f..4ad227d 100644 (file)
@@ -1,4 +1,4 @@
-obj-y                  += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o
+obj-y                  += setup.o opal-wrappers.o opal.o opal-async.o
 obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
 obj-y                  += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
 obj-y                  += opal-msglog.o
index 10268c4..0ad533b 100644 (file)
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
 
        rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
-               pr_err("ELOG: Opal log read failed\n");
+               pr_err("ELOG: OPAL log info read failed\n");
                return;
        }
 
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
        log_id = be64_to_cpu(id);
        elog_type = be64_to_cpu(type);
 
-       BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
+       WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
        if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
                elog_size  =  OPAL_MAX_ERRLOG_SIZE;
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S
deleted file mode 100644 (file)
index 11a3169..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * PowerNV OPAL takeover assembly code, for use by prom_init.c
- *
- * Copyright 2011 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/hvcall.h>
-#include <asm/asm-offsets.h>
-#include <asm/opal.h>
-
-#define H_HAL_TAKEOVER                 0x5124
-#define H_HAL_TAKEOVER_QUERY_MAGIC     -1
-
-       .text
-_GLOBAL(opal_query_takeover)
-       mfcr    r0
-       stw     r0,8(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       std     r3,STK_PARAM(R3)(r1)
-       std     r4,STK_PARAM(R4)(r1)
-       li      r3,H_HAL_TAKEOVER
-       li      r4,H_HAL_TAKEOVER_QUERY_MAGIC
-       HVSC
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r10,STK_PARAM(R3)(r1)
-       std     r4,0(r10)
-       ld      r10,STK_PARAM(R4)(r1)
-       std     r5,0(r10)
-       lwz     r0,8(r1)
-       mtcrf   0xff,r0
-       blr
-
-_GLOBAL(opal_do_takeover)
-       mfcr    r0
-       stw     r0,8(r1)
-       mflr    r0
-       std     r0,16(r1)
-       bl      __opal_do_takeover
-       ld      r0,16(r1)
-       mtlr    r0
-       lwz     r0,8(r1)
-       mtcrf   0xff,r0
-       blr
-
-__opal_do_takeover:
-       ld      r4,0(r3)
-       ld      r5,0x8(r3)
-       ld      r6,0x10(r3)
-       ld      r7,0x18(r3)
-       ld      r8,0x20(r3)
-       ld      r9,0x28(r3)
-       ld      r10,0x30(r3)
-       ld      r11,0x38(r3)
-       li      r3,H_HAL_TAKEOVER
-       HVSC
-       blr
-
-       .globl opal_secondary_entry
-opal_secondary_entry:
-       mr      r31,r3
-       mfmsr   r11
-       li      r12,(MSR_SF | MSR_ISF)@highest
-       sldi    r12,r12,48
-       or      r11,r11,r12
-       mtmsrd  r11
-       isync
-       mfspr   r4,SPRN_PIR
-       std     r4,0(r3)
-1:     HMT_LOW
-       ld      r4,8(r3)
-       cmpli   cr0,r4,0
-       beq     1b
-       HMT_MEDIUM
-1:     addi    r3,r31,16
-       bl      __opal_do_takeover
-       b       1b
-
-_GLOBAL(opal_enter_rtas)
-       mflr    r0
-       std     r0,16(r1)
-        stdu   r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
-
-       /* Because PROM is running in 32b mode, it clobbers the high order half
-        * of all registers that it saves.  We therefore save those registers
-        * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
-       */
-       SAVE_GPR(2, r1)
-       SAVE_GPR(13, r1)
-       SAVE_8GPRS(14, r1)
-       SAVE_10GPRS(22, r1)
-       mfcr    r10
-       mfmsr   r11
-       std     r10,_CCR(r1)
-       std     r11,_MSR(r1)
-
-       /* Get the PROM entrypoint */
-       mtlr    r5
-
-       /* Switch MSR to 32 bits mode
-        */
-        li      r12,1
-        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
-        andc    r11,r11,r12
-        li      r12,1
-        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
-        andc    r11,r11,r12
-        mtmsrd  r11
-        isync
-
-       /* Enter RTAS here... */
-       blrl
-
-       /* Just make sure that r1 top 32 bits didn't get
-        * corrupt by OF
-        */
-       rldicl  r1,r1,0,32
-
-       /* Restore the MSR (back to 64 bits) */
-       ld      r0,_MSR(r1)
-       MTMSRD(r0)
-        isync
-
-       /* Restore other registers */
-       REST_GPR(2, r1)
-       REST_GPR(13, r1)
-       REST_8GPRS(14, r1)
-       REST_10GPRS(22, r1)
-       ld      r4,_CCR(r1)
-       mtcr    r4
-
-        addi   r1,r1,PROM_FRAME_SIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
index 022b38e..2d0b4d6 100644 (file)
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
        }
 
        of_node_set_flag(dn, OF_DYNAMIC);
+       of_node_init(dn);
 
        return dn;
 }
index 0435bb6..1c0a60d 100644 (file)
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 
        np->properties = proplist;
        of_node_set_flag(np, OF_DYNAMIC);
+       of_node_init(np);
 
        np->parent = derive_parent(path);
        if (IS_ERR(np->parent)) {
index 62c47bb..9e5353f 100644 (file)
@@ -476,6 +476,11 @@ void __init alloc_dart_table(void)
         */
        dart_tablebase = (unsigned long)
                __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
+       /*
+        * The DART space is later unmapped from the kernel linear mapping and
+        * accessing dart_tablebase during kmemleak scanning will fault.
+        */
+       kmemleak_no_scan((void *)dart_tablebase);
 
        printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
 }
index bb63499..f5af5f6 100644 (file)
@@ -116,7 +116,6 @@ config S390
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_FUTEX_CMPXCHG if FUTEX
        select HAVE_KERNEL_BZIP2
        select HAVE_KERNEL_GZIP
index 8df022c..fd09a10 100644 (file)
@@ -45,7 +45,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z9_109=y
+CONFIG_MARCH_Z196=y
+CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_PREEMPT=y
 CONFIG_HZ_100=y
@@ -240,7 +241,6 @@ CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -456,6 +456,7 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
+CONFIG_DIAG288_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
index c81a74e..b061180 100644 (file)
@@ -45,7 +45,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z9_109=y
+CONFIG_MARCH_Z196=y
+CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -238,7 +239,6 @@ CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -453,6 +453,7 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
+CONFIG_DIAG288_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
index b5ba8fe..d279baa 100644 (file)
@@ -43,7 +43,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z9_109=y
+CONFIG_MARCH_Z196=y
+CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -236,7 +237,6 @@ CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -451,6 +451,7 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
+CONFIG_DIAG288_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
index cef073c..948e0e0 100644 (file)
@@ -8,7 +8,8 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z9_109=y
+CONFIG_MARCH_Z196=y
+CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
 # CONFIG_HOTPLUG_CPU is not set
index 4557cb7..2e56498 100644 (file)
@@ -135,8 +135,8 @@ CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_PI_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_PROVE_RCU=y
@@ -199,4 +199,10 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRC7=m
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
 CONFIG_CMM=m
index 4181d7b..773bef7 100644 (file)
@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt {
        struct list_head list;
        atomic_t active;
        struct kvm_s390_float_interrupt *float_int;
-       int timer_due; /* event indicator for waitqueue below */
        wait_queue_head_t *wq;
        atomic_t *cpuflags;
        unsigned int action_bits;
@@ -367,7 +366,6 @@ struct kvm_vcpu_arch {
        s390_fp_regs      guest_fpregs;
        struct kvm_s390_local_interrupt local_int;
        struct hrtimer    ckc_timer;
-       struct tasklet_struct tasklet;
        struct kvm_s390_pgm_info pgm;
        union  {
                struct cpuid    cpu_id;
@@ -418,6 +416,7 @@ struct kvm_arch{
        int css_support;
        int use_irqchip;
        int use_cmma;
+       int user_cpu_state_ctrl;
        struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS];
        wait_queue_head_t ipte_wq;
        spinlock_t start_stop_lock;
index c28f32a..3815bfe 100644 (file)
@@ -33,10 +33,9 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-       pgd_t *pgd = mm->pgd;
-
-       S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-       set_fs(current->thread.mm_segment);
+       S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+       if (current->thread.mm_segment.ar4)
+               __ctl_load(S390_lowcore.user_asce, 7, 7);
        set_cpu_flag(CIF_ASCE);
 }
 
@@ -70,12 +69,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        /* Clear old ASCE by loading the kernel ASCE. */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
-       /* Delay loading of the new ASCE to control registers CR1 & CR7 */
-       set_cpu_flag(CIF_ASCE);
        atomic_inc(&next->context.attach_count);
        atomic_dec(&prev->context.attach_count);
        if (MACHINE_HAS_TLB_LC)
                cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+       S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -84,17 +82,18 @@ static inline void finish_arch_post_lock_switch(void)
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
 
-       if (!mm)
-               return;
-       preempt_disable();
-       while (atomic_read(&mm->context.attach_count) >> 16)
-               cpu_relax();
-
-       cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-       set_user_asce(mm);
-       if (mm->context.flush_mm)
-               __tlb_flush_mm(mm);
-       preempt_enable();
+       load_kernel_asce();
+       if (mm) {
+               preempt_disable();
+               while (atomic_read(&mm->context.attach_count) >> 16)
+                       cpu_relax();
+
+               cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+               if (mm->context.flush_mm)
+                       __tlb_flush_mm(mm);
+               preempt_enable();
+       }
+       set_fs(current->thread.mm_segment);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
index 6f02d45..e568fc8 100644 (file)
@@ -217,7 +217,7 @@ static inline void cpu_relax(void)
        barrier();
 }
 
-#define arch_mutex_cpu_relax()  barrier()
+#define cpu_relax_lowlatency()  barrier()
 
 static inline void psw_set_key(unsigned int key)
 {
index 29c81f8..18ea9e3 100644 (file)
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
                return 0;
 
        asm volatile(
-               "0:     lfpc    %1\n"
-               "       la      %0,0\n"
+               "       lfpc    %1\n"
+               "0:     la      %0,0\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
@@ -134,8 +134,4 @@ static inline void restore_access_regs(unsigned int *acrs)
        prev = __switch_to(prev,next);                                  \
 } while (0)
 
-#define finish_arch_switch(prev) do {                                       \
-       set_fs(current->thread.mm_segment);                                  \
-} while (0)
-
 #endif /* __ASM_SWITCH_TO_H */
index 6a9a9eb..08fe6da 100644 (file)
@@ -16,6 +16,7 @@ header-y += ioctls.h
 header-y += ipcbuf.h
 header-y += kvm.h
 header-y += kvm_para.h
+header-y += kvm_perf.h
 header-y += kvm_virtio.h
 header-y += mman.h
 header-y += monwriter.h
@@ -36,6 +37,7 @@ header-y += signal.h
 header-y += socket.h
 header-y += sockios.h
 header-y += sclp_ctl.h
+header-y += sie.h
 header-y += stat.h
 header-y += statfs.h
 header-y += swab.h
diff --git a/arch/s390/include/uapi/asm/kvm_perf.h b/arch/s390/include/uapi/asm/kvm_perf.h
new file mode 100644 (file)
index 0000000..3972827
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Definitions for perf-kvm on s390
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_KVM_PERF_S390_H
+#define __LINUX_KVM_PERF_S390_H
+
+#include <asm/sie.h>
+
+#define DECODE_STR_LEN 40
+
+#define VCPU_ID "id"
+
+#define KVM_ENTRY_TRACE "kvm:kvm_s390_sie_enter"
+#define KVM_EXIT_TRACE "kvm:kvm_s390_sie_exit"
+#define KVM_EXIT_REASON "icptcode"
+
+#endif
index 3d97f61..d4096fd 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _UAPI_ASM_S390_SIE_H
 #define _UAPI_ASM_S390_SIE_H
 
-#include <asm/sigp.h>
-
 #define diagnose_codes                                         \
        { 0x10, "DIAG (0x10) release pages" },                  \
        { 0x44, "DIAG (0x44) time slice end" },                 \
        { 0x500, "DIAG (0x500) KVM virtio functions" },         \
        { 0x501, "DIAG (0x501) KVM breakpoint" }
 
-#define sigp_order_codes                                               \
-       { SIGP_SENSE, "SIGP sense" },                                   \
-       { SIGP_EXTERNAL_CALL, "SIGP external call" },                   \
-       { SIGP_EMERGENCY_SIGNAL, "SIGP emergency signal" },             \
-       { SIGP_STOP, "SIGP stop" },                                     \
-       { SIGP_STOP_AND_STORE_STATUS, "SIGP stop and store status" },   \
-       { SIGP_SET_ARCHITECTURE, "SIGP set architecture" },             \
-       { SIGP_SET_PREFIX, "SIGP set prefix" },                         \
-       { SIGP_SENSE_RUNNING, "SIGP sense running" },                   \
-       { SIGP_RESTART, "SIGP restart" },                               \
-       { SIGP_INITIAL_CPU_RESET, "SIGP initial cpu reset" },           \
-       { SIGP_STORE_STATUS_AT_ADDRESS, "SIGP store status at address" }
+#define sigp_order_codes                                       \
+       { 0x01, "SIGP sense" },                                 \
+       { 0x02, "SIGP external call" },                         \
+       { 0x03, "SIGP emergency signal" },                      \
+       { 0x05, "SIGP stop" },                                  \
+       { 0x06, "SIGP restart" },                               \
+       { 0x09, "SIGP stop and store status" },                 \
+       { 0x0b, "SIGP initial cpu reset" },                     \
+       { 0x0d, "SIGP set prefix" },                            \
+       { 0x0e, "SIGP store status at address" },               \
+       { 0x12, "SIGP set architecture" },                      \
+       { 0x15, "SIGP sense running" }
 
 #define icpt_prog_codes                                                \
        { 0x0001, "Prog Operation" },                           \
        exit_code_ipa0(0xB2, 0x17, "STETR"),    \
        exit_code_ipa0(0xB2, 0x18, "PC"),       \
        exit_code_ipa0(0xB2, 0x20, "SERVC"),    \
+       exit_code_ipa0(0xB2, 0x21, "IPTE"),     \
        exit_code_ipa0(0xB2, 0x28, "PT"),       \
        exit_code_ipa0(0xB2, 0x29, "ISKE"),     \
        exit_code_ipa0(0xB2, 0x2a, "RRBE"),     \
index 200e063..3e077b2 100644 (file)
@@ -16,7 +16,9 @@ struct ucontext_extended {
        struct ucontext  *uc_link;
        stack_t           uc_stack;
        _sigregs          uc_mcontext;
-       unsigned long     uc_sigmask[2];
+       sigset_t          uc_sigmask;
+       /* Allow for uc_sigmask growth.  Glibc uses a 1024-bit sigset_t.  */
+       unsigned char     __unused[128 - sizeof(sigset_t)];
        unsigned long     uc_gprs_high[16];
 };
 
@@ -27,7 +29,9 @@ struct ucontext {
        struct ucontext  *uc_link;
        stack_t           uc_stack;
        _sigregs          uc_mcontext;
-       sigset_t          uc_sigmask;   /* mask last for extensibility */
+       sigset_t          uc_sigmask;
+       /* Allow for uc_sigmask growth.  Glibc uses a 1024-bit sigset_t.  */
+       unsigned char     __unused[128 - sizeof(sigset_t)];
 };
 
 #endif /* !_ASM_S390_UCONTEXT_H */
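The reserved bytes make the mask slot a fixed 128 bytes (enough for glibc's 1024-bit sigset_t), so fields that follow it, such as uc_gprs_high in the extended layout, keep their offsets even if the mask grows. A compile-time sketch of that invariant with illustrative types (not the uapi definitions):

```c
#include <assert.h>

typedef struct { unsigned long sig[1]; } ksigset_t;	/* 64-bit kernel mask */

struct uc_mask_slot_sketch {
	ksigset_t     uc_sigmask;
	unsigned char reserved[128 - sizeof(ksigset_t)];
};

/* Whatever sizeof(ksigset_t) is, the slot always spans 128 bytes. */
static_assert(sizeof(struct uc_mask_slot_sketch) == 128,
	      "mask plus padding must cover the full 1024-bit slot");

int main(void)
{
	return 0;
}
```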
index 39ddfdb..70d4b7c 100644 (file)
@@ -69,7 +69,9 @@ struct ucontext32 {
        __u32                   uc_link;        /* pointer */   
        compat_stack_t          uc_stack;
        _sigregs32              uc_mcontext;
-       compat_sigset_t         uc_sigmask;     /* mask last for extensibility */
+       compat_sigset_t         uc_sigmask;
+       /* Allow for uc_sigmask growth.  Glibc uses a 1024-bit sigset_t.  */
+       unsigned char           __unused[128 - sizeof(compat_sigset_t)];
 };
 
 struct stat64_emu31;
index 7ba7d67..e88d35d 100644 (file)
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
 
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-       .long 3, 0xc100efea, 0xf46ce800, 0x00400000
+       .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
-       .long 2, 0xc100efea, 0xf46c0000
+       .long 2, 0xc100eff2, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
-       .long 2, 0xc100efea, 0xf0680000
+       .long 2, 0xc100eff2, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
        .long 1, 0xc100efc2
 #elif defined(CONFIG_MARCH_Z990)
index 08dcf21..433c6db 100644 (file)
@@ -21,13 +21,9 @@ ENTRY(_mcount)
 ENTRY(ftrace_caller)
 #endif
        stm     %r2,%r5,16(%r15)
-       bras    %r1,2f
+       bras    %r1,1f
 0:     .long   ftrace_trace_function
-1:     .long   function_trace_stop
-2:     l       %r2,1b-0b(%r1)
-       icm     %r2,0xf,0(%r2)
-       jnz     3f
-       st      %r14,56(%r15)
+1:     st      %r14,56(%r15)
        lr      %r0,%r15
        ahi     %r15,-96
        l       %r3,100(%r15)
@@ -50,7 +46,7 @@ ENTRY(ftrace_graph_caller)
 #endif
        ahi     %r15,96
        l       %r14,56(%r15)
-3:     lm      %r2,%r5,16(%r15)
+       lm      %r2,%r5,16(%r15)
        br      %r14
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
index 1c52eae..c67a8bf 100644 (file)
@@ -20,9 +20,6 @@ ENTRY(_mcount)
 
 ENTRY(ftrace_caller)
 #endif
-       larl    %r1,function_trace_stop
-       icm     %r1,0xf,0(%r1)
-       bnzr    %r14
        stmg    %r2,%r5,32(%r15)
        stg     %r14,112(%r15)
        lgr     %r1,%r15
index ea75d01..d3194de 100644 (file)
@@ -411,12 +411,6 @@ static int cpumf_pmu_event_init(struct perf_event *event)
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
        case PERF_TYPE_RAW:
-               /* The CPU measurement counter facility does not have overflow
-                * interrupts to do sampling.  Sampling must be provided by
-                * external means, for example, by timers.
-                */
-               if (is_sampling_event(event))
-                       return -ENOENT;
                err = __hw_perf_event_init(event);
                break;
        default:
@@ -681,6 +675,12 @@ static int __init cpumf_pmu_init(void)
                goto out;
        }
 
+       /* The CPU measurement counter facility does not have overflow
+        * interrupts to do sampling.  Sampling must be provided by
+        * external means, for example, by timers.
+        */
+       cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
        cpumf_pmu.attr_groups = cpumf_cf_event_group();
        rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
        if (rc) {
index 2d71673..5dc7ad9 100644 (file)
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                        unsigned long mask = PSW_MASK_USER;
 
                        mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
-                       if ((data & ~mask) != PSW_USER_BITS)
+                       if ((data ^ PSW_USER_BITS) & ~mask)
+                               /* Invalid psw mask. */
+                               return -EINVAL;
+                       if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
+                               /* Invalid address-space-control bits */
                                return -EINVAL;
                        if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+                               /* Invalid addressing mode bits */
                                return -EINVAL;
                }
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
 
                        mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
-                       if ((tmp & ~mask) != PSW32_USER_BITS)
+                       if ((tmp ^ PSW32_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
+                       if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
+                               /* Invalid address-space-control bits */
+                               return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
                                (__u64)(tmp & mask) << 32;
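The XOR form checks precisely that every bit outside the user-changeable `mask` equals the PSW_USER_BITS template, whereas the old `(data & ~mask) != PSW_USER_BITS` comparison only behaves when the template has no bits inside the mask. A small standalone illustration (the constants are made up, not the real s390 PSW values):

```c
#include <stdio.h>

int main(void)
{
	/* Made-up values for illustration only. */
	const unsigned long PSW_USER_BITS = 0x0705000000000000UL;
	const unsigned long mask          = 0x00000000ffffffffUL;

	unsigned long ok  = PSW_USER_BITS | 0x1234;	/* touches masked bits only */
	unsigned long bad = ok ^ 0x0100000000000000UL;	/* flips a fixed bit */

	printf("ok : %s\n", ((ok  ^ PSW_USER_BITS) & ~mask) ? "reject" : "accept");
	printf("bad: %s\n", ((bad ^ PSW_USER_BITS) & ~mask) ? "reject" : "accept");
	return 0;
}
```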
index 0161675..59bd8f9 100644 (file)
@@ -176,7 +176,8 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu)
                return -EOPNOTSUPP;
        }
 
-       kvm_s390_vcpu_stop(vcpu);
+       if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+               kvm_s390_vcpu_stop(vcpu);
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL;
        vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT;
index a0b586c..eaf4629 100644 (file)
@@ -56,32 +56,26 @@ static int handle_noop(struct kvm_vcpu *vcpu)
 static int handle_stop(struct kvm_vcpu *vcpu)
 {
        int rc = 0;
+       unsigned int action_bits;
 
        vcpu->stat.exit_stop_request++;
-       spin_lock_bh(&vcpu->arch.local_int.lock);
-
        trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits);
 
-       if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) {
-               kvm_s390_vcpu_stop(vcpu);
-               vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP;
-               VCPU_EVENT(vcpu, 3, "%s", "cpu stopped");
-               rc = -EOPNOTSUPP;
-       }
+       action_bits = vcpu->arch.local_int.action_bits;
 
-       if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) {
-               vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP;
-               /* store status must be called unlocked. Since local_int.lock
-                * only protects local_int.* and not guest memory we can give
-                * up the lock here */
-               spin_unlock_bh(&vcpu->arch.local_int.lock);
+       if (!(action_bits & ACTION_STOP_ON_STOP))
+               return 0;
+
+       if (action_bits & ACTION_STORE_ON_STOP) {
                rc = kvm_s390_vcpu_store_status(vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
-               if (rc >= 0)
-                       rc = -EOPNOTSUPP;
-       } else
-               spin_unlock_bh(&vcpu->arch.local_int.lock);
-       return rc;
+               if (rc)
+                       return rc;
+       }
+
+       if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+               kvm_s390_vcpu_stop(vcpu);
+       return -EOPNOTSUPP;
 }
 
 static int handle_validity(struct kvm_vcpu *vcpu)
index 90c8de2..92528a0 100644 (file)
@@ -158,6 +158,9 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
                                               LCTL_CR10 | LCTL_CR11);
                vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
        }
+
+       if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
+               atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
@@ -544,13 +547,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
        int rc = 0;
 
        if (atomic_read(&li->active)) {
-               spin_lock_bh(&li->lock);
+               spin_lock(&li->lock);
                list_for_each_entry(inti, &li->list, list)
                        if (__interrupt_is_deliverable(vcpu, inti)) {
                                rc = 1;
                                break;
                        }
-               spin_unlock_bh(&li->lock);
+               spin_unlock(&li->lock);
        }
 
        if ((!rc) && atomic_read(&fi->active)) {
@@ -585,88 +588,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 {
        u64 now, sltime;
-       DECLARE_WAITQUEUE(wait, current);
 
        vcpu->stat.exit_wait_state++;
-       if (kvm_cpu_has_interrupt(vcpu))
-               return 0;
 
-       __set_cpu_idle(vcpu);
-       spin_lock_bh(&vcpu->arch.local_int.lock);
-       vcpu->arch.local_int.timer_due = 0;
-       spin_unlock_bh(&vcpu->arch.local_int.lock);
+       /* fast path */
+       if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
+               return 0;
 
        if (psw_interrupts_disabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
-               __unset_cpu_idle(vcpu);
                return -EOPNOTSUPP; /* disabled wait */
        }
 
+       __set_cpu_idle(vcpu);
        if (!ckc_interrupts_enabled(vcpu)) {
                VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
                goto no_timer;
        }
 
        now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
-       if (vcpu->arch.sie_block->ckc < now) {
-               __unset_cpu_idle(vcpu);
-               return 0;
-       }
-
        sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
-
        hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
        VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
 no_timer:
        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
-       spin_lock(&vcpu->arch.local_int.float_int->lock);
-       spin_lock_bh(&vcpu->arch.local_int.lock);
-       add_wait_queue(&vcpu->wq, &wait);
-       while (list_empty(&vcpu->arch.local_int.list) &&
-               list_empty(&vcpu->arch.local_int.float_int->list) &&
-               (!vcpu->arch.local_int.timer_due) &&
-               !signal_pending(current) &&
-               !kvm_s390_si_ext_call_pending(vcpu)) {
-               set_current_state(TASK_INTERRUPTIBLE);
-               spin_unlock_bh(&vcpu->arch.local_int.lock);
-               spin_unlock(&vcpu->arch.local_int.float_int->lock);
-               schedule();
-               spin_lock(&vcpu->arch.local_int.float_int->lock);
-               spin_lock_bh(&vcpu->arch.local_int.lock);
-       }
+       kvm_vcpu_block(vcpu);
        __unset_cpu_idle(vcpu);
-       __set_current_state(TASK_RUNNING);
-       remove_wait_queue(&vcpu->wq, &wait);
-       spin_unlock_bh(&vcpu->arch.local_int.lock);
-       spin_unlock(&vcpu->arch.local_int.float_int->lock);
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 
        hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
        return 0;
 }
 
-void kvm_s390_tasklet(unsigned long parm)
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
-       struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm;
-
-       spin_lock(&vcpu->arch.local_int.lock);
-       vcpu->arch.local_int.timer_due = 1;
-       if (waitqueue_active(&vcpu->wq))
+       if (waitqueue_active(&vcpu->wq)) {
+               /*
+                * The vcpu gave up the cpu voluntarily; mark it as a good
+                * yield candidate.
+                */
+               vcpu->preempted = true;
                wake_up_interruptible(&vcpu->wq);
-       spin_unlock(&vcpu->arch.local_int.lock);
+       }
 }
 
-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
 {
        struct kvm_vcpu *vcpu;
 
        vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
-       vcpu->preempted = true;
-       tasklet_schedule(&vcpu->arch.tasklet);
+       kvm_s390_vcpu_wakeup(vcpu);
 
        return HRTIMER_NORESTART;
 }
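
The two hunks above make related simplifications: the open-coded waitqueue loop in kvm_s390_handle_wait() becomes a call to the generic kvm_vcpu_block(), and the tasklet indirection disappears because wake_up_interruptible() may be called directly from hardirq context, so the hrtimer callback can wake the vcpu itself. As a rough sketch (assuming the standard waitqueue API; request handling and other details of the real helper are omitted), the pattern kvm_vcpu_block() implements is:

    static void vcpu_block_sketch(struct kvm_vcpu *vcpu)
    {
            DEFINE_WAIT(wait);

            for (;;) {
                    prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
                    /* runnable covers pending interrupts; signals abort the wait */
                    if (kvm_arch_vcpu_runnable(vcpu) ||
                        kvm_cpu_has_pending_timer(vcpu) ||
                        signal_pending(current))
                            break;
                    schedule();
            }
            finish_wait(&vcpu->wq, &wait);
    }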
@@ -676,13 +647,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
        struct kvm_s390_interrupt_info  *n, *inti = NULL;
 
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        list_for_each_entry_safe(inti, n, &li->list, list) {
                list_del(&inti->list);
                kfree(inti);
        }
        atomic_set(&li->active, 0);
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
 
        /* clear pending external calls set by sigp interpretation facility */
        atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
@@ -701,7 +672,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
-                       spin_lock_bh(&li->lock);
+                       spin_lock(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if (__interrupt_is_deliverable(vcpu, inti)) {
                                        list_del(&inti->list);
@@ -712,7 +683,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
-                       spin_unlock_bh(&li->lock);
+                       spin_unlock(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
@@ -758,7 +729,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
        if (atomic_read(&li->active)) {
                do {
                        deliver = 0;
-                       spin_lock_bh(&li->lock);
+                       spin_lock(&li->lock);
                        list_for_each_entry_safe(inti, n, &li->list, list) {
                                if ((inti->type == KVM_S390_MCHK) &&
                                    __interrupt_is_deliverable(vcpu, inti)) {
@@ -770,7 +741,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu)
                        }
                        if (list_empty(&li->list))
                                atomic_set(&li->active, 0);
-                       spin_unlock_bh(&li->lock);
+                       spin_unlock(&li->lock);
                        if (deliver) {
                                __do_deliver_interrupt(vcpu, inti);
                                kfree(inti);
@@ -817,11 +788,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
 
        VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
        trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1);
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(li->wq));
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
        return 0;
 }
 
@@ -842,11 +813,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
 
        inti->type = KVM_S390_PROGRAM_INT;
        memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm));
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        list_add(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        BUG_ON(waitqueue_active(li->wq));
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
        return 0;
 }
 
@@ -934,12 +905,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
        }
        dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
        li = &dst_vcpu->arch.local_int;
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-       if (waitqueue_active(li->wq))
-               wake_up_interruptible(li->wq);
-       kvm_get_vcpu(kvm, sigcpu)->preempted = true;
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
+       kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
 unlock_fi:
        spin_unlock(&fi->lock);
        mutex_unlock(&kvm->lock);
@@ -1081,7 +1050,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
 
        mutex_lock(&vcpu->kvm->lock);
        li = &vcpu->arch.local_int;
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        if (inti->type == KVM_S390_PROGRAM_INT)
                list_add(&inti->list, &li->list);
        else
@@ -1090,11 +1059,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
        if (inti->type == KVM_S390_SIGP_STOP)
                li->action_bits |= ACTION_STOP_ON_STOP;
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
-       if (waitqueue_active(&vcpu->wq))
-               wake_up_interruptible(&vcpu->wq);
-       vcpu->preempted = true;
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
        mutex_unlock(&vcpu->kvm->lock);
+       kvm_s390_vcpu_wakeup(vcpu);
        return 0;
 }
 
index 2f3e14f..339b34a 100644 (file)
@@ -166,7 +166,9 @@ int kvm_dev_ioctl_check_extension(long ext)
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
+       case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
+       case KVM_CAP_MP_STATE:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
@@ -595,7 +597,8 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
-       kvm_s390_vcpu_stop(vcpu);
+       if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
+               kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
 }
 
@@ -647,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
-       tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
-                    (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
@@ -926,7 +927,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
 {
        int rc = 0;
 
-       if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
+       if (!is_vcpu_stopped(vcpu))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
@@ -980,13 +981,34 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
-       return -EINVAL; /* not implemented yet */
+       /* CHECK_STOP and LOAD are not supported yet */
+       return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
+                                      KVM_MP_STATE_OPERATING;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
-       return -EINVAL; /* not implemented yet */
+       int rc = 0;
+
+       /* user space knows about this interface - let it control the state */
+       vcpu->kvm->arch.user_cpu_state_ctrl = 1;
+
+       switch (mp_state->mp_state) {
+       case KVM_MP_STATE_STOPPED:
+               kvm_s390_vcpu_stop(vcpu);
+               break;
+       case KVM_MP_STATE_OPERATING:
+               kvm_s390_vcpu_start(vcpu);
+               break;
+       case KVM_MP_STATE_LOAD:
+       case KVM_MP_STATE_CHECK_STOP:
+               /* fall through - CHECK_STOP and LOAD are not supported yet */
+       default:
+               rc = -ENXIO;
+       }
+
+       return rc;
 }
 
 bool kvm_s390_cmma_enabled(struct kvm *kvm)
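
With KVM_CAP_MP_STATE advertised and the mp_state ioctls implemented, user space can stop and restart a vcpu directly; the first KVM_SET_MP_STATE call also switches the VM to user-controlled cpu states (user_cpu_state_ctrl = 1), disabling the kernel's automatic start/stop. A minimal user-space sketch, assuming vcpu_fd is an open KVM vcpu file descriptor and with error handling elided:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Stop a vcpu via the generic mp_state interface, then resume it. */
    static int stop_and_resume(int vcpu_fd)
    {
            struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

            if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
                    return -1;

            mp.mp_state = KVM_MP_STATE_OPERATING;
            return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
    }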
@@ -1045,6 +1067,9 @@ retry:
                goto retry;
        }
 
+       /* nothing to do, just clear the request */
+       clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
+
        return 0;
 }
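
The added clear_bit() simply discards the generic KVM_REQ_UNHALT request, which s390 does not act on since its wakeups are interrupt-driven. A hypothetical equivalent using the existing test-and-clear helper:

    /* kvm_check_request() tests and clears the request bit */
    if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
            /* nothing to do on s390 */
    }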
 
@@ -1284,7 +1309,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
 
-       kvm_s390_vcpu_start(vcpu);
+       if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
+               kvm_s390_vcpu_start(vcpu);
+       } else if (is_vcpu_stopped(vcpu)) {
+               pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
+                                  vcpu->vcpu_id);
+               return -EINVAL;
+       }
 
        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
@@ -1413,11 +1444,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
        return kvm_s390_store_status_unloaded(vcpu, addr);
 }
 
-static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
-{
-       return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
-}
-
 static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
 {
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
@@ -1451,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 
        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
-       spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+       spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
        for (i = 0; i < online_vcpus; i++) {
@@ -1477,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
         * Let's play safe and flush the VCPU at startup.
         */
        vcpu->arch.sie_block->ihcpu  = 0xffff;
-       spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+       spin_unlock(&vcpu->kvm->arch.start_stop_lock);
        return;
 }
 
@@ -1491,10 +1517,18 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 
        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
-       spin_lock_bh(&vcpu->kvm->arch.start_stop_lock);
+       spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
 
+       /* Need to lock access to action_bits to avoid a SIGP race condition */
+       spin_lock(&vcpu->arch.local_int.lock);
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+
+       /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
+       vcpu->arch.local_int.action_bits &=
+                                ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
+       spin_unlock(&vcpu->arch.local_int.lock);
+
        __disable_ibs_on_vcpu(vcpu);
 
        for (i = 0; i < online_vcpus; i++) {
@@ -1512,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
                __enable_ibs_on_vcpu(started_vcpu);
        }
 
-       spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock);
+       spin_unlock(&vcpu->kvm->arch.start_stop_lock);
        return;
 }
 
index a8655ed..3862fa2 100644 (file)
@@ -45,9 +45,9 @@ do { \
          d_args); \
 } while (0)
 
-static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu)
+static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
 {
-       return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT;
+       return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
 }
 
 static inline int kvm_is_ucontrol(struct kvm *kvm)
@@ -129,9 +129,15 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
        vcpu->arch.sie_block->gpsw.mask |= cc << 44;
 }
 
+/* are cpu states controlled by user space? */
+static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
+{
+       return kvm->arch.user_cpu_state_ctrl != 0;
+}
+
 int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
+void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
 enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
-void kvm_s390_tasklet(unsigned long parm);
 void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
 void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu);
 void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
index 43079a4..cf243ba 100644 (file)
@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
        return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED;
 }
 
-static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
+static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action)
 {
+       struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int;
        struct kvm_s390_interrupt_info *inti;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
@@ -135,7 +136,13 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;
 
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
+       if (li->action_bits & ACTION_STOP_ON_STOP) {
+               /* another SIGP STOP is pending */
+               kfree(inti);
+               rc = SIGP_CC_BUSY;
+               goto out;
+       }
        if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                kfree(inti);
                if ((action & ACTION_STORE_ON_STOP) != 0)
@@ -144,19 +151,17 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
        }
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
-       atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
-       if (waitqueue_active(li->wq))
-               wake_up_interruptible(li->wq);
+       atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+       kvm_s390_vcpu_wakeup(dst_vcpu);
 out:
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
 
        return rc;
 }
 
 static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
 {
-       struct kvm_s390_local_interrupt *li;
        struct kvm_vcpu *dst_vcpu = NULL;
        int rc;
 
@@ -166,9 +171,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
        dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
-       li = &dst_vcpu->arch.local_int;
 
-       rc = __inject_sigp_stop(li, action);
+       rc = __inject_sigp_stop(dst_vcpu, action);
 
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);
 
@@ -238,7 +242,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
        if (!inti)
                return SIGP_CC_BUSY;
 
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
@@ -253,13 +257,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
 
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
-       if (waitqueue_active(li->wq))
-               wake_up_interruptible(li->wq);
+       kvm_s390_vcpu_wakeup(dst_vcpu);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
 
        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
 out_li:
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
        return rc;
 }
 
@@ -275,9 +278,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
 
-       spin_lock_bh(&dst_vcpu->arch.local_int.lock);
+       spin_lock(&dst_vcpu->arch.local_int.lock);
        flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
-       spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
+       spin_unlock(&dst_vcpu->arch.local_int.lock);
        if (!(flags & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
@@ -338,10 +341,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;
-       spin_lock_bh(&li->lock);
+       spin_lock(&li->lock);
        if (li->action_bits & ACTION_STOP_ON_STOP)
                rc = SIGP_CC_BUSY;
-       spin_unlock_bh(&li->lock);
+       spin_unlock(&li->lock);
 
        return rc;
 }
@@ -461,12 +464,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu)
                dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
                BUG_ON(dest_vcpu == NULL);
 
-               spin_lock_bh(&dest_vcpu->arch.local_int.lock);
-               if (waitqueue_active(&dest_vcpu->wq))
-                       wake_up_interruptible(&dest_vcpu->wq);
-               dest_vcpu->preempted = true;
-               spin_unlock_bh(&dest_vcpu->arch.local_int.lock);
-
+               kvm_s390_vcpu_wakeup(dest_vcpu);
                kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED);
                return 0;
        }
index 9ddc51e..30de427 100644 (file)
 static LIST_HEAD(zpci_list);
 static DEFINE_SPINLOCK(zpci_list_lock);
 
-static void zpci_enable_irq(struct irq_data *data);
-static void zpci_disable_irq(struct irq_data *data);
-
 static struct irq_chip zpci_irq_chip = {
        .name = "zPCI",
-       .irq_unmask = zpci_enable_irq,
-       .irq_mask = zpci_disable_irq,
+       .irq_unmask = unmask_msi_irq,
+       .irq_mask = mask_msi_irq,
 };
 
 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
        return rc;
 }
 
-static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
-       int offset, pos;
-       u32 mask_bits;
-
-       if (msi->msi_attrib.is_msix) {
-               offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-                       PCI_MSIX_ENTRY_VECTOR_CTRL;
-               msi->masked = readl(msi->mask_base + offset);
-               writel(flag, msi->mask_base + offset);
-       } else if (msi->msi_attrib.maskbit) {
-               pos = (long) msi->mask_base;
-               pci_read_config_dword(msi->dev, pos, &mask_bits);
-               mask_bits &= ~(mask);
-               mask_bits |= flag & mask;
-               pci_write_config_dword(msi->dev, pos, mask_bits);
-       } else
-               return 0;
-
-       msi->msi_attrib.maskbit = !!flag;
-       return 1;
-}
-
-static void zpci_enable_irq(struct irq_data *data)
-{
-       struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-       zpci_msi_set_mask_bits(msi, 1, 0);
-}
-
-static void zpci_disable_irq(struct irq_data *data)
-{
-       struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-       zpci_msi_set_mask_bits(msi, 1, 1);
-}
-
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 
        /* Release MSI interrupts */
        list_for_each_entry(msi, &pdev->msi_list, list) {
-               zpci_msi_set_mask_bits(msi, 1, 1);
+               if (msi->msi_attrib.is_msix)
+                       default_msix_mask_irq(msi, 1);
+               else
+                       default_msi_mask_irq(msi, 1, 1);
                irq_set_msi_desc(msi->irq, NULL);
                irq_free_desc(msi->irq);
                msi->msg.address_lo = 0;
index d9a922d..851f441 100644 (file)
@@ -24,6 +24,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define current_text_addr() ({ __label__ _l; _l: &&_l; })
 
 #define cpu_relax()            barrier()
+#define cpu_relax_lowlatency()        cpu_relax()
 #define release_thread(thread) do {} while (0)
 
 /*
index 834b67c..aa2df3e 100644 (file)
@@ -57,7 +57,6 @@ config SUPERH32
        select HAVE_FUNCTION_TRACER
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_DYNAMIC_FTRACE
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE
        select ARCH_WANT_IPC_PARSE_VERSION
        select HAVE_FUNCTION_GRAPH_TRACER
index d4d16e4..bf5b3f5 100644 (file)
@@ -32,7 +32,8 @@ endif
 
 cflags-$(CONFIG_CPU_SH2)               := $(call cc-option,-m2,)
 cflags-$(CONFIG_CPU_SH2A)              += $(call cc-option,-m2a,) \
-                                          $(call cc-option,-m2a-nofpu,)
+                                          $(call cc-option,-m2a-nofpu,) \
+                                          $(call cc-option,-m4-nofpu,)
 cflags-$(CONFIG_CPU_SH3)               := $(call cc-option,-m3,)
 cflags-$(CONFIG_CPU_SH4)               := $(call cc-option,-m4,) \
        $(call cc-option,-mno-implicit-fp,-m4-nofpu)
index 5448f9b..1506897 100644 (file)
@@ -97,6 +97,7 @@ extern struct sh_cpuinfo cpu_data[];
 
 #define cpu_sleep()    __asm__ __volatile__ ("sleep" : : : "memory")
 #define cpu_relax()    barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 void default_idle(void);
 void stop_this_cpu(void *);
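
These two processor.h hunks wire the new cpu_relax_lowlatency() hint to plain cpu_relax(); the hint exists for tight spin-wait loops (such as mutex/rwsem optimistic spinning) where low wakeup latency matters more than power. A minimal, hypothetical use with an illustrative flag:

    /* spin until another CPU publishes 'done' (illustrative variable) */
    while (!ACCESS_ONCE(done))
            cpu_relax_lowlatency();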
index 3c74f53..079d70e 100644 (file)
@@ -344,6 +344,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
        struct ftrace_graph_ent trace;
        unsigned long return_hooker = (unsigned long)&return_to_handler;
 
+       if (unlikely(ftrace_graph_is_dead()))
+               return;
+
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
index 0233167..7cfd7f1 100644 (file)
@@ -128,14 +128,6 @@ static int __hw_perf_event_init(struct perf_event *event)
        if (!sh_pmu_initialized())
                return -ENODEV;
 
-       /*
-        * All of the on-chip counters are "limited", in that they have
-        * no interrupts, and are therefore unable to do sampling without
-        * further work and timer assistance.
-        */
-       if (hwc->sample_period)
-               return -EINVAL;
-
        /*
         * See if we need to reserve the counter.
         *
@@ -392,6 +384,13 @@ int register_sh_pmu(struct sh_pmu *_pmu)
 
        pr_info("Performance Events: %s support registered\n", _pmu->name);
 
+       /*
+        * All of the on-chip counters are "limited", in that they have
+        * no interrupts, and are therefore unable to do sampling without
+        * further work and timer assistance.
+        */
+       pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
        WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
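
Rather than rejecting sampling in __hw_perf_event_init(), the driver now sets PERF_PMU_CAP_NO_INTERRUPT and lets the perf core refuse sampling events generically. The user-visible behaviour should be unchanged; a hypothetical probe from user space:

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* A sampling event (sample_period != 0) is still refused, now by the
     * perf core instead of the architecture driver. */
    static int try_sampling_event(void)
    {
            struct perf_event_attr attr = {
                    .type          = PERF_TYPE_RAW,
                    .size          = sizeof(attr),
                    .sample_period = 100000,
            };

            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }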
index 52aa201..7a8572f 100644 (file)
@@ -92,13 +92,6 @@ mcount:
        rts
         nop
 #else
-#ifndef CONFIG_DYNAMIC_FTRACE
-       mov.l   .Lfunction_trace_stop, r0
-       mov.l   @r0, r0
-       tst     r0, r0
-       bf      ftrace_stub
-#endif
-
        MCOUNT_ENTER()
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -174,11 +167,6 @@ ftrace_graph_call:
 
        .globl ftrace_caller
 ftrace_caller:
-       mov.l   .Lfunction_trace_stop, r0
-       mov.l   @r0, r0
-       tst     r0, r0
-       bf      ftrace_stub
-
        MCOUNT_ENTER()
 
        .globl ftrace_call
@@ -196,8 +184,6 @@ ftrace_call:
 #endif /* CONFIG_DYNAMIC_FTRACE */
 
        .align 2
-.Lfunction_trace_stop:
-       .long   function_trace_stop
 
 /*
  * NOTE: From here on the locations of the .Lftrace_stub label and
@@ -217,12 +203,7 @@ ftrace_stub:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        .globl  ftrace_graph_caller
 ftrace_graph_caller:
-       mov.l   2f, r0
-       mov.l   @r0, r0
-       tst     r0, r0
-       bt      1f
-
-       mov.l   3f, r1
+       mov.l   2f, r1
        jmp     @r1
         nop
 1:
@@ -242,8 +223,7 @@ ftrace_graph_caller:
        MCOUNT_LEAVE()
 
        .align 2
-2:     .long   function_trace_stop
-3:     .long   skip_trace
+2:     .long   skip_trace
 .Lprepare_ftrace_return:
        .long   prepare_ftrace_return
 
index 29f2e98..4692c90 100644 (file)
@@ -55,7 +55,6 @@ config SPARC64
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_GRAPH_FP_TEST
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_KRETPROBES
        select HAVE_KPROBES
        select HAVE_RCU_TABLE_FREE if SMP
@@ -78,6 +77,7 @@ config SPARC64
        select HAVE_C_RECORDMCOUNT
        select NO_BOOTMEM
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_DEFCONFIG
        string
index 503e6d9..df922f5 100644 (file)
@@ -124,7 +124,7 @@ extern void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input,
                                      u64 *output, unsigned int len,
                                      u64 *iv);
 
-struct aes_ops aes128_ops = {
+static struct aes_ops aes128_ops = {
        .encrypt                = aes_sparc64_encrypt_128,
        .decrypt                = aes_sparc64_decrypt_128,
        .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_128,
@@ -136,7 +136,7 @@ struct aes_ops aes128_ops = {
        .ctr_crypt              = aes_sparc64_ctr_crypt_128,
 };
 
-struct aes_ops aes192_ops = {
+static struct aes_ops aes192_ops = {
        .encrypt                = aes_sparc64_encrypt_192,
        .decrypt                = aes_sparc64_decrypt_192,
        .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_192,
@@ -148,7 +148,7 @@ struct aes_ops aes192_ops = {
        .ctr_crypt              = aes_sparc64_ctr_crypt_192,
 };
 
-struct aes_ops aes256_ops = {
+static struct aes_ops aes256_ops = {
        .encrypt                = aes_sparc64_encrypt_256,
        .decrypt                = aes_sparc64_decrypt_256,
        .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_256,
index f08fe51..7aed2be 100644 (file)
 
 #define ATOMIC_INIT(i)  { (i) }
 
-extern int __atomic_add_return(int, atomic_t *);
-extern int atomic_cmpxchg(atomic_t *, int, int);
+int __atomic_add_return(int, atomic_t *);
+int atomic_cmpxchg(atomic_t *, int, int);
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-extern int __atomic_add_unless(atomic_t *, int, int);
-extern void atomic_set(atomic_t *, int);
+int __atomic_add_unless(atomic_t *, int, int);
+void atomic_set(atomic_t *, int);
 
 #define atomic_read(v)          (*(volatile int *)&(v)->counter)
 
index 8b2f1bd..bb894c8 100644 (file)
 #define atomic_set(v, i)       (((v)->counter) = i)
 #define atomic64_set(v, i)     (((v)->counter) = i)
 
-extern void atomic_add(int, atomic_t *);
-extern void atomic64_add(long, atomic64_t *);
-extern void atomic_sub(int, atomic_t *);
-extern void atomic64_sub(long, atomic64_t *);
+void atomic_add(int, atomic_t *);
+void atomic64_add(long, atomic64_t *);
+void atomic_sub(int, atomic_t *);
+void atomic64_sub(long, atomic64_t *);
 
-extern int atomic_add_ret(int, atomic_t *);
-extern long atomic64_add_ret(long, atomic64_t *);
-extern int atomic_sub_ret(int, atomic_t *);
-extern long atomic64_sub_ret(long, atomic64_t *);
+int atomic_add_ret(int, atomic_t *);
+long atomic64_add_ret(long, atomic64_t *);
+int atomic_sub_ret(int, atomic_t *);
+long atomic64_sub_ret(long, atomic64_t *);
 
 #define atomic_dec_return(v) atomic_sub_ret(1, v)
 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
@@ -107,6 +107,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-extern long atomic64_dec_if_positive(atomic64_t *v);
+long atomic64_dec_if_positive(atomic64_t *v);
 
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
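
This and the long run of sparc header hunks that follows are a mechanical sweep dropping `extern` from function prototypes. The declarations are equivalent because file-scope function declarations have external linkage by default:

    extern long atomic64_dec_if_positive(atomic64_t *v);  /* old spelling */
    long atomic64_dec_if_positive(atomic64_t *v);         /* identical meaning */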
index 13dc67f..3e09a07 100644 (file)
@@ -1,5 +1,12 @@
 #ifndef ___ASM_SPARC_AUXIO_H
 #define ___ASM_SPARC_AUXIO_H
+
+#ifndef __ASSEMBLY__
+
+extern void __iomem *auxio_register;
+
+#endif /* ifndef __ASSEMBLY__ */
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/auxio_64.h>
 #else
index 3a31977..5d685df 100644 (file)
@@ -34,8 +34,8 @@
  * NOTE: these routines are implementation dependent--
  * understand the hardware you are querying!
  */
-extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
-extern unsigned char get_auxio(void); /* .../asm/floppy.h */
+void set_auxio(unsigned char bits_on, unsigned char bits_off);
+unsigned char get_auxio(void); /* .../asm/floppy.h */
 
 /*
  * The following routines are provided for driver-compatibility
@@ -78,7 +78,7 @@ do { \
 
 
 /* AUXIO2 (Power Off Control) */
-extern __volatile__ unsigned char * auxio_power_register;
+extern volatile u8 __iomem *auxio_power_register;
 
 #define        AUXIO_POWER_DETECT_FAILURE      32
 #define        AUXIO_POWER_CLEAR_FAILURE       2
index f61cd1e..6079e59 100644 (file)
@@ -75,8 +75,6 @@
 
 #ifndef __ASSEMBLY__
 
-extern void __iomem *auxio_register;
-
 #define AUXIO_LTE_ON   1
 #define AUXIO_LTE_OFF  0
 
@@ -84,7 +82,7 @@ extern void __iomem *auxio_register;
  *
  * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
  */
-extern void auxio_set_lte(int on);
+void auxio_set_lte(int on);
 
 #define AUXIO_LED_ON   1
 #define AUXIO_LED_OFF  0
@@ -93,7 +91,7 @@ extern void auxio_set_lte(int on);
  *
  * on - AUXIO_LED_ON or AUXIO_LED_OFF
  */
-extern void auxio_set_led(int on);
+void auxio_set_led(int on);
 
 #endif /* ifndef __ASSEMBLY__ */
 
index 297b2f2..9c988bf 100644 (file)
@@ -20,8 +20,8 @@ struct bit_map {
        int num_colors;
 };
 
-extern int bit_map_string_get(struct bit_map *t, int len, int align);
-extern void bit_map_clear(struct bit_map *t, int offset, int len);
-extern void bit_map_init(struct bit_map *t, unsigned long *map, int size);
+int bit_map_string_get(struct bit_map *t, int len, int align);
+void bit_map_clear(struct bit_map *t, int offset, int len);
+void bit_map_init(struct bit_map *t, unsigned long *map, int size);
 
 #endif /* defined(_SPARC_BITEXT_H) */
index 88c9a96..600ed1d 100644 (file)
@@ -18,9 +18,9 @@
 #error only <linux/bitops.h> can be included directly
 #endif
 
-extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
-extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
-extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
 
 /*
  * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
index f1a051c..2d52240 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 
-extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
-extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
-extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
-extern void set_bit(unsigned long nr, volatile unsigned long *addr);
-extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
-extern void change_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
+void set_bit(unsigned long nr, volatile unsigned long *addr);
+void clear_bit(unsigned long nr, volatile unsigned long *addr);
+void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #include <asm-generic/bitops/non-atomic.h>
 
@@ -30,8 +30,8 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #ifdef __KERNEL__
 
-extern int ffs(int x);
-extern unsigned long __ffs(unsigned long);
+int ffs(int x);
+unsigned long __ffs(unsigned long);
 
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/sched.h>
@@ -41,10 +41,10 @@ extern unsigned long __ffs(unsigned long);
  * of bits set) of a N-bit word
  */
 
-extern unsigned long __arch_hweight64(__u64 w);
-extern unsigned int __arch_hweight32(unsigned int w);
-extern unsigned int __arch_hweight16(unsigned int w);
-extern unsigned int __arch_hweight8(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight8(unsigned int w);
 
 #include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
index 9b2bc6b..75a32b1 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef _SPARC_BTEXT_H
 #define _SPARC_BTEXT_H
 
-extern int btext_find_display(void);
+int btext_find_display(void);
 
 #endif /* _SPARC_BTEXT_H */
index 6bd9f43..eaa8f8d 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/compiler.h>
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
-extern void do_BUG(const char *file, int line);
+void do_BUG(const char *file, int line);
 #define BUG() do {                                     \
        do_BUG(__FILE__, __LINE__);                     \
        __builtin_trap();                               \
@@ -20,6 +20,6 @@ extern void do_BUG(const char *file, int line);
 #include <asm-generic/bug.h>
 
 struct pt_regs;
-extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs);
 
 #endif
index bb014c2..1216400 100644 (file)
@@ -36,7 +36,7 @@
 #define flush_page_for_dma(addr) \
        sparc32_cachetlb_ops->page_for_dma(addr)
 
-extern void sparc_flush_page_to_ram(struct page *page);
+void sparc_flush_page_to_ram(struct page *page);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)                        sparc_flush_page_to_ram(page)
@@ -51,8 +51,8 @@ extern void sparc_flush_page_to_ram(struct page *page);
  * way the windows are all clean for the next process and the stack
  * frames are up to date.
  */
-extern void flush_user_windows(void);
-extern void kill_user_windows(void);
-extern void flushw_all(void);
+void flush_user_windows(void);
+void kill_user_windows(void);
+void flushw_all(void);
 
 #endif /* _SPARC_CACHEFLUSH_H */
index 301736d..3896537 100644 (file)
@@ -10,7 +10,7 @@
 /* Cache flush operations. */
 #define flushw_all()   __asm__ __volatile__("flushw")
 
-extern void __flushw_user(void);
+void __flushw_user(void);
 #define flushw_user() __flushw_user()
 
 #define flush_user_windows flushw_user
@@ -30,29 +30,29 @@ extern void __flushw_user(void);
  * use block commit stores (which invalidate icache lines) during
  * module load, so we need this.
  */
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void __flush_icache_page(unsigned long);
+void flush_icache_range(unsigned long start, unsigned long end);
+void __flush_icache_page(unsigned long);
 
-extern void __flush_dcache_page(void *addr, int flush_icache);
-extern void flush_dcache_page_impl(struct page *page);
+void __flush_dcache_page(void *addr, int flush_icache);
+void flush_dcache_page_impl(struct page *page);
 #ifdef CONFIG_SMP
-extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
-extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
+void smp_flush_dcache_page_impl(struct page *page, int cpu);
+void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
 #else
 #define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
 #define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
 #endif
 
-extern void __flush_dcache_range(unsigned long start, unsigned long end);
+void __flush_dcache_range(unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
+void flush_dcache_page(struct page *page);
 
 #define flush_icache_page(vma, pg)     do { } while(0)
 #define flush_icache_user_range(vma,pg,adr,len)        do { } while (0)
 
-extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
-                               unsigned long uaddr, void *kaddr,
-                               unsigned long len, int write);
+void flush_ptrace_access(struct vm_area_struct *, struct page *,
+                        unsigned long uaddr, void *kaddr,
+                        unsigned long len, int write);
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
        do {                                                            \
index 04471dc..426b238 100644 (file)
@@ -29,7 +29,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from fs:src while it
  * checksums
@@ -38,7 +38,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
 
-extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
+unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
 
 static inline __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
index 2ff81ae..b8779a6 100644 (file)
@@ -29,7 +29,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern __wsum csum_partial(const void * buff, int len, __wsum sum);
+__wsum csum_partial(const void * buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from user space while it
  * checksums
@@ -37,12 +37,12 @@ extern __wsum csum_partial(const void * buff, int len, __wsum sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
-                                             int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+                                int len, __wsum sum);
 
-extern long __csum_partial_copy_from_user(const void __user *src,
-                                         void *dst, int len,
-                                         __wsum sum);
+long __csum_partial_copy_from_user(const void __user *src,
+                                  void *dst, int len,
+                                  __wsum sum);
 
 static inline __wsum
 csum_partial_copy_from_user(const void __user *src,
@@ -59,9 +59,9 @@ csum_partial_copy_from_user(const void __user *src,
  *     Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-extern long __csum_partial_copy_to_user(const void *src,
-                                       void __user *dst, int len,
-                                         __wsum sum);
+long __csum_partial_copy_to_user(const void *src,
+                                void __user *dst, int len,
+                                __wsum sum);
 
 static inline __wsum
 csum_and_copy_to_user(const void *src,
@@ -77,7 +77,7 @@ csum_and_copy_to_user(const void *src,
 /* ihl is always 5 or greater, almost always 5, and iph is word aligned
  * the majority of the time.
  */
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /* Fold a partial checksum without adding pseudo headers. */
 static inline __sum16 csum_fold(__wsum sum)
@@ -96,9 +96,9 @@ static inline __sum16 csum_fold(__wsum sum)
 }
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-                                              unsigned int len,
-                                              unsigned short proto,
-                                              __wsum sum)
+                                       unsigned int len,
+                                       unsigned short proto,
+                                       __wsum sum)
 {
        __asm__ __volatile__(
 "      addcc           %1, %0, %0\n"
@@ -116,9 +116,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  * returns a 16-bit checksum, already complemented
  */
 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-                                                  unsigned short len,
-                                                  unsigned short proto,
-                                                  __wsum sum)
+                                       unsigned short len,
+                                       unsigned short proto,
+                                       __wsum sum)
 {
        return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
index 1fae1a0..32c29a1 100644 (file)
@@ -20,7 +20,7 @@ static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned lon
        return val;
 }
 
-extern void __xchg_called_with_bad_pointer(void);
+void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
@@ -45,9 +45,9 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
 #define __HAVE_ARCH_CMPXCHG    1
 
 /* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
 /* we only need to support cmpxchg of a u32 on sparc */
-extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
 
 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
index 4adefe8..0e1ed6c 100644 (file)
@@ -42,7 +42,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 
 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
-extern void __xchg_called_with_bad_pointer(void);
+void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                                       int size)
@@ -91,7 +91,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 
 /* This function doesn't exist, so you'll get a linker error
    if something tries to do an invalid cmpxchg().  */
-extern void __cmpxchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
 
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
index b5976de..128b56b 100644 (file)
@@ -1,5 +1,15 @@
 #ifndef ___ASM_SPARC_CPUDATA_H
 #define ___ASM_SPARC_CPUDATA_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/threads.h>
+#include <linux/percpu.h>
+
+extern const struct seq_operations cpuinfo_op;
+
+#endif /* !(__ASSEMBLY__) */
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/cpudata_64.h>
 #else
index 050ef35..0e59407 100644 (file)
@@ -8,9 +8,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/percpu.h>
-#include <linux/threads.h>
-
 typedef struct {
        /* Dcache line 1 */
        unsigned int    __softirq_pending; /* must be 1st, see rtrap.S */
@@ -35,8 +32,6 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu)                per_cpu(__cpu_data, (__cpu))
 #define local_cpu_data()       __get_cpu_var(__cpu_data)
 
-extern const struct seq_operations cpuinfo_op;
-
 #endif /* !(__ASSEMBLY__) */
 
 #include <asm/trap_block.h>
index bc9aba2..3fb8ca1 100644 (file)
@@ -20,8 +20,8 @@ static inline void __delay(unsigned long loops)
 }
 
 /* This is too messy with inline asm on the Sparc. */
-extern void __udelay(unsigned long usecs, unsigned long lpj);
-extern void __ndelay(unsigned long nsecs, unsigned long lpj);
+void __udelay(unsigned long usecs, unsigned long lpj);
+void __ndelay(unsigned long nsecs, unsigned long lpj);
 
 #ifdef CONFIG_SMP
 #define __udelay_val   cpu_data(smp_processor_id()).udelay_val
index a77aa62..0ba5424 100644 (file)
@@ -8,8 +8,8 @@
 
 #ifndef __ASSEMBLY__
 
-extern void __delay(unsigned long loops);
-extern void udelay(unsigned long usecs);
+void __delay(unsigned long loops);
+void udelay(unsigned long usecs);
 #define mdelay(n)      udelay((n) * 1000)
 
 #endif /* !__ASSEMBLY__ */
index daa6a8a..bb3f0b0 100644 (file)
@@ -19,7 +19,7 @@ struct dev_archdata {
        int                     numa_node;
 };
 
-extern void of_propagate_archdata(struct platform_device *bus);
+void of_propagate_archdata(struct platform_device *bus);
 
 struct pdev_archdata {
        struct resource         resource[PROMREG_MAX];
index 05fe53f..1ee0271 100644 (file)
@@ -7,7 +7,7 @@
 
 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 
-extern int dma_supported(struct device *dev, u64 mask);
+int dma_supported(struct device *dev, u64 mask);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
index f07a5b5..fcfb494 100644 (file)
@@ -22,14 +22,14 @@ struct ebus_dma_info {
        unsigned char   name[64];
 };
 
-extern int ebus_dma_register(struct ebus_dma_info *p);
-extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
-extern void ebus_dma_unregister(struct ebus_dma_info *p);
-extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
+int ebus_dma_register(struct ebus_dma_info *p);
+int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
+void ebus_dma_unregister(struct ebus_dma_info *p);
+int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
                            size_t len);
-extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
-extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
-extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
-extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
+void ebus_dma_prepare(struct ebus_dma_info *p, int write);
+unsigned int ebus_dma_residue(struct ebus_dma_info *p);
+unsigned int ebus_dma_addr(struct ebus_dma_info *p);
+void ebus_dma_enable(struct ebus_dma_info *p, int on);
 
 #endif /* __ASM_SPARC_EBUS_DMA_H */
index fb3f169..071b83e 100644 (file)
@@ -9,11 +9,12 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/idprom.h>
 #include <asm/oplib.h>
 #include <asm/auxio.h>
+#include <asm/setup.h>
+#include <asm/page.h>
 #include <asm/irq.h>
 
 /* We don't need no stinkin' I/O port allocation crap. */
@@ -49,7 +50,6 @@ struct sun_flpy_controller {
 
 /* You'll only ever find one controller on a SparcStation anyways. */
 static struct sun_flpy_controller *sun_fdc = NULL;
-extern volatile unsigned char *fdc_status;
 
 struct sun_floppy_ops {
        unsigned char (*fd_inb)(int port);
@@ -212,13 +212,6 @@ static void sun_82077_fd_outb(unsigned char value, int port)
  * underruns.  If non-zero, doing_pdma encodes the direction of
  * the transfer for debugging.  1=read 2=write
  */
-extern char *pdma_vaddr;
-extern unsigned long pdma_size;
-extern volatile int doing_pdma;
-
-/* This is software state */
-extern char *pdma_base;
-extern unsigned long pdma_areasize;
 
 /* Common routines to all controller types on the Sparc. */
 static inline void virtual_dma_init(void)
@@ -263,8 +256,7 @@ static inline void sun_fd_enable_dma(void)
        pdma_areasize = pdma_size;
 }
 
-extern int sparc_floppy_request_irq(unsigned int irq,
-                                    irq_handler_t irq_handler);
+int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler);
 
 static int sun_fd_request_irq(void)
 {
index 7c90c50..6257564 100644 (file)
@@ -296,7 +296,7 @@ struct sun_pci_dma_op {
 static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL};
 static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL};
 
-extern irqreturn_t floppy_interrupt(int irq, void *dev_id);
+irqreturn_t floppy_interrupt(int irq, void *dev_id);
 
 static unsigned char sun_pci_fd_inb(unsigned long port)
 {
index b0f18e9..9ec94ad 100644 (file)
@@ -6,7 +6,7 @@
 #define MCOUNT_INSN_SIZE       4 /* sizeof mcount call */
 
 #ifndef __ASSEMBLY__
-extern void _mcount(void);
+void _mcount(void);
 #endif
 
 #endif
@@ -22,4 +22,8 @@ struct dyn_arch_ftrace {
 };
 #endif /*  CONFIG_DYNAMIC_FTRACE */
 
+unsigned long prepare_ftrace_return(unsigned long parent,
+                                   unsigned long self_addr,
+                                   unsigned long frame_pointer);
+
 #endif /* _ASM_SPARC64_FTRACE */
index 4f9e15c..92ded29 100644 (file)
@@ -31,7 +31,7 @@ extern unsigned long highstart_pfn, highend_pfn;
 extern pgprot_t kmap_prot;
 extern pte_t *pkmap_page_table;
 
-extern void kmap_init(void) __init;
+void kmap_init(void) __init;
 
 /*
  * Right now we initialize only a single pte table. It can be extended
@@ -49,8 +49,8 @@ extern void kmap_init(void) __init;
 
 #define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
 
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
 
 static inline void *kmap(struct page *page)
 {
@@ -68,8 +68,8 @@ static inline void kunmap(struct page *page)
        kunmap_high(page);
 }
 
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
 
 #define flush_cache_kmaps()    flush_cache_all()
 
index b2b9b94..04b56f8 100644 (file)
@@ -19,7 +19,7 @@ struct hvtramp_descr {
        struct hvtramp_mapping  maps[1];
 };
 
-extern void hv_cpu_startup(unsigned long hvdescr_pa);
+void hv_cpu_startup(unsigned long hvdescr_pa);
 
 #endif
 
index ca121f0..94b39ca 100644 (file)
@@ -98,7 +98,7 @@
 #define HV_FAST_MACH_EXIT              0x00
 
 #ifndef __ASSEMBLY__
-extern void sun4v_mach_exit(unsigned long exit_code);
+void sun4v_mach_exit(unsigned long exit_code);
 #endif
 
 /* Domain services.  */
@@ -127,9 +127,9 @@ extern void sun4v_mach_exit(unsigned long exit_code);
 #define HV_FAST_MACH_DESC              0x01
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
-                                    unsigned long buf_len,
-                                    unsigned long *real_buf_len);
+unsigned long sun4v_mach_desc(unsigned long buffer_pa,
+                             unsigned long buf_len,
+                             unsigned long *real_buf_len);
 #endif
 
 /* mach_sir()
@@ -148,7 +148,7 @@ extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
 #define HV_FAST_MACH_SIR               0x02
 
 #ifndef __ASSEMBLY__
-extern void sun4v_mach_sir(void);
+void sun4v_mach_sir(void);
 #endif
 
 /* mach_set_watchdog()
@@ -204,8 +204,8 @@ extern void sun4v_mach_sir(void);
 #define HV_FAST_MACH_SET_WATCHDOG      0x05
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
-                                            unsigned long *orig_timeout);
+unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
+                                     unsigned long *orig_timeout);
 #endif
 
 /* CPU services.
@@ -250,10 +250,10 @@ extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
 #define HV_FAST_CPU_START              0x10
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_start(unsigned long cpuid,
-                                    unsigned long pc,
-                                    unsigned long rtba,
-                                    unsigned long arg0);
+unsigned long sun4v_cpu_start(unsigned long cpuid,
+                             unsigned long pc,
+                             unsigned long rtba,
+                             unsigned long arg0);
 #endif
 
 /* cpu_stop()
@@ -278,7 +278,7 @@ extern unsigned long sun4v_cpu_start(unsigned long cpuid,
 #define HV_FAST_CPU_STOP               0x11
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
+unsigned long sun4v_cpu_stop(unsigned long cpuid);
 #endif
 
 /* cpu_yield()
@@ -295,7 +295,7 @@ extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
 #define HV_FAST_CPU_YIELD              0x12
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_yield(void);
+unsigned long sun4v_cpu_yield(void);
 #endif
 
 /* cpu_qconf()
@@ -341,9 +341,9 @@ extern unsigned long sun4v_cpu_yield(void);
 #define  HV_CPU_QUEUE_NONRES_ERROR      0x3f
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_qconf(unsigned long type,
-                                    unsigned long queue_paddr,
-                                    unsigned long num_queue_entries);
+unsigned long sun4v_cpu_qconf(unsigned long type,
+                             unsigned long queue_paddr,
+                             unsigned long num_queue_entries);
 #endif
 
 /* cpu_qinfo()
@@ -394,7 +394,9 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type,
 #define HV_FAST_CPU_MONDO_SEND         0x42
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa);
+unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
+                                  unsigned long cpu_list_pa,
+                                  unsigned long mondo_block_pa);
 #endif
 
 /* cpu_myid()
@@ -425,7 +427,7 @@ extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long
 #define  HV_CPU_STATE_ERROR             0x03
 
 #ifndef __ASSEMBLY__
-extern long sun4v_cpu_state(unsigned long cpuid);
+long sun4v_cpu_state(unsigned long cpuid);
 #endif
 
 /* cpu_set_rtba()
@@ -625,8 +627,8 @@ struct hv_fault_status {
 #define HV_FAST_MMU_TSB_CTX0           0x20
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
-                                       unsigned long tsb_desc_ra);
+unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
+                                unsigned long tsb_desc_ra);
 #endif
 
 /* mmu_tsb_ctxnon0()
@@ -710,7 +712,7 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
 #define HV_FAST_MMU_DEMAP_ALL          0x24
 
 #ifndef __ASSEMBLY__
-extern void sun4v_mmu_demap_all(void);
+void sun4v_mmu_demap_all(void);
 #endif
 
 /* mmu_map_perm_addr()
@@ -740,10 +742,10 @@ extern void sun4v_mmu_demap_all(void);
 #define HV_FAST_MMU_MAP_PERM_ADDR      0x25
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
-                                            unsigned long set_to_zero,
-                                            unsigned long tte,
-                                            unsigned long flags);
+unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
+                                     unsigned long set_to_zero,
+                                     unsigned long tte,
+                                     unsigned long flags);
 #endif
 
 /* mmu_fault_area_conf()
@@ -945,7 +947,7 @@ extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
 #define HV_FAST_TOD_GET                        0x50
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_tod_get(unsigned long *time);
+unsigned long sun4v_tod_get(unsigned long *time);
 #endif
 
 /* tod_set()
@@ -962,7 +964,7 @@ extern unsigned long sun4v_tod_get(unsigned long *time);
 #define HV_FAST_TOD_SET                        0x51
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_tod_set(unsigned long time);
+unsigned long sun4v_tod_set(unsigned long time);
 #endif
 
 /* Console services */
@@ -1038,14 +1040,14 @@ extern unsigned long sun4v_tod_set(unsigned long time);
 #define HV_FAST_CONS_WRITE             0x63
 
 #ifndef __ASSEMBLY__
-extern long sun4v_con_getchar(long *status);
-extern long sun4v_con_putchar(long c);
-extern long sun4v_con_read(unsigned long buffer,
-                          unsigned long size,
-                          unsigned long *bytes_read);
-extern unsigned long sun4v_con_write(unsigned long buffer,
-                                    unsigned long size,
-                                    unsigned long *bytes_written);
+long sun4v_con_getchar(long *status);
+long sun4v_con_putchar(long c);
+long sun4v_con_read(unsigned long buffer,
+                   unsigned long size,
+                   unsigned long *bytes_read);
+unsigned long sun4v_con_write(unsigned long buffer,
+                             unsigned long size,
+                             unsigned long *bytes_written);
 #endif
 
 /* mach_set_soft_state()
@@ -1080,8 +1082,8 @@ extern unsigned long sun4v_con_write(unsigned long buffer,
 #define  HV_SOFT_STATE_TRANSITION       0x02
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
-                                              unsigned long msg_string_ra);
+unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
+                                       unsigned long msg_string_ra);
 #endif
 
 /* mach_get_soft_state()
@@ -1159,20 +1161,20 @@ extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
 #define HV_FAST_SVC_CLRSTATUS          0x84
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_svc_send(unsigned long svc_id,
-                                   unsigned long buffer,
-                                   unsigned long buffer_size,
-                                   unsigned long *sent_bytes);
-extern unsigned long sun4v_svc_recv(unsigned long svc_id,
-                                   unsigned long buffer,
-                                   unsigned long buffer_size,
-                                   unsigned long *recv_bytes);
-extern unsigned long sun4v_svc_getstatus(unsigned long svc_id,
-                                        unsigned long *status_bits);
-extern unsigned long sun4v_svc_setstatus(unsigned long svc_id,
-                                        unsigned long status_bits);
-extern unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
-                                        unsigned long status_bits);
+unsigned long sun4v_svc_send(unsigned long svc_id,
+                            unsigned long buffer,
+                            unsigned long buffer_size,
+                            unsigned long *sent_bytes);
+unsigned long sun4v_svc_recv(unsigned long svc_id,
+                            unsigned long buffer,
+                            unsigned long buffer_size,
+                            unsigned long *recv_bytes);
+unsigned long sun4v_svc_getstatus(unsigned long svc_id,
+                                 unsigned long *status_bits);
+unsigned long sun4v_svc_setstatus(unsigned long svc_id,
+                                 unsigned long status_bits);
+unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
+                                 unsigned long status_bits);
 #endif
 
 /* Trap trace services.
@@ -1458,8 +1460,8 @@ struct hv_trap_trace_entry {
 #define HV_FAST_INTR_DEVINO2SYSINO     0xa0
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
-                                           unsigned long devino);
+unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
+                                    unsigned long devino);
 #endif
 
 /* intr_getenabled()
@@ -1476,7 +1478,7 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
 #define HV_FAST_INTR_GETENABLED                0xa1
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
+unsigned long sun4v_intr_getenabled(unsigned long sysino);
 #endif
 
 /* intr_setenabled()
@@ -1492,7 +1494,8 @@ extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
 #define HV_FAST_INTR_SETENABLED                0xa2
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
+unsigned long sun4v_intr_setenabled(unsigned long sysino,
+                                   unsigned long intr_enabled);
 #endif
 
 /* intr_getstate()
@@ -1508,7 +1511,7 @@ extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long i
 #define HV_FAST_INTR_GETSTATE          0xa3
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_getstate(unsigned long sysino);
+unsigned long sun4v_intr_getstate(unsigned long sysino);
 #endif
 
 /* intr_setstate()
@@ -1528,7 +1531,7 @@ extern unsigned long sun4v_intr_getstate(unsigned long sysino);
 #define HV_FAST_INTR_SETSTATE          0xa4
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
+unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
 #endif
 
 /* intr_gettarget()
@@ -1546,7 +1549,7 @@ extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long int
 #define HV_FAST_INTR_GETTARGET         0xa5
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
+unsigned long sun4v_intr_gettarget(unsigned long sysino);
 #endif
 
 /* intr_settarget()
@@ -1563,7 +1566,7 @@ extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
 #define HV_FAST_INTR_SETTARGET         0xa6
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
+unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
 #endif
 
 /* vintr_get_cookie()
@@ -1647,30 +1650,30 @@ extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cp
 #define HV_FAST_VINTR_SET_TARGET       0xae
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
-                                           unsigned long dev_ino,
-                                           unsigned long *cookie);
-extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
-                                           unsigned long dev_ino,
-                                           unsigned long cookie);
-extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
-                                          unsigned long dev_ino,
-                                          unsigned long *valid);
-extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
-                                          unsigned long dev_ino,
-                                          unsigned long valid);
-extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
-                                          unsigned long dev_ino,
-                                          unsigned long *state);
-extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
-                                          unsigned long dev_ino,
-                                          unsigned long state);
-extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
-                                           unsigned long dev_ino,
-                                           unsigned long *cpuid);
-extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
-                                           unsigned long dev_ino,
-                                           unsigned long cpuid);
+unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
+                                    unsigned long dev_ino,
+                                    unsigned long *cookie);
+unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
+                                    unsigned long dev_ino,
+                                    unsigned long cookie);
+unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
+                                   unsigned long dev_ino,
+                                   unsigned long *valid);
+unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
+                                   unsigned long dev_ino,
+                                   unsigned long valid);
+unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
+                                   unsigned long dev_ino,
+                                   unsigned long *state);
+unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
+                                   unsigned long dev_ino,
+                                   unsigned long state);
+unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
+                                    unsigned long dev_ino,
+                                    unsigned long *cpuid);
+unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
+                                    unsigned long dev_ino,
+                                    unsigned long cpuid);
 #endif
 
 /* PCI IO services.
@@ -2627,50 +2630,50 @@ struct ldc_mtable_entry {
 #define HV_FAST_LDC_REVOKE             0xef
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
-                                       unsigned long ra,
-                                       unsigned long num_entries);
-extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
-                                       unsigned long *ra,
-                                       unsigned long *num_entries);
-extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
-                                           unsigned long *head_off,
-                                           unsigned long *tail_off,
-                                           unsigned long *chan_state);
-extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
-                                           unsigned long tail_off);
-extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
-                                       unsigned long ra,
-                                       unsigned long num_entries);
-extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
-                                       unsigned long *ra,
-                                       unsigned long *num_entries);
-extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
-                                           unsigned long *head_off,
-                                           unsigned long *tail_off,
-                                           unsigned long *chan_state);
-extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
-                                           unsigned long head_off);
-extern unsigned long sun4v_ldc_set_map_table(unsigned long channel,
-                                            unsigned long ra,
-                                            unsigned long num_entries);
-extern unsigned long sun4v_ldc_get_map_table(unsigned long channel,
-                                            unsigned long *ra,
-                                            unsigned long *num_entries);
-extern unsigned long sun4v_ldc_copy(unsigned long channel,
-                                   unsigned long dir_code,
-                                   unsigned long tgt_raddr,
-                                   unsigned long lcl_raddr,
-                                   unsigned long len,
-                                   unsigned long *actual_len);
-extern unsigned long sun4v_ldc_mapin(unsigned long channel,
-                                    unsigned long cookie,
-                                    unsigned long *ra,
-                                    unsigned long *perm);
-extern unsigned long sun4v_ldc_unmap(unsigned long ra);
-extern unsigned long sun4v_ldc_revoke(unsigned long channel,
-                                     unsigned long cookie,
-                                     unsigned long mte_cookie);
+unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
+                                unsigned long ra,
+                                unsigned long num_entries);
+unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
+                                unsigned long *ra,
+                                unsigned long *num_entries);
+unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
+                                    unsigned long *head_off,
+                                    unsigned long *tail_off,
+                                    unsigned long *chan_state);
+unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
+                                    unsigned long tail_off);
+unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
+                                unsigned long ra,
+                                unsigned long num_entries);
+unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
+                                unsigned long *ra,
+                                unsigned long *num_entries);
+unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
+                                    unsigned long *head_off,
+                                    unsigned long *tail_off,
+                                    unsigned long *chan_state);
+unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
+                                    unsigned long head_off);
+unsigned long sun4v_ldc_set_map_table(unsigned long channel,
+                                     unsigned long ra,
+                                     unsigned long num_entries);
+unsigned long sun4v_ldc_get_map_table(unsigned long channel,
+                                     unsigned long *ra,
+                                     unsigned long *num_entries);
+unsigned long sun4v_ldc_copy(unsigned long channel,
+                            unsigned long dir_code,
+                            unsigned long tgt_raddr,
+                            unsigned long lcl_raddr,
+                            unsigned long len,
+                            unsigned long *actual_len);
+unsigned long sun4v_ldc_mapin(unsigned long channel,
+                             unsigned long cookie,
+                             unsigned long *ra,
+                             unsigned long *perm);
+unsigned long sun4v_ldc_unmap(unsigned long ra);
+unsigned long sun4v_ldc_revoke(unsigned long channel,
+                              unsigned long cookie,
+                              unsigned long mte_cookie);
 #endif
 
 /* Performance counter services.  */
@@ -2727,14 +2730,14 @@ extern unsigned long sun4v_ldc_revoke(unsigned long channel,
 #define HV_FAST_N2_SET_PERFREG         0x105
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_niagara_getperf(unsigned long reg,
-                                          unsigned long *val);
-extern unsigned long sun4v_niagara_setperf(unsigned long reg,
-                                          unsigned long val);
-extern unsigned long sun4v_niagara2_getperf(unsigned long reg,
-                                           unsigned long *val);
-extern unsigned long sun4v_niagara2_setperf(unsigned long reg,
-                                           unsigned long val);
+unsigned long sun4v_niagara_getperf(unsigned long reg,
+                                   unsigned long *val);
+unsigned long sun4v_niagara_setperf(unsigned long reg,
+                                   unsigned long val);
+unsigned long sun4v_niagara2_getperf(unsigned long reg,
+                                    unsigned long *val);
+unsigned long sun4v_niagara2_setperf(unsigned long reg,
+                                    unsigned long val);
 #endif
 
 /* MMU statistics services.
@@ -2829,8 +2832,8 @@ struct hv_mmu_statistics {
 #define HV_FAST_MMUSTAT_INFO           0x103
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
-extern unsigned long sun4v_mmustat_info(unsigned long *ra);
+unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
+unsigned long sun4v_mmustat_info(unsigned long *ra);
 #endif
 
 /* NCS crypto services  */
@@ -2919,9 +2922,9 @@ struct hv_ncs_qtail_update_arg {
 #define HV_FAST_NCS_REQUEST            0x110
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_request(unsigned long request,
-                                      unsigned long arg_ra,
-                                      unsigned long arg_size);
+unsigned long sun4v_ncs_request(unsigned long request,
+                               unsigned long arg_ra,
+                               unsigned long arg_size);
 #endif
 
 #define HV_FAST_FIRE_GET_PERFREG       0x120
@@ -2930,18 +2933,18 @@ extern unsigned long sun4v_ncs_request(unsigned long request,
 #define HV_FAST_REBOOT_DATA_SET                0x172
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_reboot_data_set(unsigned long ra,
-                                          unsigned long len);
+unsigned long sun4v_reboot_data_set(unsigned long ra,
+                                   unsigned long len);
 #endif
 
 #define HV_FAST_VT_GET_PERFREG         0x184
 #define HV_FAST_VT_SET_PERFREG         0x185
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
-                                         unsigned long *reg_val);
-extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
-                                         unsigned long reg_val);
+unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
+                                  unsigned long *reg_val);
+unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+                                  unsigned long reg_val);
 #endif
 
 /* Function numbers for HV_CORE_TRAP.  */
@@ -2978,21 +2981,21 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
 #define HV_GRP_DIAG                    0x0300
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_get_version(unsigned long group,
-                                      unsigned long *major,
-                                      unsigned long *minor);
-extern unsigned long sun4v_set_version(unsigned long group,
-                                      unsigned long major,
-                                      unsigned long minor,
-                                      unsigned long *actual_minor);
-
-extern int sun4v_hvapi_register(unsigned long group, unsigned long major,
-                               unsigned long *minor);
-extern void sun4v_hvapi_unregister(unsigned long group);
-extern int sun4v_hvapi_get(unsigned long group,
-                          unsigned long *major,
-                          unsigned long *minor);
-extern void sun4v_hvapi_init(void);
+unsigned long sun4v_get_version(unsigned long group,
+                               unsigned long *major,
+                               unsigned long *minor);
+unsigned long sun4v_set_version(unsigned long group,
+                               unsigned long major,
+                               unsigned long minor,
+                               unsigned long *actual_minor);
+
+int sun4v_hvapi_register(unsigned long group, unsigned long major,
+                        unsigned long *minor);
+void sun4v_hvapi_unregister(unsigned long group);
+int sun4v_hvapi_get(unsigned long group,
+                   unsigned long *major,
+                   unsigned long *minor);
+void sun4v_hvapi_init(void);
 #endif
 
 #endif /* !(_SPARC64_HYPERVISOR_H) */
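
All of the hypervisor.h hunks above are mechanical: they drop the now-redundant
"extern" from function declarations and re-wrap the long ones, without changing
any signature. For context, a minimal caller sketch (not part of the patch; the
group choice and version numbers are illustrative) of negotiating an API group
through the declarations above before issuing its hvcalls:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <asm/hypervisor.h>

    /* Hypothetical init: register v1.x of the diagnostics group. */
    static int __init demo_hvapi_init(void)
    {
            unsigned long major = 1, minor = 0;

            if (sun4v_hvapi_register(HV_GRP_DIAG, major, &minor))
                    return -ENODEV;
            pr_info("HV_GRP_DIAG registered, v%lu.%lu\n", major, minor);
            sun4v_hvapi_unregister(HV_GRP_DIAG);
            return 0;
    }
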
index 6976aa2..3793f7f 100644
@@ -20,6 +20,6 @@ struct idprom {
 };
 
 extern struct idprom *idprom;
-extern void idprom_init(void);
+void idprom_init(void);
 
 #endif /* !(_SPARC_IDPROM_H) */
index 01ab2f6..04a9701 100644
@@ -43,7 +43,7 @@
 struct iounit_struct {
        unsigned long           bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned long)];
        spinlock_t              lock;
-       iopte_t                 *page_table;
+       iopte_t __iomem         *page_table;
        unsigned long           rotor[3];
        unsigned long           limit[4];
 };
index c1acbd8..9f53290 100644
 #define __SPARC_IO_H
 
 #include <linux/kernel.h>
-#include <linux/types.h>
 #include <linux/ioport.h>  /* struct resource */
 
-#include <asm/page.h>      /* IO address mapping routines need this */
-#include <asm-generic/pci_iomap.h>
-
-#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
-
-static inline u32 flip_dword (u32 l)
-{
-       return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
-}
-
-static inline u16 flip_word (u16 w)
-{
-       return ((w&0xff) << 8) | ((w>>8)&0xff);
-}
-
-#define mmiowb()
-
-/*
- * Memory mapped I/O to PCI
- */
-
-static inline u8 __raw_readb(const volatile void __iomem *addr)
-{
-       return *(__force volatile u8 *)addr;
-}
-
-static inline u16 __raw_readw(const volatile void __iomem *addr)
-{
-       return *(__force volatile u16 *)addr;
-}
-
-static inline u32 __raw_readl(const volatile void __iomem *addr)
-{
-       return *(__force volatile u32 *)addr;
-}
+#define readb_relaxed(__addr)  readb(__addr)
+#define readw_relaxed(__addr)  readw(__addr)
+#define readl_relaxed(__addr)  readl(__addr)
 
-static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
-{
-       *(__force volatile u8 *)addr = b;
-}
+#define IO_SPACE_LIMIT 0xffffffff
 
-static inline void __raw_writew(u16 w, volatile void __iomem *addr)
-{
-       *(__force volatile u16 *)addr = w;
-}
+#define memset_io(d,c,sz)     _memset_io(d,c,sz)
+#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
+#define memcpy_toio(d,s,sz)   _memcpy_toio(d,s,sz)
 
-static inline void __raw_writel(u32 l, volatile void __iomem *addr)
-{
-       *(__force volatile u32 *)addr = l;
-}
+#include <asm-generic/io.h>
 
-static inline u8 __readb(const volatile void __iomem *addr)
+static inline void _memset_io(volatile void __iomem *dst,
+                              int c, __kernel_size_t n)
 {
-       return *(__force volatile u8 *)addr;
-}
+       volatile void __iomem *d = dst;
 
-static inline u16 __readw(const volatile void __iomem *addr)
-{
-       return flip_word(*(__force volatile u16 *)addr);
+       while (n--) {
+               writeb(c, d);
+               d++;
+       }
 }
 
-static inline u32 __readl(const volatile void __iomem *addr)
+static inline void _memcpy_fromio(void *dst, const volatile void __iomem *src,
+                                  __kernel_size_t n)
 {
-       return flip_dword(*(__force volatile u32 *)addr);
-}
+       char *d = dst;
 
-static inline void __writeb(u8 b, volatile void __iomem *addr)
-{
-       *(__force volatile u8 *)addr = b;
+       while (n--) {
+               char tmp = readb(src);
+               *d++ = tmp;
+               src++;
+       }
 }
 
-static inline void __writew(u16 w, volatile void __iomem *addr)
+static inline void _memcpy_toio(volatile void __iomem *dst, const void *src,
+                                __kernel_size_t n)
 {
-       *(__force volatile u16 *)addr = flip_word(w);
-}
+       const char *s = src;
+       volatile void __iomem *d = dst;
 
-static inline void __writel(u32 l, volatile void __iomem *addr)
-{
-       *(__force volatile u32 *)addr = flip_dword(l);
+       while (n--) {
+               char tmp = *s++;
+               writeb(tmp, d);
+               d++;
+       }
 }
 
-#define readb(__addr)          __readb(__addr)
-#define readw(__addr)          __readw(__addr)
-#define readl(__addr)          __readl(__addr)
-#define readb_relaxed(__addr)  readb(__addr)
-#define readw_relaxed(__addr)  readw(__addr)
-#define readl_relaxed(__addr)  readl(__addr)
-
-#define writeb(__b, __addr)    __writeb((__b),(__addr))
-#define writew(__w, __addr)    __writew((__w),(__addr))
-#define writel(__l, __addr)    __writel((__l),(__addr))
-
-/*
- * I/O space operations
- *
- * Arrangement on a Sun is somewhat complicated.
- *
- * First of all, we want to use standard Linux drivers
- * for keyboard, PC serial, etc. These drivers think
- * they access I/O space and use inb/outb.
- * On the other hand, EBus bridge accepts PCI *memory*
- * cycles and converts them into ISA *I/O* cycles.
- * Ergo, we want inb & outb to generate PCI memory cycles.
- *
- * If we want to issue PCI *I/O* cycles, we do this
- * with a low 64K fixed window in PCIC. This window gets
- * mapped somewhere into virtual kernel space and we
- * can use inb/outb again.
- */
-#define inb_local(__addr)      __readb((void __iomem *)(unsigned long)(__addr))
-#define inb(__addr)            __readb((void __iomem *)(unsigned long)(__addr))
-#define inw(__addr)            __readw((void __iomem *)(unsigned long)(__addr))
-#define inl(__addr)            __readl((void __iomem *)(unsigned long)(__addr))
-
-#define outb_local(__b, __addr)        __writeb(__b, (void __iomem *)(unsigned long)(__addr))
-#define outb(__b, __addr)      __writeb(__b, (void __iomem *)(unsigned long)(__addr))
-#define outw(__w, __addr)      __writew(__w, (void __iomem *)(unsigned long)(__addr))
-#define outl(__l, __addr)      __writel(__l, (void __iomem *)(unsigned long)(__addr))
-
-#define inb_p(__addr)          inb(__addr)
-#define outb_p(__b, __addr)    outb(__b, __addr)
-#define inw_p(__addr)          inw(__addr)
-#define outw_p(__w, __addr)    outw(__w, __addr)
-#define inl_p(__addr)          inl(__addr)
-#define outl_p(__l, __addr)    outl(__l, __addr)
-
-void outsb(unsigned long addr, const void *src, unsigned long cnt);
-void outsw(unsigned long addr, const void *src, unsigned long cnt);
-void outsl(unsigned long addr, const void *src, unsigned long cnt);
-void insb(unsigned long addr, void *dst, unsigned long count);
-void insw(unsigned long addr, void *dst, unsigned long count);
-void insl(unsigned long addr, void *dst, unsigned long count);
-
-#define IO_SPACE_LIMIT 0xffffffff
-
 /*
  * SBus accessors.
  *
  * SBus has only one, memory mapped, I/O space.
  * We do not need to flip bytes for SBus of course.
  */
-static inline u8 _sbus_readb(const volatile void __iomem *addr)
+static inline u8 sbus_readb(const volatile void __iomem *addr)
 {
        return *(__force volatile u8 *)addr;
 }
 
-static inline u16 _sbus_readw(const volatile void __iomem *addr)
+static inline u16 sbus_readw(const volatile void __iomem *addr)
 {
        return *(__force volatile u16 *)addr;
 }
 
-static inline u32 _sbus_readl(const volatile void __iomem *addr)
+static inline u32 sbus_readl(const volatile void __iomem *addr)
 {
        return *(__force volatile u32 *)addr;
 }
 
-static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
+static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
 {
        *(__force volatile u8 *)addr = b;
 }
 
-static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
+static inline void sbus_writew(u16 w, volatile void __iomem *addr)
 {
        *(__force volatile u16 *)addr = w;
 }
 
-static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
+static inline void sbus_writel(u32 l, volatile void __iomem *addr)
 {
        *(__force volatile u32 *)addr = l;
 }
 
-/*
- * The only reason for #define's is to hide casts to unsigned long.
- */
-#define sbus_readb(__addr)             _sbus_readb(__addr)
-#define sbus_readw(__addr)             _sbus_readw(__addr)
-#define sbus_readl(__addr)             _sbus_readl(__addr)
-#define sbus_writeb(__b, __addr)       _sbus_writeb(__b, __addr)
-#define sbus_writew(__w, __addr)       _sbus_writew(__w, __addr)
-#define sbus_writel(__l, __addr)       _sbus_writel(__l, __addr)
-
-static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_size_t n)
+static inline void sbus_memset_io(volatile void __iomem *__dst, int c,
+                                  __kernel_size_t n)
 {
        while(n--) {
                sbus_writeb(c, __dst);
@@ -194,22 +97,9 @@ static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_
        }
 }
 
-static inline void
-_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
-{
-       volatile void __iomem *d = dst;
-
-       while (n--) {
-               writeb(c, d);
-               d++;
-       }
-}
-
-#define memset_io(d,c,sz)      _memset_io(d,c,sz)
-
-static inline void
-_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
-                   __kernel_size_t n)
+static inline void sbus_memcpy_fromio(void *dst,
+                                      const volatile void __iomem *src,
+                                      __kernel_size_t n)
 {
        char *d = dst;
 
@@ -220,25 +110,9 @@ _sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
        }
 }
 
-#define sbus_memcpy_fromio(d, s, sz)   _sbus_memcpy_fromio(d, s, sz)
-
-static inline void
-_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
-{
-       char *d = dst;
-
-       while (n--) {
-               char tmp = readb(src);
-               *d++ = tmp;
-               src++;
-       }
-}
-
-#define memcpy_fromio(d,s,sz)  _memcpy_fromio(d,s,sz)
-
-static inline void
-_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
-                 __kernel_size_t n)
+static inline void sbus_memcpy_toio(volatile void __iomem *dst,
+                                    const void *src,
+                                    __kernel_size_t n)
 {
        const char *s = src;
        volatile void __iomem *d = dst;
@@ -250,81 +124,26 @@ _sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
        }
 }
 
-#define sbus_memcpy_toio(d, s, sz)     _sbus_memcpy_toio(d, s, sz)
-
-static inline void
-_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
-{
-       const char *s = src;
-       volatile void __iomem *d = dst;
-
-       while (n--) {
-               char tmp = *s++;
-               writeb(tmp, d);
-               d++;
-       }
-}
-
-#define memcpy_toio(d,s,sz)    _memcpy_toio(d,s,sz)
-
 #ifdef __KERNEL__
 
 /*
  * Bus number may be embedded in the higher bits of the physical address.
  * This is why we have no bus number argument to ioremap().
  */
-extern void __iomem *ioremap(unsigned long offset, unsigned long size);
+void __iomem *ioremap(unsigned long offset, unsigned long size);
 #define ioremap_nocache(X,Y)   ioremap((X),(Y))
 #define ioremap_wc(X,Y)                ioremap((X),(Y))
-extern void iounmap(volatile void __iomem *addr);
-
-#define ioread8(X)                     readb(X)
-#define ioread16(X)                    readw(X)
-#define ioread16be(X)                  __raw_readw(X)
-#define ioread32(X)                    readl(X)
-#define ioread32be(X)                  __raw_readl(X)
-#define iowrite8(val,X)                        writeb(val,X)
-#define iowrite16(val,X)               writew(val,X)
-#define iowrite16be(val,X)             __raw_writew(val,X)
-#define iowrite32(val,X)               writel(val,X)
-#define iowrite32be(val,X)             __raw_writel(val,X)
-
-static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
-{
-       insb((unsigned long __force)port, buf, count);
-}
-static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
-{
-       insw((unsigned long __force)port, buf, count);
-}
-
-static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
-{
-       insl((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-       outsb((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-       outsw((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-       outsl((unsigned long __force)port, buf, count);
-}
+void iounmap(volatile void __iomem *addr);
 
 /* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
+void __iomem *ioport_map(unsigned long port, unsigned int nr);
+void ioport_unmap(void __iomem *);
 
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
 
 /*
  * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
@@ -343,21 +162,11 @@ static inline int sbus_can_burst64(void)
        return 0; /* actually, sparc_cpu_model==sun4d */
 }
 struct device;
-extern void sbus_set_sbus64(struct device *, int);
+void sbus_set_sbus64(struct device *, int);
 
 #endif
 
 #define __ARCH_HAS_NO_PAGE_ZERO_MAPPED         1
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)   __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)  p
 
 #endif /* !(__SPARC_IO_H) */
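
The io_32.h rewrite above removes the private byte-flipping read/write family,
the inb/outb wrappers and the ins/outs string helpers, and pulls the standard
accessors from asm-generic/io.h instead; only the SBus accessors and the
_mem*_io loops stay local. A hedged usage sketch on top of the new arrangement
(the physical base and register offset are made-up placeholders):

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/types.h>

    #define DEMO_PHYS_BASE  0xf0000000UL    /* placeholder address */
    #define DEMO_STATUS     0x04            /* placeholder register */

    static int demo_touch_regs(void)
    {
            void __iomem *regs = ioremap(DEMO_PHYS_BASE, 0x100);
            u32 status;

            if (!regs)
                    return -ENOMEM;
            status = readl(regs + DEMO_STATUS);
            writel(status, regs + DEMO_STATUS);
            iounmap(regs);
            return 0;
    }
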
index 09b0b88..05381c3 100644
@@ -15,7 +15,6 @@
 
 /* BIO layer definitions. */
 extern unsigned long kern_base, kern_size;
-#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
 
 static inline u8 _inb(unsigned long addr)
 {
@@ -91,12 +90,12 @@ static inline void _outl(u32 l, unsigned long addr)
 #define inl_p(__addr)          inl(__addr)
 #define outl_p(__l, __addr)    outl(__l, __addr)
 
-extern void outsb(unsigned long, const void *, unsigned long);
-extern void outsw(unsigned long, const void *, unsigned long);
-extern void outsl(unsigned long, const void *, unsigned long);
-extern void insb(unsigned long, void *, unsigned long);
-extern void insw(unsigned long, void *, unsigned long);
-extern void insl(unsigned long, void *, unsigned long);
+void outsb(unsigned long, const void *, unsigned long);
+void outsw(unsigned long, const void *, unsigned long);
+void outsl(unsigned long, const void *, unsigned long);
+void insb(unsigned long, void *, unsigned long);
+void insw(unsigned long, void *, unsigned long);
+void insl(unsigned long, void *, unsigned long);
 
 static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
 {
@@ -509,12 +508,12 @@ static inline void iounmap(volatile void __iomem *addr)
 #define iowrite32be(val,X)             __raw_writel(val,X)
 
 /* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
+void __iomem *ioport_map(unsigned long port, unsigned int nr);
+void ioport_unmap(void __iomem *);
 
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+void pci_iounmap(struct pci_dev *dev, void __iomem *);
 
 static inline int sbus_can_dma_64bit(void)
 {
@@ -525,7 +524,7 @@ static inline int sbus_can_burst64(void)
        return 1;
 }
 struct device;
-extern void sbus_set_sbus64(struct device *, int);
+void sbus_set_sbus64(struct device *, int);
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
index 70c589c..f6c066b 100644
@@ -99,7 +99,7 @@ struct iommu_regs {
 #define IOPTE_WAZ           0x00000001 /* Write as zeros */
 
 struct iommu_struct {
-       struct iommu_regs *regs;
+       struct iommu_regs __iomem *regs;
        iopte_t *page_table;
        /* For convenience */
        unsigned long start; /* First managed virtual address */
@@ -108,14 +108,14 @@ struct iommu_struct {
        struct bit_map usemap;
 };
 
-static inline void iommu_invalidate(struct iommu_regs *regs)
+static inline void iommu_invalidate(struct iommu_regs __iomem *regs)
 {
-       regs->tlbflush = 0;
+       sbus_writel(0, &regs->tlbflush);
 }
 
-static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
+static inline void iommu_invalidate_page(struct iommu_regs __iomem *regs, unsigned long ba)
 {
-       regs->pageflush = (ba & PAGE_MASK);
+       sbus_writel(ba & PAGE_MASK, &regs->pageflush);
 }
 
 #endif /* !(_SPARC_IOMMU_H) */
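
The iommu_32.h hunk above shows the pattern behind several of these
conversions: the register pointer gains a __iomem annotation, and direct
stores such as "regs->tlbflush = 0" become sbus_writel() calls, so sparse can
type-check every MMIO access. The same shape on a hypothetical register block:

    #include <linux/types.h>
    #include <asm/io.h>

    struct demo_regs {              /* hypothetical MMIO layout */
            u32     control;
            u32     flush;
    };

    static inline void demo_flush(struct demo_regs __iomem *regs)
    {
            /* replaces a direct "regs->flush = 0;" store */
            sbus_writel(0, &regs->flush);
    }
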
index caf798b..2b9321a 100644
@@ -58,8 +58,8 @@ struct strbuf {
        volatile unsigned long  __flushflag_buf[(64+(64-1)) / sizeof(long)];
 };
 
-extern int iommu_table_init(struct iommu *iommu, int tsbsize,
-                           u32 dma_offset, u32 dma_addr_mask,
-                           int numa_node);
+int iommu_table_init(struct iommu *iommu, int tsbsize,
+                    u32 dma_offset, u32 dma_addr_mask,
+                    int numa_node);
 
 #endif /* !(_SPARC64_IOMMU_H) */
index 2ae3aca..eecd3d8 100644
@@ -16,7 +16,8 @@
 
 #define irq_canonicalize(irq)  (irq)
 
-extern void __init init_IRQ(void);
+void __init init_IRQ(void);
+void __init sun4d_init_sbi_irq(void);
 
 #define NO_IRQ         0xffffffff
 
index abf6afe..91d2193 100644
  */
 #define NR_IRQS    255
 
-extern void irq_install_pre_handler(int irq,
-                                   void (*func)(unsigned int, void *, void *),
-                                   void *arg1, void *arg2);
+void irq_install_pre_handler(int irq,
+                            void (*func)(unsigned int, void *, void *),
+                            void *arg1, void *arg2);
 #define irq_canonicalize(irq)  (irq)
-extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
-extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
-                                   unsigned int msi_devino_start,
-                                   unsigned int msi_devino_end);
-extern void sun4v_destroy_msi(unsigned int irq);
-extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
-                                   unsigned int msi_devino_start,
-                                   unsigned int msi_devino_end,
-                                   unsigned long imap_base,
-                                   unsigned long iclr_base);
-extern void sun4u_destroy_msi(unsigned int irq);
-
-extern unsigned char irq_alloc(unsigned int dev_handle,
-                                   unsigned int dev_ino);
+unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
+unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
+                            unsigned int msi_devino_start,
+                            unsigned int msi_devino_end);
+void sun4v_destroy_msi(unsigned int irq);
+unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
+                            unsigned int msi_devino_start,
+                            unsigned int msi_devino_end,
+                            unsigned long imap_base,
+                            unsigned long iclr_base);
+void sun4u_destroy_msi(unsigned int irq);
+
+unsigned char irq_alloc(unsigned int dev_handle,
+                       unsigned int dev_ino);
 #ifdef CONFIG_PCI_MSI
-extern void irq_free(unsigned int irq);
+void irq_free(unsigned int irq);
 #endif
 
-extern void __init init_IRQ(void);
-extern void fixup_irqs(void);
+void __init init_IRQ(void);
+void fixup_irqs(void);
 
 static inline void set_softint(unsigned long bits)
 {
@@ -89,7 +89,7 @@ static inline unsigned long get_softint(void)
        return retval;
 }
 
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
index e414c06..71cc284 100644
@@ -15,9 +15,9 @@
 #include <linux/types.h>
 #include <asm/psr.h>
 
-extern void arch_local_irq_restore(unsigned long);
-extern unsigned long arch_local_irq_save(void);
-extern void arch_local_irq_enable(void);
+void arch_local_irq_restore(unsigned long);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_enable(void);
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
index feb3578..04465de 100644
@@ -3,7 +3,7 @@
 
 struct pt_regs;
 
-extern void bad_trap(struct pt_regs *, long);
+void bad_trap(struct pt_regs *, long);
 
 /* Grossly misnamed. */
 enum die_val {
index b6ef301..47366af 100644
@@ -28,9 +28,12 @@ enum regnames {
 #define NUMREGBYTES            ((GDB_CSR + 1) * 4)
 #else
 #define NUMREGBYTES            ((GDB_Y + 1) * 8)
+
+struct pt_regs;
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
 #endif
 
-extern void arch_kgdb_breakpoint(void);
+void arch_kgdb_breakpoint(void);
 
 #define BREAK_INSTR_SIZE       4
 #define CACHE_FLUSH_IS_SAFE    1
index 5879d71..a145d79 100644
@@ -43,7 +43,9 @@ struct kprobe_ctlblk {
        struct prev_kprobe prev_kprobe;
 };
 
-extern int kprobe_exceptions_notify(struct notifier_block *self,
-                                   unsigned long val, void *data);
-extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+                            unsigned long val, void *data);
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
+                                     struct pt_regs *regs);
 #endif /* _SPARC64_KPROBES_H */
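
Besides the extern cleanup, the kgdb and kprobes hunks add asmlinkage
prototypes for the trap-level entry points (kgdb_trap, kprobe_trap) so the
assembly callers have a C declaration to match. For orientation only, this
arch header backs the generic kprobes API; a standard, non-sparc-specific
registration sketch with an illustrative probe symbol:

    #include <linux/init.h>
    #include <linux/kprobes.h>

    static struct kprobe demo_kp = {
            .symbol_name = "do_fork",       /* illustrative target */
    };

    static int __init demo_kprobe_init(void)
    {
            return register_kprobe(&demo_kp);
    }

    static void demo_kprobe_exit(void)
    {
            unregister_kprobe(&demo_kp);
    }
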
index bdb524a..c8c67f6 100644
@@ -4,9 +4,9 @@
 #include <asm/hypervisor.h>
 
 extern int ldom_domaining_enabled;
-extern void ldom_set_var(const char *var, const char *value);
-extern void ldom_reboot(const char *boot_command);
-extern void ldom_power_off(void);
+void ldom_set_var(const char *var, const char *value);
+void ldom_reboot(const char *boot_command);
+void ldom_power_off(void);
 
 /* The event handler will be invoked when link state changes
  * or data becomes available on the receive side.
@@ -51,30 +51,30 @@ struct ldc_channel_config {
 struct ldc_channel;
 
 /* Allocate state for a channel.  */
-extern struct ldc_channel *ldc_alloc(unsigned long id,
-                                    const struct ldc_channel_config *cfgp,
-                                    void *event_arg);
+struct ldc_channel *ldc_alloc(unsigned long id,
+                             const struct ldc_channel_config *cfgp,
+                             void *event_arg);
 
 /* Shut down and free state for a channel.  */
-extern void ldc_free(struct ldc_channel *lp);
+void ldc_free(struct ldc_channel *lp);
 
 /* Register TX and RX queues of the link with the hypervisor.  */
-extern int ldc_bind(struct ldc_channel *lp, const char *name);
+int ldc_bind(struct ldc_channel *lp, const char *name);
 
 /* For non-RAW protocols we need to complete a handshake before
  * communication can proceed.  ldc_connect() does that, if the
  * handshake completes successfully, an LDC_EVENT_UP event will
  * be sent up to the driver.
  */
-extern int ldc_connect(struct ldc_channel *lp);
-extern int ldc_disconnect(struct ldc_channel *lp);
+int ldc_connect(struct ldc_channel *lp);
+int ldc_disconnect(struct ldc_channel *lp);
 
-extern int ldc_state(struct ldc_channel *lp);
+int ldc_state(struct ldc_channel *lp);
 
 /* Read and write operations.  Only valid when the link is up.  */
-extern int ldc_write(struct ldc_channel *lp, const void *buf,
-                    unsigned int size);
-extern int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
+int ldc_write(struct ldc_channel *lp, const void *buf,
+             unsigned int size);
+int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
 
 #define LDC_MAP_SHADOW 0x01
 #define LDC_MAP_DIRECT 0x02
@@ -92,22 +92,22 @@ struct ldc_trans_cookie {
 };
 
 struct scatterlist;
-extern int ldc_map_sg(struct ldc_channel *lp,
-                     struct scatterlist *sg, int num_sg,
-                     struct ldc_trans_cookie *cookies, int ncookies,
-                     unsigned int map_perm);
+int ldc_map_sg(struct ldc_channel *lp,
+              struct scatterlist *sg, int num_sg,
+              struct ldc_trans_cookie *cookies, int ncookies,
+              unsigned int map_perm);
 
-extern int ldc_map_single(struct ldc_channel *lp,
-                         void *buf, unsigned int len,
-                         struct ldc_trans_cookie *cookies, int ncookies,
-                         unsigned int map_perm);
+int ldc_map_single(struct ldc_channel *lp,
+                  void *buf, unsigned int len,
+                  struct ldc_trans_cookie *cookies, int ncookies,
+                  unsigned int map_perm);
 
-extern void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
-                     int ncookies);
+void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
+              int ncookies);
 
-extern int ldc_copy(struct ldc_channel *lp, int copy_dir,
-                   void *buf, unsigned int len, unsigned long offset,
-                   struct ldc_trans_cookie *cookies, int ncookies);
+int ldc_copy(struct ldc_channel *lp, int copy_dir,
+            void *buf, unsigned int len, unsigned long offset,
+            struct ldc_trans_cookie *cookies, int ncookies);
 
 static inline int ldc_get_dring_entry(struct ldc_channel *lp,
                                      void *buf, unsigned int len,
@@ -127,12 +127,12 @@ static inline int ldc_put_dring_entry(struct ldc_channel *lp,
        return ldc_copy(lp, LDC_COPY_OUT, buf, len, offset, cookies, ncookies);
 }
 
-extern void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
-                                struct ldc_trans_cookie *cookies,
-                                int *ncookies, unsigned int map_perm);
+void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
+                         struct ldc_trans_cookie *cookies,
+                         int *ncookies, unsigned int map_perm);
 
-extern void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
-                              unsigned int len,
-                              struct ldc_trans_cookie *cookies, int ncookies);
+void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
+                       unsigned int len,
+                       struct ldc_trans_cookie *cookies, int ncookies);
 
 #endif /* _SPARC64_LDC_H */
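
The ldc.h declarations above make up the sparc64 Logical Domain Channel API:
allocate a channel, bind its queues, then complete the handshake with
ldc_connect() before reading or writing. A hedged bring-up sketch using those
entry points (the id, name, and config values are illustrative; the config
field names follow the sparc64 LDC API as declared in this header):

    #include <linux/err.h>
    #include <asm/ldc.h>

    static void demo_ldc_event(void *arg, int event)
    {
            /* LDC_EVENT_UP is delivered here once the handshake is done */
    }

    static struct ldc_channel_config demo_cfg = {
            .event  = demo_ldc_event,
            .mode   = LDC_MODE_UNRELIABLE,  /* assumed mode constant */
    };

    static int demo_ldc_setup(unsigned long id)
    {
            struct ldc_channel *lp = ldc_alloc(id, &demo_cfg, NULL);
            int err;

            if (IS_ERR(lp))
                    return PTR_ERR(lp);
            err = ldc_bind(lp, "demo");
            if (!err)
                    err = ldc_connect(lp);
            if (err)
                    ldc_free(lp);
            return err;
    }
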
index c2f6ff6..204771c 100644
@@ -82,8 +82,8 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
 #define LEON_BYPASS_LOAD_PA(x)      leon_load_reg((unsigned long)(x))
 #define LEON_BYPASS_STORE_PA(x, v)  leon_store_reg((unsigned long)(x), (unsigned long)(v))
 
-extern void leon_switch_mm(void);
-extern void leon_init_IRQ(void);
+void leon_switch_mm(void);
+void leon_init_IRQ(void);
 
 static inline unsigned long sparc_leon3_get_dcachecfg(void)
 {
@@ -196,14 +196,14 @@ static inline int sparc_leon3_cpuid(void)
 #ifndef __ASSEMBLY__
 struct vm_area_struct;
 
-extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
-extern void leon_flush_icache_all(void);
-extern void leon_flush_dcache_all(void);
-extern void leon_flush_cache_all(void);
-extern void leon_flush_tlb_all(void);
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
+void leon_flush_icache_all(void);
+void leon_flush_dcache_all(void);
+void leon_flush_cache_all(void);
+void leon_flush_tlb_all(void);
 extern int leon_flush_during_switch;
-extern int leon_flush_needed(void);
-extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
+int leon_flush_needed(void);
+void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
 
 /* struct that hold LEON3 cache configuration registers */
 struct leon3_cacheregs {
@@ -217,29 +217,29 @@ struct leon3_cacheregs {
 
 struct device_node;
 struct task_struct;
-extern unsigned int leon_build_device_irq(unsigned int real_irq,
-                                          irq_flow_handler_t flow_handler,
-                                          const char *name, int do_ack);
-extern void leon_update_virq_handling(unsigned int virq,
-                             irq_flow_handler_t flow_handler,
-                             const char *name, int do_ack);
-extern void leon_init_timers(void);
-extern void leon_trans_init(struct device_node *dp);
-extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
-extern void init_leon(void);
-extern void poke_leonsparc(void);
-extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
+unsigned int leon_build_device_irq(unsigned int real_irq,
+                                  irq_flow_handler_t flow_handler,
+                                  const char *name, int do_ack);
+void leon_update_virq_handling(unsigned int virq,
+                              irq_flow_handler_t flow_handler,
+                              const char *name, int do_ack);
+void leon_init_timers(void);
+void leon_trans_init(struct device_node *dp);
+void leon_node_init(struct device_node *dp, struct device_node ***nextp);
+void init_leon(void);
+void poke_leonsparc(void);
+void leon3_getCacheRegs(struct leon3_cacheregs *regs);
 extern int leon3_ticker_irq;
 
 #ifdef CONFIG_SMP
-extern int leon_smp_nrcpus(void);
-extern void leon_clear_profile_irq(int cpu);
-extern void leon_smp_done(void);
-extern void leon_boot_cpus(void);
-extern int leon_boot_one_cpu(int i, struct task_struct *);
+int leon_smp_nrcpus(void);
+void leon_clear_profile_irq(int cpu);
+void leon_smp_done(void);
+void leon_boot_cpus(void);
+int leon_boot_one_cpu(int i, struct task_struct *);
 void leon_init_smp(void);
 void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
-extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
+irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
 
 extern unsigned int smpleon_ipi[];
 extern unsigned int linux_trap_ipi15_leon[];
index bfd3ab3..049d067 100644
@@ -16,7 +16,7 @@ struct leon_pci_info {
        int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
 };
 
-extern void leon_pci_init(struct platform_device *ofdev,
-                               struct leon_pci_info *info);
+void leon_pci_init(struct platform_device *ofdev,
+                  struct leon_pci_info *info);
 
 #endif /* _ASM_LEON_PCI_H_ */
index 67ed9e3..d8e72f3 100644
@@ -1,5 +1,10 @@
 #ifndef ___ASM_SPARC_MC146818RTC_H
 #define ___ASM_SPARC_MC146818RTC_H
+
+#include <linux/spinlock.h>
+
+extern spinlock_t rtc_lock;
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/mc146818rtc_64.h>
 #else
index 139097f..aebeb88 100644
@@ -12,13 +12,13 @@ struct mdesc_handle;
  * the first argument to all of the operational calls that work
  * on mdescs.
  */
-extern struct mdesc_handle *mdesc_grab(void);
-extern void mdesc_release(struct mdesc_handle *);
+struct mdesc_handle *mdesc_grab(void);
+void mdesc_release(struct mdesc_handle *);
 
 #define MDESC_NODE_NULL                (~(u64)0)
 
-extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
-                             u64 from_node, const char *name);
+u64 mdesc_node_by_name(struct mdesc_handle *handle,
+                      u64 from_node, const char *name);
 #define mdesc_for_each_node_by_name(__hdl, __node, __name) \
        for (__node = mdesc_node_by_name(__hdl, MDESC_NODE_NULL, __name); \
             (__node) != MDESC_NODE_NULL; \
@@ -34,9 +34,9 @@ extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
  *
  * These same rules apply to mdesc_node_name().
  */
-extern const void *mdesc_get_property(struct mdesc_handle *handle,
-                                     u64 node, const char *name, int *lenp);
-extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
+const void *mdesc_get_property(struct mdesc_handle *handle,
+                              u64 node, const char *name, int *lenp);
+const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
 
 /* MD arc iteration, the standard sequence is:
  *
@@ -50,16 +50,16 @@ extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
 #define MDESC_ARC_TYPE_FWD     "fwd"
 #define MDESC_ARC_TYPE_BACK    "back"
 
-extern u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
-                         const char *arc_type);
+u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
+                  const char *arc_type);
 #define mdesc_for_each_arc(__arc, __hdl, __node, __type) \
        for (__arc = mdesc_next_arc(__hdl, __node, __type); \
             (__arc) != MDESC_NODE_NULL; \
             __arc = mdesc_next_arc(__hdl, __arc, __type))
 
-extern u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
+u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
 
-extern void mdesc_update(void);
+void mdesc_update(void);
 
 struct mdesc_notifier_client {
        void (*add)(struct mdesc_handle *handle, u64 node);
@@ -69,12 +69,12 @@ struct mdesc_notifier_client {
        struct mdesc_notifier_client    *next;
 };
 
-extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
+void mdesc_register_notifier(struct mdesc_notifier_client *client);
 
-extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
-extern void mdesc_populate_present_mask(cpumask_t *mask);
-extern void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
+void mdesc_fill_in_cpu_data(cpumask_t *mask);
+void mdesc_populate_present_mask(cpumask_t *mask);
+void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
 
-extern void sun4v_mdesc_init(void);
+void sun4v_mdesc_init(void);
 
 #endif
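
mdesc.h wraps the sun4v machine description; the usual sequence is grab,
iterate, release, with mdesc_for_each_node_by_name() and mdesc_get_property()
doing the walking. An illustrative traversal with the helpers declared above
(the node and property names are examples):

    #include <linux/kernel.h>
    #include <asm/mdesc.h>

    static void demo_mdesc_walk(void)
    {
            struct mdesc_handle *hp = mdesc_grab();
            u64 node;

            if (!hp)
                    return;
            mdesc_for_each_node_by_name(hp, node, "cpu") {
                    const u64 *id = mdesc_get_property(hp, node, "id", NULL);

                    if (id)
                            pr_info("cpu node, id %llu\n",
                                    (unsigned long long)*id);
            }
            mdesc_release(hp);
    }
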
index f668797..70067ce 100644
@@ -67,9 +67,9 @@ struct tsb {
        unsigned long pte;
 } __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
 
-extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
-extern void tsb_flush(unsigned long ent, unsigned long tag);
-extern void tsb_init(struct tsb *tsb, unsigned long size);
+void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+void tsb_flush(unsigned long ent, unsigned long tag);
+void tsb_init(struct tsb *tsb, unsigned long size);
 
 struct tsb_config {
        struct tsb              *tsb;
index 3d528f0..b84be67 100644
@@ -17,20 +17,20 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
-extern void get_new_mmu_context(struct mm_struct *mm);
+void get_new_mmu_context(struct mm_struct *mm);
 #ifdef CONFIG_SMP
-extern void smp_new_mmu_context_version(void);
+void smp_new_mmu_context_version(void);
 #else
 #define smp_new_mmu_context_version() do { } while (0)
 #endif
 
-extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-extern void destroy_context(struct mm_struct *mm);
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
 
-extern void __tsb_context_switch(unsigned long pgd_pa,
-                                struct tsb_config *tsb_base,
-                                struct tsb_config *tsb_huge,
-                                unsigned long tsb_descr_pa);
+void __tsb_context_switch(unsigned long pgd_pa,
+                         struct tsb_config *tsb_base,
+                         struct tsb_config *tsb_huge,
+                         unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
@@ -46,9 +46,11 @@ static inline void tsb_context_switch(struct mm_struct *mm)
                             , __pa(&mm->context.tsb_descr[0]));
 }
 
-extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
+void tsb_grow(struct mm_struct *mm,
+             unsigned long tsb_index,
+             unsigned long mm_rss);
 #ifdef CONFIG_SMP
-extern void smp_tsb_sync(struct mm_struct *mm);
+void smp_tsb_sync(struct mm_struct *mm);
 #else
 #define smp_tsb_sync(__mm) do { } while (0)
 #endif
@@ -66,7 +68,7 @@ extern void smp_tsb_sync(struct mm_struct *mm);
        : "r" (CTX_HWBITS((__mm)->context)), \
          "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
 
-extern void __flush_tlb_mm(unsigned long, unsigned long);
+void __flush_tlb_mm(unsigned long, unsigned long);
 
 /* Switch the current MM context. */
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
index 72e6500..26ad2b2 100644 (file)
@@ -1,13 +1,13 @@
 #ifndef __NMI_H
 #define __NMI_H
 
-extern int __init nmi_init(void);
-extern void perfctr_irq(int irq, struct pt_regs *regs);
-extern void nmi_adjust_hz(unsigned int new_hz);
+int __init nmi_init(void);
+void perfctr_irq(int irq, struct pt_regs *regs);
+void nmi_adjust_hz(unsigned int new_hz);
 
 extern atomic_t nmi_active;
 
-extern void start_nmi_watchdog(void *unused);
-extern void stop_nmi_watchdog(void *unused);
+void start_nmi_watchdog(void *unused);
+void stop_nmi_watchdog(void *unused);
 
 #endif /* __NMI_H */
index c72f304..56a09b9 100644 (file)
@@ -43,28 +43,28 @@ extern struct linux_nodeops *prom_nodeops;
 /* You must call prom_init() before using any of the library services,
  * preferably as early as possible.  Pass it the romvec pointer.
  */
-extern void prom_init(struct linux_romvec *rom_ptr);
+void prom_init(struct linux_romvec *rom_ptr);
 
 /* Boot argument acquisition, returns the boot command line string. */
-extern char *prom_getbootargs(void);
+char *prom_getbootargs(void);
 
 /* Miscellaneous routines, don't really fit in any category per se. */
 
 /* Reboot the machine with the command line passed. */
-extern void prom_reboot(char *boot_command);
+void prom_reboot(char *boot_command);
 
 /* Evaluate the forth string passed. */
-extern void prom_feval(char *forth_string);
+void prom_feval(char *forth_string);
 
 /* Enter the prom, with possibility of continuation with the 'go'
  * command in newer proms.
  */
-extern void prom_cmdline(void);
+void prom_cmdline(void);
 
 /* Enter the prom, with no chance of continuation for the stand-alone
  * which calls this.
  */
-extern void __noreturn prom_halt(void);
+void __noreturn prom_halt(void);
 
 /* Set the PROM 'sync' callback function to the passed function pointer.
  * When the user gives the 'sync' command at the prom prompt while the
@@ -73,37 +73,37 @@ extern void __noreturn prom_halt(void);
  * XXX The arguments are different on V0 vs. V2->higher proms, grrr! XXX
  */
 typedef void (*sync_func_t)(void);
-extern void prom_setsync(sync_func_t func_ptr);
+void prom_setsync(sync_func_t func_ptr);
 
 /* Acquire the IDPROM of the root node in the prom device tree.  This
  * gets passed a buffer where you would like it stuffed.  The return value
  * is the format type of this idprom or 0xff on error.
  */
-extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
+unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
 
 /* Get the prom major version. */
-extern int prom_version(void);
+int prom_version(void);
 
 /* Get the prom plugin revision. */
-extern int prom_getrev(void);
+int prom_getrev(void);
 
 /* Get the prom firmware revision. */
-extern int prom_getprev(void);
+int prom_getprev(void);
 
 /* Write a buffer of characters to the console. */
-extern void prom_console_write_buf(const char *buf, int len);
+void prom_console_write_buf(const char *buf, int len);
 
 /* Prom's internal routines, don't use in kernel/boot code. */
-extern __printf(1, 2) void prom_printf(const char *fmt, ...);
-extern void prom_write(const char *buf, unsigned int len);
+__printf(1, 2) void prom_printf(const char *fmt, ...);
+void prom_write(const char *buf, unsigned int len);
 
 /* Multiprocessor operations... */
 
 /* Start the CPU with the given device tree node, context table, and context
  * at the passed program counter.
  */
-extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
-                        int context, char *program_counter);
+int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
+                 int context, char *program_counter);
 
 /* Initialize the memory lists based upon the prom version. */
 void prom_meminit(void);
@@ -111,65 +111,65 @@ void prom_meminit(void);
 /* PROM device tree traversal functions... */
 
 /* Get the child node of the given node, or zero if no child exists. */
-extern phandle prom_getchild(phandle parent_node);
+phandle prom_getchild(phandle parent_node);
 
 /* Get the next sibling node of the given node, or zero if no further
  * siblings exist.
  */
-extern phandle prom_getsibling(phandle node);
+phandle prom_getsibling(phandle node);
 
 /* Get the length, at the passed node, of the given property type.
  * Returns -1 on error (i.e. no such property at this node).
  */
-extern int prom_getproplen(phandle thisnode, const char *property);
+int prom_getproplen(phandle thisnode, const char *property);
 
 /* Fetch the requested property using the given buffer.  Returns
  * the number of bytes the prom put into your buffer or -1 on error.
  */
-extern int __must_check prom_getproperty(phandle thisnode, const char *property,
-                                        char *prop_buffer, int propbuf_size);
+int __must_check prom_getproperty(phandle thisnode, const char *property,
+                                 char *prop_buffer, int propbuf_size);
 
 /* Acquire an integer property. */
-extern int prom_getint(phandle node, char *property);
+int prom_getint(phandle node, char *property);
 
 /* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(phandle node, char *property, int defval);
+int prom_getintdefault(phandle node, char *property, int defval);
 
 /* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(phandle node, char *prop);
+int prom_getbool(phandle node, char *prop);
 
 /* Acquire a string property, null string on error. */
-extern void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
+void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
 
 /* Search all siblings starting at the passed node for "name" matching
  * the given string.  Returns the node on success, zero on failure.
  */
-extern phandle prom_searchsiblings(phandle node_start, char *name);
+phandle prom_searchsiblings(phandle node_start, char *name);
 
 /* Returns the next property after the passed property for the given
  * node.  Returns null string on failure.
  */
-extern char *prom_nextprop(phandle node, char *prev_property, char *buffer);
+char *prom_nextprop(phandle node, char *prev_property, char *buffer);
 
 /* Returns phandle of the path specified */
-extern phandle prom_finddevice(char *name);
+phandle prom_finddevice(char *name);
 
 /* Set the indicated property at the given node with the passed value.
  * Returns the number of bytes of your value that the prom took.
  */
-extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
-                       int value_size);
+int prom_setprop(phandle node, const char *prop_name, char *prop_value,
+                int value_size);
 
-extern phandle prom_inst2pkg(int);
+phandle prom_inst2pkg(int);
 
 /* Dorking with Bus ranges... */
 
 /* Apply promlib probes OBIO ranges to registers. */
-extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
+void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
 
 /* Apply ranges of any prom node (and optionally parent node as well) to registers. */
-extern void prom_apply_generic_ranges(phandle node, phandle parent,
-                                     struct linux_prom_registers *sbusregs, int nregs);
+void prom_apply_generic_ranges(phandle node, phandle parent,
+                              struct linux_prom_registers *sbusregs, int nregs);
 
 void prom_ranges_init(void);
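
A rough sketch of how the traversal calls above compose, with a hypothetical node name and property (prom_root_node is assumed to be the root phandle exported elsewhere by promlib):

/* Hypothetical: find the first sibling named "cpu" under the root
 * node and read its "clock-frequency" property.
 */
phandle cpu;
int freq;

cpu = prom_searchsiblings(prom_getchild(prom_root_node), "cpu");
if (cpu != 0 &&
    prom_getproperty(cpu, "clock-frequency",
                     (char *)&freq, sizeof(freq)) != -1)
        prom_printf("cpu clock: %d Hz\n", freq);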
 
index a12dbe3..f346824 100644 (file)
@@ -62,100 +62,100 @@ struct linux_mem_p1275 {
 /* You must call prom_init() before using any of the library services,
  * preferably as early as possible.  Pass it the romvec pointer.
  */
-extern void prom_init(void *cif_handler, void *cif_stack);
+void prom_init(void *cif_handler, void *cif_stack);
 
 /* Boot argument acquisition, returns the boot command line string. */
-extern char *prom_getbootargs(void);
+char *prom_getbootargs(void);
 
 /* Miscellaneous routines, don't really fit in any category per se. */
 
 /* Reboot the machine with the command line passed. */
-extern void prom_reboot(const char *boot_command);
+void prom_reboot(const char *boot_command);
 
 /* Evaluate the forth string passed. */
-extern void prom_feval(const char *forth_string);
+void prom_feval(const char *forth_string);
 
 /* Enter the prom, with possibility of continuation with the 'go'
  * command in newer proms.
  */
-extern void prom_cmdline(void);
+void prom_cmdline(void);
 
 /* Enter the prom, with no chance of continuation for the stand-alone
  * which calls this.
  */
-extern void prom_halt(void) __attribute__ ((noreturn));
+void prom_halt(void) __attribute__ ((noreturn));
 
 /* Halt and power-off the machine. */
-extern void prom_halt_power_off(void) __attribute__ ((noreturn));
+void prom_halt_power_off(void) __attribute__ ((noreturn));
 
 /* Acquire the IDPROM of the root node in the prom device tree.  This
  * gets passed a buffer where you would like it stuffed.  The return value
  * is the format type of this idprom or 0xff on error.
  */
-extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
+unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
 
 /* Write a buffer of characters to the console. */
-extern void prom_console_write_buf(const char *buf, int len);
+void prom_console_write_buf(const char *buf, int len);
 
 /* Prom's internal routines, don't use in kernel/boot code. */
-extern __printf(1, 2) void prom_printf(const char *fmt, ...);
-extern void prom_write(const char *buf, unsigned int len);
+__printf(1, 2) void prom_printf(const char *fmt, ...);
+void prom_write(const char *buf, unsigned int len);
 
 /* Multiprocessor operations... */
 #ifdef CONFIG_SMP
 /* Start the CPU with the given device tree node at the passed program
  * counter with the given arg passed in via register %o0.
  */
-extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
+void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
 
 /* Start the CPU with the given cpu ID at the passed program
  * counter with the given arg passed in via register %o0.
  */
-extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
+void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
 
 /* Stop the CPU with the given cpu ID.  */
-extern void prom_stopcpu_cpuid(int cpuid);
+void prom_stopcpu_cpuid(int cpuid);
 
 /* Stop the current CPU. */
-extern void prom_stopself(void);
+void prom_stopself(void);
 
 /* Idle the current CPU. */
-extern void prom_idleself(void);
+void prom_idleself(void);
 
 /* Resume the CPU with the passed device tree node. */
-extern void prom_resumecpu(int cpunode);
+void prom_resumecpu(int cpunode);
 #endif
 
 /* Power management interfaces. */
 
 /* Put the current CPU to sleep. */
-extern void prom_sleepself(void);
+void prom_sleepself(void);
 
 /* Put the entire system to sleep. */
-extern int prom_sleepsystem(void);
+int prom_sleepsystem(void);
 
 /* Initiate a wakeup event. */
-extern int prom_wakeupsystem(void);
+int prom_wakeupsystem(void);
 
 /* MMU and memory related OBP interfaces. */
 
 /* Get unique string identifying SIMM at given physical address. */
-extern int prom_getunumber(int syndrome_code,
-                          unsigned long phys_addr,
-                          char *buf, int buflen);
+int prom_getunumber(int syndrome_code,
+                   unsigned long phys_addr,
+                   char *buf, int buflen);
 
 /* Retain physical memory to the caller across soft resets. */
-extern int prom_retain(const char *name, unsigned long size,
-                      unsigned long align, unsigned long *paddr);
+int prom_retain(const char *name, unsigned long size,
+               unsigned long align, unsigned long *paddr);
 
 /* Load explicit I/D TLB entries into the calling processor. */
-extern long prom_itlb_load(unsigned long index,
-                          unsigned long tte_data,
-                          unsigned long vaddr);
+long prom_itlb_load(unsigned long index,
+                   unsigned long tte_data,
+                   unsigned long vaddr);
 
-extern long prom_dtlb_load(unsigned long index,
-                          unsigned long tte_data,
-                          unsigned long vaddr);
+long prom_dtlb_load(unsigned long index,
+                   unsigned long tte_data,
+                   unsigned long vaddr);
 
 /* Map/Unmap client program address ranges.  First the format of
  * the mapping mode argument.
@@ -170,81 +170,81 @@ extern long prom_dtlb_load(unsigned long index,
 #define PROM_MAP_IE    0x0100 /* Invert-Endianness */
 #define PROM_MAP_DEFAULT (PROM_MAP_WRITE | PROM_MAP_READ | PROM_MAP_EXEC | PROM_MAP_CACHED)
 
-extern int prom_map(int mode, unsigned long size,
-                   unsigned long vaddr, unsigned long paddr);
-extern void prom_unmap(unsigned long size, unsigned long vaddr);
+int prom_map(int mode, unsigned long size,
+            unsigned long vaddr, unsigned long paddr);
+void prom_unmap(unsigned long size, unsigned long vaddr);
 
 
 /* PROM device tree traversal functions... */
 
 /* Get the child node of the given node, or zero if no child exists. */
-extern phandle prom_getchild(phandle parent_node);
+phandle prom_getchild(phandle parent_node);
 
 /* Get the next sibling node of the given node, or zero if no further
  * siblings exist.
  */
-extern phandle prom_getsibling(phandle node);
+phandle prom_getsibling(phandle node);
 
 /* Get the length, at the passed node, of the given property type.
  * Returns -1 on error (i.e. no such property at this node).
  */
-extern int prom_getproplen(phandle thisnode, const char *property);
+int prom_getproplen(phandle thisnode, const char *property);
 
 /* Fetch the requested property using the given buffer.  Returns
  * the number of bytes the prom put into your buffer or -1 on error.
  */
-extern int prom_getproperty(phandle thisnode, const char *property,
-                           char *prop_buffer, int propbuf_size);
+int prom_getproperty(phandle thisnode, const char *property,
+                    char *prop_buffer, int propbuf_size);
 
 /* Acquire an integer property. */
-extern int prom_getint(phandle node, const char *property);
+int prom_getint(phandle node, const char *property);
 
 /* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(phandle node, const char *property, int defval);
+int prom_getintdefault(phandle node, const char *property, int defval);
 
 /* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(phandle node, const char *prop);
+int prom_getbool(phandle node, const char *prop);
 
 /* Acquire a string property, null string on error. */
-extern void prom_getstring(phandle node, const char *prop, char *buf,
-                          int bufsize);
+void prom_getstring(phandle node, const char *prop, char *buf,
+                   int bufsize);
 
 /* Does the passed node have the given "name"? YES=1 NO=0 */
-extern int prom_nodematch(phandle thisnode, const char *name);
+int prom_nodematch(phandle thisnode, const char *name);
 
 /* Search all siblings starting at the passed node for "name" matching
  * the given string.  Returns the node on success, zero on failure.
  */
-extern phandle prom_searchsiblings(phandle node_start, const char *name);
+phandle prom_searchsiblings(phandle node_start, const char *name);
 
 /* Return the first property type, as a string, for the given node.
  * Returns a null string on error. Buffer should be at least 32B long.
  */
-extern char *prom_firstprop(phandle node, char *buffer);
+char *prom_firstprop(phandle node, char *buffer);
 
 /* Returns the next property after the passed property for the given
  * node.  Returns null string on failure. Buffer should be at least 32B long.
  */
-extern char *prom_nextprop(phandle node, const char *prev_property, char *buf);
+char *prom_nextprop(phandle node, const char *prev_property, char *buf);
 
 /* Returns 1 if the specified node has given property. */
-extern int prom_node_has_property(phandle node, const char *property);
+int prom_node_has_property(phandle node, const char *property);
 
 /* Returns phandle of the path specified */
-extern phandle prom_finddevice(const char *name);
+phandle prom_finddevice(const char *name);
 
 /* Set the indicated property at the given node with the passed value.
  * Returns the number of bytes of your value that the prom took.
  */
-extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
-                       int value_size);
+int prom_setprop(phandle node, const char *prop_name, char *prop_value,
+                int value_size);
 
-extern phandle prom_inst2pkg(int);
-extern void prom_sun4v_guest_soft_state(void);
+phandle prom_inst2pkg(int);
+void prom_sun4v_guest_soft_state(void);
 
-extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
+int prom_ihandle2path(int handle, char *buffer, int bufsize);
 
 /* Client interface level routines. */
-extern void p1275_cmd_direct(unsigned long *);
+void p1275_cmd_direct(unsigned long *);
 
 #endif /* !(__SPARC64_OPLIB_H) */
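
To illustrate the mapping interface and mode bits above, a minimal sketch with hypothetical addresses; the header does not spell out the return-value convention, so it is left unchecked here:

/* Hypothetical: map 8K at physical 'pa' to virtual 'va' with the
 * default cached read/write/exec attributes, then unmap it again.
 */
unsigned long va = 0xfe000000UL;
unsigned long pa = 0x1f0000000UL;

prom_map(PROM_MAP_DEFAULT, 8192, va, pa);
/* ... use the mapping ... */
prom_unmap(8192, va);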
index f21de03..1be2fde 100644 (file)
@@ -1,5 +1,8 @@
 #ifndef ___ASM_SPARC_PAGE_H
 #define ___ASM_SPARC_PAGE_H
+
+#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/page_64.h>
 #else
index aac53fc..bf10998 100644 (file)
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 struct pt_regs;
-extern void hugetlb_setup(struct pt_regs *regs);
+void hugetlb_setup(struct pt_regs *regs);
 #endif
 
 #define WANT_PAGE_VIRTUAL
 
-extern void _clear_page(void *page);
+void _clear_page(void *page);
 #define clear_page(X)  _clear_page((void *)(X))
 struct page;
-extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
 #define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
-extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
+void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
 
 /* Unlike sparc32, sparc64's parameter passing API is more
  * sane in that structures which are small enough are passed
index c6c7396..bd00a62 100644 (file)
@@ -52,7 +52,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 
 /* Return the index of the PCI controller for device PDEV. */
 
-extern int pci_domain_nr(struct pci_bus *bus);
+int pci_domain_nr(struct pci_bus *bus);
 static inline int pci_proc_domain(struct pci_bus *bus)
 {
        return 1;
@@ -64,9 +64,9 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
 #define get_pci_unmapped_area get_fb_unmapped_area
 
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                              enum pci_mmap_state mmap_state,
-                              int write_combine);
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+                       enum pci_mmap_state mmap_state,
+                       int write_combine);
 
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {
@@ -74,9 +74,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
-extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
-                                const struct resource *rsrc,
-                                resource_size_t *start, resource_size_t *end);
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+                         const struct resource *rsrc,
+                         resource_size_t *start, resource_size_t *end);
 #endif /* __KERNEL__ */
 
 #endif /* __SPARC64_PCI_H */
index 6676cbc..f417067 100644 (file)
@@ -30,10 +30,10 @@ struct linux_pcic {
 };
 
 #ifdef CONFIG_PCIC_PCI
-extern int pcic_present(void);
-extern int pcic_probe(void);
-extern void pci_time_init(void);
-extern void sun4m_pci_init_IRQ(void);
+int pcic_present(void);
+int pcic_probe(void);
+void pci_time_init(void);
+void sun4m_pci_init_IRQ(void);
 #else
 static inline int pcic_present(void) { return 0; }
 static inline int pcic_probe(void) { return 0; }
index 942bb17..cdf800c 100644 (file)
@@ -12,8 +12,8 @@ struct pcr_ops {
 };
 extern const struct pcr_ops *pcr_ops;
 
-extern void deferred_pcr_work_irq(int irq, struct pt_regs *regs);
-extern void schedule_deferred_pcr_work(void);
+void deferred_pcr_work_irq(int irq, struct pt_regs *regs);
+void schedule_deferred_pcr_work(void);
 
 #define PCR_PIC_PRIV           0x00000001 /* PIC access is privileged */
 #define PCR_STRACE             0x00000002 /* Trace supervisor events  */
@@ -45,6 +45,6 @@ extern void schedule_deferred_pcr_work(void);
 #define PCR_N4_PICNHT          0x00020000 /* PIC non-hypervisor trap  */
 #define PCR_N4_NTC             0x00040000 /* Next-To-Commit wrap      */
 
-extern int pcr_arch_init(void);
+int pcr_arch_init(void);
 
 #endif /* __PCR_H */
index 9b1c36d..a3890da 100644 (file)
@@ -14,6 +14,8 @@ struct page;
 void *srmmu_get_nocache(int size, int align);
 void srmmu_free_nocache(void *addr, int size);
 
+extern struct resource sparc_iomap;
+
 #define check_pgt_cache()      do { } while (0)
 
 pgd_t *get_pgd_fast(void);
index bcfe063..39a7ac4 100644 (file)
@@ -38,12 +38,12 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        kmem_cache_free(pgtable_cache, pmd);
 }
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-                                  unsigned long address);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm,
-                              unsigned long address);
-extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, pgtable_t ptepage);
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+                           unsigned long address);
+pgtable_t pte_alloc_one(struct mm_struct *mm,
+                       unsigned long address);
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 
 #define pmd_populate_kernel(MM, PMD, PTE)      pmd_set(MM, PMD, PTE)
 #define pmd_populate(MM, PMD, PTE)             pmd_set(MM, PMD, PTE)
@@ -51,12 +51,12 @@ extern void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 
 #define check_pgt_cache()      do { } while (0)
 
-extern void pgtable_free(void *table, bool is_page);
+void pgtable_free(void *table, bool is_page);
 
 #ifdef CONFIG_SMP
 
 struct mmu_gather;
-extern void tlb_remove_table(struct mmu_gather *, void *);
+void tlb_remove_table(struct mmu_gather *, void *);
 
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
 {
index 502f632..b9b91ae 100644 (file)
@@ -25,8 +25,9 @@
 struct vm_area_struct;
 struct page;
 
-extern void load_mmu(void);
-extern unsigned long calc_highpages(void);
+void load_mmu(void);
+unsigned long calc_highpages(void);
+unsigned long __init bootmem_init(unsigned long *pages_avail);
 
 #define pte_ERROR(e)   __builtin_trap()
 #define pmd_ERROR(e)   __builtin_trap()
@@ -56,7 +57,7 @@ extern unsigned long calc_highpages(void);
  * srmmu.c will assign the real one (which is dynamically sized) */
 #define swapper_pg_dir NULL
 
-extern void paging_init(void);
+void paging_init(void);
 
 extern unsigned long ptr_in_current_pgd;
 
@@ -428,8 +429,8 @@ extern unsigned long *sparc_valid_addr_bitmap;
 #define GET_IOSPACE(pfn)               (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)                   (pfn & 0x0fffffffUL)
 
-extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
-                          unsigned long, pgprot_t);
+int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+                   unsigned long, pgprot_t);
 
 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long from, unsigned long pfn,
index 1a49ffd..3770bf5 100644 (file)
@@ -210,9 +210,9 @@ static inline bool kern_addr_valid(unsigned long addr)
 
 #ifndef __ASSEMBLY__
 
-extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
+pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
 
-extern unsigned long pte_sz_bits(unsigned long size);
+unsigned long pte_sz_bits(unsigned long size);
 
 extern pgprot_t PAGE_KERNEL;
 extern pgprot_t PAGE_KERNEL_LOCKED;
@@ -780,8 +780,8 @@ static inline int pmd_present(pmd_t pmd)
                                         !__kern_addr_valid(pud_val(pud)))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                      pmd_t *pmdp, pmd_t pmd);
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+               pmd_t *pmdp, pmd_t pmd);
 #else
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
@@ -840,8 +840,8 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 #define pte_unmap(pte)                 do { } while (0)
 
 /* Actual page table PTE updates.  */
-extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-                         pte_t *ptep, pte_t orig, int fullmm);
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+                  pte_t *ptep, pte_t orig, int fullmm);
 
 #define __HAVE_ARCH_PMDP_GET_AND_CLEAR
 static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
@@ -900,28 +900,28 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];
 
-extern void paging_init(void);
-extern unsigned long find_ecache_flush_span(unsigned long size);
+void paging_init(void);
+unsigned long find_ecache_flush_span(unsigned long size);
 
 struct seq_file;
-extern void mmu_info(struct seq_file *);
+void mmu_info(struct seq_file *);
 
 struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-                                pmd_t *pmd);
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+                         pmd_t *pmd);
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);
 
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                      pgtable_t pgtable);
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                               pgtable_t pgtable);
 
 #define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #endif
 
 /* Encode and de-code a swap entry */
@@ -937,12 +937,12 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
 
 /* File offset in PTE support. */
-extern unsigned long pte_file(pte_t);
+unsigned long pte_file(pte_t);
 #define pte_to_pgoff(pte)      (pte_val(pte) >> PAGE_SHIFT)
-extern pte_t pgoff_to_pte(unsigned long);
+pte_t pgoff_to_pte(unsigned long);
 #define PTE_FILE_MAX_BITS      (64UL - PAGE_SHIFT - 1UL)
 
-extern int page_in_phys_avail(unsigned long paddr);
+int page_in_phys_avail(unsigned long paddr);
 
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
@@ -952,8 +952,8 @@ extern int page_in_phys_avail(unsigned long paddr);
 #define GET_IOSPACE(pfn)               (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)                   (pfn & 0x0fffffffffffffffUL)
 
-extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
-                          unsigned long, pgprot_t);
+int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+                   unsigned long, pgprot_t);
 
 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long from, unsigned long pfn,
@@ -981,20 +981,20 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 /* We provide a special get_unmapped_area for framebuffer mmaps to try and use
  * the largest alignment possible such that large PTEs can be used.
  */
-extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
-                                         unsigned long, unsigned long,
-                                         unsigned long);
+unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
+                                  unsigned long, unsigned long,
+                                  unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-extern void pgtable_cache_init(void);
-extern void sun4v_register_fault_status(void);
-extern void sun4v_ktsb_register(void);
-extern void __init cheetah_ecache_flush_init(void);
-extern void sun4v_patch_tlb_handlers(void);
+void pgtable_cache_init(void);
+void sun4v_register_fault_status(void);
+void sun4v_ktsb_register(void);
+void __init cheetah_ecache_flush_init(void);
+void sun4v_patch_tlb_handlers(void);
 
 extern unsigned long cmdline_memory_size;
 
-extern asmlinkage void do_sparc64_fault(struct pt_regs *regs);
+asmlinkage void do_sparc64_fault(struct pt_regs *regs);
 
 #endif /* !(__ASSEMBLY__) */
 
index 2c7baa4..812fd08 100644 (file)
@@ -74,7 +74,7 @@ struct thread_struct {
 }
 
 /* Return saved PC of a blocked thread. */
-extern unsigned long thread_saved_pc(struct task_struct *t);
+unsigned long thread_saved_pc(struct task_struct *t);
 
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
@@ -107,7 +107,7 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 /* Free all resources held by a thread. */
 #define release_thread(tsk)            do { } while(0)
 
-extern unsigned long get_wchan(struct task_struct *);
+unsigned long get_wchan(struct task_struct *);
 
 #define task_pt_regs(tsk) ((tsk)->thread.kregs)
 #define KSTK_EIP(tsk)  ((tsk)->thread.kregs->pc)
@@ -116,8 +116,11 @@ extern unsigned long get_wchan(struct task_struct *);
 #ifdef __KERNEL__
 
 extern struct task_struct *last_task_used_math;
+int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
 
 #define cpu_relax()    barrier()
+#define cpu_relax_lowlatency() cpu_relax()
+
 extern void (*sparc_idle)(void);
 
 #endif
index 4c3f7f0..6924bde 100644 (file)
@@ -95,7 +95,7 @@ struct thread_struct {
 
 /* Return saved PC of a blocked thread. */
 struct task_struct;
-extern unsigned long thread_saved_pc(struct task_struct *);
+unsigned long thread_saved_pc(struct task_struct *);
 
 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP
@@ -194,7 +194,7 @@ do { \
 /* Free all resources held by a thread. */
 #define release_thread(tsk)            do { } while (0)
 
-extern unsigned long get_wchan(struct task_struct *task);
+unsigned long get_wchan(struct task_struct *task);
 
 #define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
@@ -216,6 +216,7 @@ extern unsigned long get_wchan(struct task_struct *task);
                                     "nop\n\t"                          \
                                     ".previous"                        \
                                     ::: "memory")
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Prefetch support.  This is tuned for UltraSPARC-III and later.
  * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
@@ -253,6 +254,8 @@ static inline void prefetchw(const void *x)
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
+int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap);
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
index 11ebd65..d955c8d 100644 (file)
@@ -36,28 +36,28 @@ struct of_irq_controller {
        void            *data;
 };
 
-extern struct device_node *of_find_node_by_cpuid(int cpuid);
-extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
+struct device_node *of_find_node_by_cpuid(int cpuid);
+int of_set_property(struct device_node *node, const char *name, void *val, int len);
 extern struct mutex of_set_property_mutex;
-extern int of_getintprop_default(struct device_node *np,
-                                const char *name,
+int of_getintprop_default(struct device_node *np,
+                         const char *name,
                                 int def);
-extern int of_find_in_proplist(const char *list, const char *match, int len);
+int of_find_in_proplist(const char *list, const char *match, int len);
 
-extern void prom_build_devicetree(void);
-extern void of_populate_present_mask(void);
-extern void of_fill_in_cpu_data(void);
+void prom_build_devicetree(void);
+void of_populate_present_mask(void);
+void of_fill_in_cpu_data(void);
 
 struct resource;
-extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
-extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
+void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
+void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
 
 extern struct device_node *of_console_device;
 extern char *of_console_path;
 extern char *of_console_options;
 
-extern void irq_trans_init(struct device_node *dp);
-extern char *build_path_component(struct device_node *dp);
+void irq_trans_init(struct device_node *dp);
+char *build_path_component(struct device_node *dp);
 
 #endif /* __KERNEL__ */
 #endif /* _SPARC_PROM_H */
index bdfafd7..bac6a94 100644 (file)
@@ -73,7 +73,7 @@ static inline long regs_return_value(struct pt_regs *regs)
        return regs->u_regs[UREG_I0];
 }
 #ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *);
+unsigned long profile_pc(struct pt_regs *);
 #else
 #define profile_pc(regs) instruction_pointer(regs)
 #endif
index 5e35e05..f5fffd8 100644 (file)
@@ -4,8 +4,9 @@
 #ifndef _SPARC_SETUP_H
 #define _SPARC_SETUP_H
 
-#include <uapi/asm/setup.h>
+#include <linux/interrupt.h>
 
+#include <uapi/asm/setup.h>
 
 extern char reboot_command[];
 
@@ -22,9 +23,43 @@ static inline int con_is_present(void)
 {
        return serial_console ? 0 : 1;
 }
+
+/* from irq_32.c */
+extern volatile unsigned char *fdc_status;
+extern char *pdma_vaddr;
+extern unsigned long pdma_size;
+extern volatile int doing_pdma;
+
+/* This is software state */
+extern char *pdma_base;
+extern unsigned long pdma_areasize;
+
+int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler);
+
+/* setup_32.c */
+extern unsigned long cmdline_memory_size;
+
+/* devices.c */
+void __init device_scan(void);
+
+/* unaligned_32.c */
+unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int);
+
+#endif
+
+#ifdef CONFIG_SPARC64
+/* unaligned_64.c */
+int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+void handle_ld_nf(u32 insn, struct pt_regs *regs);
+
+/* init_64.c */
+extern atomic_t dcpage_flushes;
+extern atomic_t dcpage_flushes_xcall;
+
+extern int sysctl_tsb_ratio;
 #endif
 
-extern void sun_do_break(void);
+void sun_do_break(void);
 extern int stop_a_enabled;
 extern int scons_pwroff;
 
index 01d9c3b..838c9d5 100644 (file)
@@ -79,9 +79,9 @@
   __asm__ ("addcc %r7,%8,%2\n\t"                                       \
           "addxcc %r5,%6,%1\n\t"                                       \
           "addx %r3,%4,%0\n"                                           \
-          : "=r" ((USItype)(r2)),                                      \
-            "=&r" ((USItype)(r1)),                                     \
-            "=&r" ((USItype)(r0))                                      \
+          : "=r" (r2),                                                 \
+            "=&r" (r1),                                                \
+            "=&r" (r0)                                                 \
           : "%rJ" ((USItype)(x2)),                                     \
             "rI" ((USItype)(y2)),                                      \
             "%rJ" ((USItype)(x1)),                                     \
@@ -94,9 +94,9 @@
   __asm__ ("subcc %r7,%8,%2\n\t"                                       \
            "subxcc %r5,%6,%1\n\t"                                      \
            "subx %r3,%4,%0\n"                                          \
-          : "=r" ((USItype)(r2)),                                      \
-            "=&r" ((USItype)(r1)),                                     \
-            "=&r" ((USItype)(r0))                                      \
+          : "=r" (r2),                                                 \
+            "=&r" (r1),                                                \
+            "=&r" (r0)                                                 \
           : "%rJ" ((USItype)(x2)),                                     \
             "rI" ((USItype)(y2)),                                      \
             "%rJ" ((USItype)(x1)),                                     \
            "addxcc %r6,%7,%0\n\t"                                      \
            "addxcc %r4,%5,%%g2\n\t"                                    \
            "addx %r2,%3,%%g1\n\t"                                      \
-          : "=&r" ((USItype)(r1)),                                     \
-            "=&r" ((USItype)(r0))                                      \
+          : "=&r" (r1),                                                \
+            "=&r" (r0)                                                 \
           : "%rJ" ((USItype)(x3)),                                     \
             "rI" ((USItype)(y3)),                                      \
             "%rJ" ((USItype)(x2)),                                     \
            "subxcc %r6,%7,%0\n\t"                                      \
            "subxcc %r4,%5,%%g2\n\t"                                    \
            "subx %r2,%3,%%g1\n\t"                                      \
-          : "=&r" ((USItype)(r1)),                                     \
-            "=&r" ((USItype)(r0))                                      \
+          : "=&r" (r1),                                                \
+            "=&r" (r0)                                                 \
           : "%rJ" ((USItype)(x3)),                                     \
             "rI" ((USItype)(y3)),                                      \
             "%rJ" ((USItype)(x2)),                                     \
           "addxcc %2,%%g0,%2\n\t"                                      \
           "addxcc %1,%%g0,%1\n\t"                                      \
           "addx %0,%%g0,%0\n\t"                                        \
-          : "=&r" ((USItype)(x3)),                                     \
-            "=&r" ((USItype)(x2)),                                     \
-            "=&r" ((USItype)(x1)),                                     \
-            "=&r" ((USItype)(x0))                                      \
+          : "=&r" (x3),                                                \
+            "=&r" (x2),                                                \
+            "=&r" (x1),                                                \
+            "=&r" (x0)                                                 \
           : "rI" ((USItype)(i)),                                       \
             "0" ((USItype)(x3)),                                       \
             "1" ((USItype)(x2)),                                       \
index 3c8917f..7c24e08 100644 (file)
@@ -93,15 +93,15 @@ static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                                    arg1, arg2, arg3, arg4);
 }
 
-extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 static inline int cpu_logical_map(int cpu)
 {
        return cpu;
 }
 
-extern int hard_smp_processor_id(void);
+int hard_smp_processor_id(void);
 
 #define raw_smp_processor_id()         (current_thread_info()->cpu)
 
index 0571039..26d9e77 100644 (file)
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
 
-extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 /*
  *     General functions that each host system must provide.
  */
 
-extern int hard_smp_processor_id(void);
+int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-extern void smp_fill_in_sib_core_maps(void);
-extern void cpu_play_dead(void);
+void smp_fill_in_sib_core_maps(void);
+void cpu_play_dead(void);
 
-extern void smp_fetch_global_regs(void);
-extern void smp_fetch_global_pmu(void);
+void smp_fetch_global_regs(void);
+void smp_fetch_global_pmu(void);
 
 struct seq_file;
 void smp_bogo(struct seq_file *);
 void smp_info(struct seq_file *);
 
+void smp_callin(void);
+void cpu_panic(void);
+void smp_synchronize_tick_client(void);
+void smp_capture(void);
+void smp_release(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void __cpu_die(unsigned int cpu);
 #endif
 
 #endif /* !(__ASSEMBLY__) */
index 6b67e50..3fc5869 100644 (file)
@@ -62,7 +62,7 @@ extern enum ultra_tlb_layout tlb_type;
 extern int sun4v_chip_type;
 
 extern int cheetah_pcache_forced_on;
-extern void cheetah_enable_pcache(void);
+void cheetah_enable_pcache(void);
 
 #define sparc64_highest_locked_tlbent()        \
        (tlb_type == spitfire ? \
index 6cee39a..c30d066 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef _SPARC64_STACKTRACE_H
 #define _SPARC64_STACKTRACE_H
 
-extern void stack_trace_flush(void);
+void stack_trace_flush(void);
 
 #endif /* _SPARC64_STACKTRACE_H */
index d56ce60..c100dc2 100644 (file)
 
 extern int this_is_starfire;
 
-extern void check_if_starfire(void);
-extern int starfire_hard_smp_processor_id(void);
-extern void starfire_hookup(int);
-extern unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
+void check_if_starfire(void);
+int starfire_hard_smp_processor_id(void);
+void starfire_hookup(int);
+unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
 
 #endif
 #endif
index 12f6785..69974e9 100644 (file)
@@ -15,7 +15,7 @@
 
 #ifdef __KERNEL__
 
-extern void __memmove(void *,const void *,__kernel_size_t);
+void __memmove(void *,const void *,__kernel_size_t);
 
 #ifndef EXPORT_SYMTAB_STROPS
 
@@ -40,8 +40,8 @@ extern void __memmove(void *,const void *,__kernel_size_t);
 #undef memscan
 #define memscan(__arg0, __char, __arg2)                                                \
 ({                                                                             \
-       extern void *__memscan_zero(void *, size_t);                            \
-       extern void *__memscan_generic(void *, int, size_t);                    \
+       void *__memscan_zero(void *, size_t);                                   \
+       void *__memscan_generic(void *, int, size_t);                           \
        void *__retval, *__addr = (__arg0);                                     \
        size_t __size = (__arg2);                                               \
                                                                                \
@@ -54,14 +54,14 @@ extern void __memmove(void *,const void *,__kernel_size_t);
 })
 
 #define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *,const void *,__kernel_size_t);
+int memcmp(const void *,const void *,__kernel_size_t);
 
 /* Now the str*() stuff... */
 #define __HAVE_ARCH_STRLEN
-extern __kernel_size_t strlen(const char *);
+__kernel_size_t strlen(const char *);
 
 #define __HAVE_ARCH_STRNCMP
-extern int strncmp(const char *, const char *, __kernel_size_t);
+int strncmp(const char *, const char *, __kernel_size_t);
 
 #endif /* !EXPORT_SYMTAB_STROPS */
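
The memscan() wrapper above declares two helpers; assuming, as those declarations suggest, that the elided body dispatches to __memscan_zero() for a zero scan byte and __memscan_generic() otherwise, usage is unchanged from the generic memscan():

/* Hypothetical: locate the first NUL in a fixed buffer.  memscan()
 * returns the address of the match, or one byte past the area if
 * the byte does not occur.
 */
char buf[64];
void *p = memscan(buf, 0, sizeof(buf));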
 
index 9623bc2..5936b8f 100644 (file)
@@ -19,7 +19,7 @@
 
 /* First the mem*() things. */
 #define __HAVE_ARCH_MEMMOVE
-extern void *memmove(void *, const void *, __kernel_size_t);
+void *memmove(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCPY
 #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
@@ -32,8 +32,8 @@ extern void *memmove(void *, const void *, __kernel_size_t);
 #undef memscan
 #define memscan(__arg0, __char, __arg2)                                        \
 ({                                                                     \
-       extern void *__memscan_zero(void *, size_t);                    \
-       extern void *__memscan_generic(void *, int, size_t);            \
+       void *__memscan_zero(void *, size_t);                           \
+       void *__memscan_generic(void *, int, size_t);                   \
        void *__retval, *__addr = (__arg0);                             \
        size_t __size = (__arg2);                                       \
                                                                        \
@@ -46,14 +46,14 @@ extern void *memmove(void *, const void *, __kernel_size_t);
 })
 
 #define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *,const void *,__kernel_size_t);
+int memcmp(const void *,const void *,__kernel_size_t);
 
 /* Now the str*() stuff... */
 #define __HAVE_ARCH_STRLEN
-extern __kernel_size_t strlen(const char *);
+__kernel_size_t strlen(const char *);
 
 #define __HAVE_ARCH_STRNCMP
-extern int strncmp(const char *, const char *, __kernel_size_t);
+int strncmp(const char *, const char *, __kernel_size_t);
 
 #endif /* !EXPORT_SYMTAB_STROPS */
 
index e32e82b..16f1037 100644 (file)
@@ -99,8 +99,8 @@ extern struct thread_info *current_set[NR_CPUS];
          "o0", "o1", "o2", "o3",                   "o7");      \
        } while(0)
 
-extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
-                  void *fpqueue, unsigned long *fpqdepth);
-extern void synchronize_user_stack(void);
+void fpsave(unsigned long *fpregs, unsigned long *fsr,
+           void *fpqueue, unsigned long *fpqdepth);
+void synchronize_user_stack(void);
 
 #endif /* __SPARC_SWITCH_TO_H */
index 8d28480..10e7633 100644 (file)
@@ -65,7 +65,7 @@ do {  save_and_clear_fpu();                                           \
          "o0", "o1", "o2", "o3", "o4", "o5",       "o7");              \
 } while(0)
 
-extern void synchronize_user_stack(void);
-extern void fault_in_user_windows(void);
+void synchronize_user_stack(void);
+void fault_in_user_windows(void);
 
 #endif /* __SPARC64_SWITCH_TO_64_H */
index bf8972a..b0a0db8 100644 (file)
@@ -3,9 +3,9 @@
 
 struct pt_regs;
 
-extern asmlinkage long sparc_do_fork(unsigned long clone_flags,
-                                    unsigned long stack_start,
-                                    struct pt_regs *regs,
-                                    unsigned long stack_size);
+asmlinkage long sparc_do_fork(unsigned long clone_flags,
+                             unsigned long stack_start,
+                             struct pt_regs *regs,
+                             unsigned long stack_size);
 
 #endif /* _SPARC64_SYSCALLS_H */
index 72f40a5..f8e708a 100644 (file)
@@ -32,13 +32,13 @@ static inline unsigned int timer_value(unsigned int value)
        return (value + 1) << TIMER_VALUE_SHIFT;
 }
 
-extern __volatile__ unsigned int *master_l10_counter;
+extern volatile u32 __iomem *master_l10_counter;
 
-extern irqreturn_t notrace timer_interrupt(int dummy, void *dev_id);
+irqreturn_t notrace timer_interrupt(int dummy, void *dev_id);
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct clock_event_device, sparc32_clockevent);
-extern void register_percpu_ce(int cpu);
+void register_percpu_ce(int cpu);
 #endif
 
 #endif /* !(_SPARC_TIMER_H) */
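
Retyping master_l10_counter above, from a bare volatile pointer to a volatile u32 __iomem pointer, means readers are expected to go through an MMIO accessor rather than dereference it directly. A minimal sketch, assuming the sparc sbus_readl() helper:

/* Hypothetical read of the free-running level-10 counter. */
u32 ticks = sbus_readl(master_l10_counter);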
index 01197d8..fce4150 100644 (file)
@@ -23,8 +23,8 @@ struct sparc64_tick_ops {
 
 extern struct sparc64_tick_ops *tick_ops;
 
-extern unsigned long sparc64_get_clock_tick(unsigned int cpu);
-extern void setup_sparc64_timer(void);
-extern void __init time_init(void);
+unsigned long sparc64_get_clock_tick(unsigned int cpu);
+void setup_sparc64_timer(void);
+void __init time_init(void);
 
 #endif /* _SPARC64_TIMER_H */
index 190e189..4cb392f 100644 (file)
@@ -8,19 +8,19 @@
 #include <asm/mmu_context.h>
 
 #ifdef CONFIG_SMP
-extern void smp_flush_tlb_pending(struct mm_struct *,
+void smp_flush_tlb_pending(struct mm_struct *,
                                  unsigned long, unsigned long *);
 #endif
 
 #ifdef CONFIG_SMP
-extern void smp_flush_tlb_mm(struct mm_struct *mm);
+void smp_flush_tlb_mm(struct mm_struct *mm);
 #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
 #else
 #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
 #endif
 
-extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
-extern void flush_tlb_pending(void);
+void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
+void flush_tlb_pending(void);
 
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma)  do { } while (0)
index 3c3c89f..816d820 100644 (file)
@@ -14,9 +14,9 @@ struct tlb_batch {
        unsigned long vaddrs[TLB_BATCH_NR];
 };
 
-extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
-extern void flush_tsb_user(struct tlb_batch *tb);
-extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_kernel_range(unsigned long start, unsigned long end);
+void flush_tsb_user(struct tlb_batch *tb);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
 
 /* TLB flush operations. */
 
@@ -36,15 +36,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-extern void flush_tlb_pending(void);
-extern void arch_enter_lazy_mmu_mode(void);
-extern void arch_leave_lazy_mmu_mode(void);
+void flush_tlb_pending(void);
+void arch_enter_lazy_mmu_mode(void);
+void arch_leave_lazy_mmu_mode(void);
 #define arch_flush_lazy_mmu_mode()      do {} while (0)
 
 /* Local cpu only.  */
-extern void __flush_tlb_all(void);
-extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
-extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void __flush_tlb_all(void);
+void __flush_tlb_page(unsigned long context, unsigned long vaddr);
+void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
 
@@ -60,8 +60,8 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
 
 #else /* CONFIG_SMP */
 
-extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
+void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
 #define flush_tlb_kernel_range(start, end) \
 do {   flush_tsb_kernel_range(start,end); \
index a2d10fc..ed8f071 100644 (file)
@@ -18,7 +18,7 @@ static inline int cpu_to_node(int cpu)
 
 struct pci_bus;
 #ifdef CONFIG_PCI
-extern int pcibus_to_node(struct pci_bus *pbus);
+int pcibus_to_node(struct pci_bus *pbus);
 #else
 static inline int pcibus_to_node(struct pci_bus *pbus)
 {
index 7e26b2d..6fd4436 100644 (file)
@@ -51,11 +51,11 @@ struct trap_per_cpu {
        unsigned long           __per_cpu_base;
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
+void init_cur_cpu_trap(struct thread_info *);
+void setup_tba(void);
 extern int ncpus_probed;
 
-extern unsigned long real_hard_smp_processor_id(void);
+unsigned long real_hard_smp_processor_id(void);
 
 struct cpuid_patch_entry {
        unsigned int    addr;
index 0167d26..bd56c28 100644 (file)
@@ -9,6 +9,6 @@
 #define user_addr_max() \
        (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
 
-extern long strncpy_from_user(char *dest, const char __user *src, long count);
+long strncpy_from_user(char *dest, const char __user *src, long count);
 
 #endif
index 53a28dd..9634d08 100644 (file)
@@ -78,9 +78,9 @@ struct exception_table_entry
 };
 
 /* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
+unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
 
-extern void __ret_efault(void);
+void __ret_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
@@ -152,7 +152,7 @@ __asm__ __volatile__(                                                       \
        : "=&r" (ret) : "r" (x), "m" (*__m(addr)),                      \
         "i" (-EFAULT))
 
-extern int __put_user_bad(void);
+int __put_user_bad(void);
 
 #define __get_user_check(x,addr,size,type) ({ \
 register int __gu_ret; \
@@ -244,9 +244,9 @@ __asm__ __volatile__(                                                       \
        ".previous\n\t"                                                 \
        : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
 
-extern int __get_user_bad(void);
+int __get_user_bad(void);
 
-extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
+unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
@@ -306,8 +306,8 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
                return n;
 }
 
-extern __must_check long strlen_user(const char __user *str);
-extern __must_check long strnlen_user(const char __user *str, long n);
+__must_check long strlen_user(const char __user *str);
+__must_check long strnlen_user(const char __user *str, long n);
 
 #endif  /* __ASSEMBLY__ */
 
index ad7e178..c990a5e 100644 (file)
@@ -76,8 +76,8 @@ struct exception_table_entry {
         unsigned int insn, fixup;
 };
 
-extern void __ret_efault(void);
-extern void __retl_efault(void);
+void __ret_efault(void);
+void __retl_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
@@ -134,7 +134,7 @@ __asm__ __volatile__(                                                       \
        : "=r" (ret) : "r" (x), "r" (__m(addr)),                                \
         "i" (-EFAULT))
 
-extern int __put_user_bad(void);
+int __put_user_bad(void);
 
 #define __get_user_nocheck(data,addr,size,type) ({ \
 register int __gu_ret; \
@@ -204,13 +204,13 @@ __asm__ __volatile__(                                                     \
        ".previous\n\t"                                                 \
        : "=r" (x) : "r" (__m(addr)), "i" (retval))
 
-extern int __get_user_bad(void);
+int __get_user_bad(void);
 
-extern unsigned long __must_check ___copy_from_user(void *to,
-                                                   const void __user *from,
-                                                   unsigned long size);
-extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
-                                         unsigned long size);
+unsigned long __must_check ___copy_from_user(void *to,
+                                            const void __user *from,
+                                            unsigned long size);
+unsigned long copy_from_user_fixup(void *to, const void __user *from,
+                                  unsigned long size);
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
@@ -223,11 +223,11 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 }
 #define __copy_from_user copy_from_user
 
-extern unsigned long __must_check ___copy_to_user(void __user *to,
-                                                 const void *from,
-                                                 unsigned long size);
-extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
-                                       unsigned long size);
+unsigned long __must_check ___copy_to_user(void __user *to,
+                                          const void *from,
+                                          unsigned long size);
+unsigned long copy_to_user_fixup(void __user *to, const void *from,
+                                unsigned long size);
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
@@ -239,11 +239,11 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 }
 #define __copy_to_user copy_to_user
 
-extern unsigned long __must_check ___copy_in_user(void __user *to,
-                                                 const void __user *from,
-                                                 unsigned long size);
-extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
-                                       unsigned long size);
+unsigned long __must_check ___copy_in_user(void __user *to,
+                                          const void __user *from,
+                                          unsigned long size);
+unsigned long copy_in_user_fixup(void __user *to, void __user *from,
+                                unsigned long size);
 static inline unsigned long __must_check
 copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
@@ -255,20 +255,20 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 }
 #define __copy_in_user copy_in_user
 
-extern unsigned long __must_check __clear_user(void __user *, unsigned long);
+unsigned long __must_check __clear_user(void __user *, unsigned long);
 
 #define clear_user __clear_user
 
-extern __must_check long strlen_user(const char __user *str);
-extern __must_check long strnlen_user(const char __user *str, long n);
+__must_check long strlen_user(const char __user *str);
+__must_check long strnlen_user(const char __user *str, long n);
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
 struct pt_regs;
-extern unsigned long compute_effective_address(struct pt_regs *,
-                                              unsigned int insn,
-                                              unsigned int rd);
+unsigned long compute_effective_address(struct pt_regs *,
+                                       unsigned int insn,
+                                       unsigned int rd);
 
 #endif  /* __ASSEMBLY__ */
 
index 432afa8..e0f6c39 100644
@@ -372,14 +372,14 @@ do {      if (vio->debug & VIO_DEBUG_##TYPE) \
                       vio->vdev->channel_id, ## a); \
 } while (0)
 
-extern int __vio_register_driver(struct vio_driver *drv, struct module *owner,
+int __vio_register_driver(struct vio_driver *drv, struct module *owner,
                                 const char *mod_name);
 /*
  * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded
  */
 #define vio_register_driver(driver)            \
        __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
-extern void vio_unregister_driver(struct vio_driver *drv);
+void vio_unregister_driver(struct vio_driver *drv);
 
 static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
 {
@@ -391,21 +391,21 @@ static inline struct vio_dev *to_vio_dev(struct device *dev)
        return container_of(dev, struct vio_dev, dev);
 }
 
-extern int vio_ldc_send(struct vio_driver_state *vio, void *data, int len);
-extern void vio_link_state_change(struct vio_driver_state *vio, int event);
-extern void vio_conn_reset(struct vio_driver_state *vio);
-extern int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt);
-extern int vio_validate_sid(struct vio_driver_state *vio,
-                           struct vio_msg_tag *tp);
-extern u32 vio_send_sid(struct vio_driver_state *vio);
-extern int vio_ldc_alloc(struct vio_driver_state *vio,
-                        struct ldc_channel_config *base_cfg, void *event_arg);
-extern void vio_ldc_free(struct vio_driver_state *vio);
-extern int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
-                          u8 dev_class, struct vio_version *ver_table,
-                          int ver_table_size, struct vio_driver_ops *ops,
-                          char *name);
-
-extern void vio_port_up(struct vio_driver_state *vio);
+int vio_ldc_send(struct vio_driver_state *vio, void *data, int len);
+void vio_link_state_change(struct vio_driver_state *vio, int event);
+void vio_conn_reset(struct vio_driver_state *vio);
+int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt);
+int vio_validate_sid(struct vio_driver_state *vio,
+                    struct vio_msg_tag *tp);
+u32 vio_send_sid(struct vio_driver_state *vio);
+int vio_ldc_alloc(struct vio_driver_state *vio,
+                 struct ldc_channel_config *base_cfg, void *event_arg);
+void vio_ldc_free(struct vio_driver_state *vio);
+int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
+                   u8 dev_class, struct vio_version *ver_table,
+                   int ver_table_size, struct vio_driver_ops *ops,
+                   char *name);
+
+void vio_port_up(struct vio_driver_state *vio);
 
 #endif /* _SPARC64_VIO_H */
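
The comment retained in the vio.h hunk explains why vio_register_driver() stays a macro: KBUILD_MODNAME is defined per object file by the build system, so only a macro expanded at the call site can see the caller's value. A rough userspace sketch of the mechanism, with MODNAME standing in for KBUILD_MODNAME and all names illustrative:

    #include <stdio.h>

    #define MODNAME "demo_driver"   /* stands in for KBUILD_MODNAME */

    static int __register_driver(const char *mod_name)
    {
            printf("registering module %s\n", mod_name);
            return 0;
    }

    /* must be a macro: MODNAME expands where the macro is used */
    #define register_driver() __register_driver(MODNAME)

    int main(void)
    {
            return register_driver();
    }
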
index 39ca301..b266737 100644
@@ -57,7 +57,8 @@ static inline void save_and_clear_fpu(void) {
 "              " : : "i" (FPRS_FEF|FPRS_DU) :
                "o5", "g1", "g2", "g3", "g7", "cc");
 }
-extern int vis_emul(struct pt_regs *, unsigned int);
+
+int vis_emul(struct pt_regs *, unsigned int);
 #endif
 
 #endif /* _SPARC64_ASI_H */
index ee8edc6..50c8828 100644
 
 #include <asm/spitfire.h>
 
-extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
-                     unsigned long *);
-extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
-                     unsigned long *, unsigned long *);
-extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
-                     unsigned long *, unsigned long *, unsigned long *);
+void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+              unsigned long *);
+void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
+              unsigned long *, unsigned long *);
+void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
+              unsigned long *, unsigned long *, unsigned long *);
 
 /* XXX Ugh, write cheetah versions... -DaveM */
 
@@ -38,13 +38,13 @@ static struct xor_block_template xor_block_VIS = {
         .do_5  = xor_vis_5,
 };
 
-extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
-                         unsigned long *);
-extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
-                         unsigned long *, unsigned long *);
-extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
-                         unsigned long *, unsigned long *, unsigned long *);
+void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
+void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
+                  unsigned long *);
+void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
+                  unsigned long *, unsigned long *);
+void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
+                  unsigned long *, unsigned long *, unsigned long *);
 
 static struct xor_block_template xor_block_niagara = {
         .name  = "Niagara",
index b73274f..42f2bca 100644
 #define __NR_finit_module      342
 #define __NR_sched_setattr     343
 #define __NR_sched_getattr     344
+#define __NR_renameat2         345
 
-#define NR_syscalls            345
+#define NR_syscalls            346
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
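
With `__NR_renameat2` assigned number 345 and `NR_syscalls` bumped to 346, userspace can reach the new call through syscall(2) before any libc wrapper exists. A hedged sketch (path names illustrative; the number is the sparc value from this hunk and differs on other architectures, so real code should take it from <asm/unistd.h>):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <fcntl.h>              /* AT_FDCWD */

    #ifndef __NR_renameat2
    #define __NR_renameat2 345      /* sparc number added above */
    #endif

    int main(void)
    {
            /* flags == 0 behaves like plain renameat() */
            long rc = syscall(__NR_renameat2, AT_FDCWD, "old.txt",
                              AT_FDCWD, "new.txt", 0);
            if (rc < 0) {
                    perror("renameat2");
                    return 1;
            }
            return 0;
    }
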
index d15cc17..7cf9c6e 100644
@@ -42,7 +42,6 @@ obj-y                   += time_$(BITS).o
 obj-$(CONFIG_SPARC32)   += windows.o
 obj-y                   += cpu.o
 obj-$(CONFIG_SPARC32)   += devices.o
-obj-$(CONFIG_SPARC32)   += tadpole.o
 obj-y                   += ptrace_$(BITS).o
 obj-y                   += unaligned_$(BITS).o
 obj-y                   += una_asm_$(BITS).o
index 8fff0ac..24361b4 100644
@@ -3,6 +3,8 @@
 #include <linux/audit.h>
 #include <asm/unistd.h>
 
+#include "kernel.h"
+
 static unsigned dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
 ~0U
@@ -40,7 +42,6 @@ int audit_classify_arch(int arch)
 int audit_classify_syscall(int abi, unsigned syscall)
 {
 #ifdef CONFIG_COMPAT
-       extern int sparc32_classify_syscall(unsigned);
        if (abi == AUDIT_ARCH_SPARC)
                return sparc32_classify_syscall(syscall);
 #endif
@@ -61,11 +62,6 @@ int audit_classify_syscall(int abi, unsigned syscall)
 static int __init audit_classes_init(void)
 {
 #ifdef CONFIG_COMPAT
-       extern __u32 sparc32_dir_class[];
-       extern __u32 sparc32_write_class[];
-       extern __u32 sparc32_read_class[];
-       extern __u32 sparc32_chattr_class[];
-       extern __u32 sparc32_signal_class[];
        audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class);
        audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class);
        audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class);
index e20cc55..ae88c22 100644
@@ -9,12 +9,15 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/export.h>
+
 #include <asm/oplib.h>
 #include <asm/io.h>
 #include <asm/auxio.h>
 #include <asm/string.h>                /* memset(), Linux has no bzero() */
 #include <asm/cpu_type.h>
 
+#include "kernel.h"
+
 /* Probe and map in the Auxiliary I/O register */
 
 /* auxio_register is not static because it is referenced 
@@ -103,7 +106,7 @@ EXPORT_SYMBOL(set_auxio);
 
 /* sun4m power control register (AUXIO2) */
 
-volatile unsigned char * auxio_power_register = NULL;
+volatile u8 __iomem *auxio_power_register = NULL;
 
 void __init auxio_power_probe(void)
 {
@@ -127,8 +130,8 @@ void __init auxio_power_probe(void)
        r.flags = regs.which_io & 0xF;
        r.start = regs.phys_addr;
        r.end = regs.phys_addr + regs.reg_size - 1;
-       auxio_power_register = (unsigned char *) of_ioremap(&r, 0,
-           regs.reg_size, "auxpower");
+       auxio_power_register =
+               (u8 __iomem *)of_ioremap(&r, 0, regs.reg_size, "auxpower");
 
        /* Display a quick message on the console. */
        if (auxio_power_register)
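
The auxio hunks retype the power register as `u8 __iomem *`, the annotation sparse uses to flag direct dereferences of I/O memory. A rough userspace stand-in, with the attribute stubbed out and fake_mmio standing in for the device register; in-kernel code would go through readb()/writeb() instead of a plain load:

    #include <stdio.h>

    #define __iomem   /* in the kernel: an address-space attribute checked by sparse */

    static unsigned char fake_mmio = 0xAB;  /* pretend device register */
    static volatile unsigned char __iomem *power_reg = &fake_mmio;

    static unsigned char reg_read(volatile unsigned char __iomem *reg)
    {
            return *reg;    /* kernel code would use readb(reg) */
    }

    int main(void)
    {
            printf("power register = 0x%02x\n", reg_read(power_reg));
            return 0;
    }
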
index 57073e5..987f7ec 100644
@@ -137,7 +137,7 @@ static void scrollscreen(void)
 }
 #endif /* ndef NO_SCROLL */
 
-void btext_drawchar(char c)
+static void btext_drawchar(char c)
 {
        int cline = 0;
 #ifdef NO_SCROLL
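
Marking btext_drawchar() static is another recurring cleanup here: functions with no users outside their file get internal linkage, which keeps them out of the image's global symbol table and lets the compiler optimize more aggressively. A toy illustration:

    #include <stdio.h>

    static void helper(void)        /* internal linkage: not a global symbol */
    {
            puts("file-local helper");
    }

    int main(void)
    {
            helper();
            return 0;
    }
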
index d865575..7062263 100644
@@ -1,5 +1,6 @@
 #define __32bit_syscall_numbers__
 #include <asm/unistd.h>
+#include "kernel.h"
 
 unsigned sparc32_dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
index 5c51258..82a3a71 100644
@@ -22,6 +22,7 @@
 #include <asm/cpudata.h>
 
 #include "kernel.h"
+#include "entry.h"
 
 DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
 EXPORT_PER_CPU_SYMBOL(__cpu_data);
index e639880..9dac398 100644
@@ -2,8 +2,8 @@
 #define _CPUMAP_H
 
 #ifdef CONFIG_SMP
-extern void cpu_map_rebuild(void);
-extern int  map_to_cpu(unsigned int index);
+void cpu_map_rebuild(void);
+int map_to_cpu(unsigned int index);
 #define cpu_map_init() cpu_map_rebuild()
 #else
 #define cpu_map_init() do {} while (0)
index 3d465e8..8d5d09f 100644
@@ -19,8 +19,9 @@
 #include <asm/smp.h>
 #include <asm/cpudata.h>
 #include <asm/cpu_type.h>
+#include <asm/setup.h>
 
-extern void clock_stop_probe(void); /* tadpole.c */
+#include "kernel.h"
 
 static char *cpu_mid_prop(void)
 {
@@ -131,11 +132,6 @@ void __init device_scan(void)
        }
 #endif /* !CONFIG_SMP */
 
-       {
-               extern void auxio_probe(void);
-               extern void auxio_power_probe(void);
-               auxio_probe();
-               auxio_power_probe();
-       }
-       clock_stop_probe();
+       auxio_probe();
+       auxio_power_probe();
 }
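
device_scan() above loses its function-local `extern` block because the prototypes now live in the shared kernel.h. Declaring a function once in a header that both caller and implementation include lets the compiler cross-check the signatures; a self-contained sketch of the pattern (everything in one file here, with section comments marking what would be separate files):

    #include <stdio.h>

    /* what would live in kernel.h */
    void auxio_probe(void);
    void auxio_power_probe(void);

    /* the caller, which previously declared the externs inside the function */
    static void device_scan(void)
    {
            auxio_probe();
            auxio_power_probe();
    }

    /* definitions, normally in auxio_32.c; bodies are stand-ins */
    void auxio_probe(void)        { puts("auxio probed"); }
    void auxio_power_probe(void)  { puts("auxio power probed"); }

    int main(void)
    {
            device_scan();
            return 0;
    }
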
index 140966f..ebaba61 100644
@@ -6,40 +6,39 @@
 #include <linux/init.h>
 
 /* irq */
-extern void handler_irq(int irq, struct pt_regs *regs);
+void handler_irq(int irq, struct pt_regs *regs);
 
 #ifdef CONFIG_SPARC32
 /* traps */
-extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
-extern void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
-                                   unsigned long npc, unsigned long psr);
-
-extern void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
-                                unsigned long npc, unsigned long psr);
-extern void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
-                                   unsigned long npc,
-                                   unsigned long psr);
-extern void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
+void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
+void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
+                            unsigned long npc, unsigned long psr);
+
+void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
+                         unsigned long npc, unsigned long psr);
+void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
+                            unsigned long npc, unsigned long psr);
+void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
+                 unsigned long npc, unsigned long psr);
+void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
+                 unsigned long npc, unsigned long psr);
+void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
+                         unsigned long npc, unsigned long psr);
+void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
+                       unsigned long npc, unsigned long psr);
+void handle_reg_access(struct pt_regs *regs, unsigned long pc,
+                       unsigned long npc, unsigned long psr);
+void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
                         unsigned long npc, unsigned long psr);
-extern void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
-                        unsigned long npc, unsigned long psr);
-extern void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
-                                unsigned long npc, unsigned long psr);
-extern void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
-                              unsigned long npc, unsigned long psr);
-extern void handle_reg_access(struct pt_regs *regs, unsigned long pc,
-                              unsigned long npc, unsigned long psr);
-extern void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
-                               unsigned long npc, unsigned long psr);
-extern void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
-                                unsigned long npc, unsigned long psr);
+void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
+                         unsigned long npc, unsigned long psr);
 
 
 
 /* entry.S */
-extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
-                   void *fpqueue, unsigned long *fpqdepth);
-extern void fpload(unsigned long *fpregs, unsigned long *fsr);
+void fpsave(unsigned long *fpregs, unsigned long *fsr,
+            void *fpqueue, unsigned long *fpqdepth);
+void fpload(unsigned long *fpregs, unsigned long *fsr);
 
 #else /* CONFIG_SPARC32 */
 
@@ -66,123 +65,123 @@ struct pause_patch_entry {
 extern struct pause_patch_entry __pause_3insn_patch,
        __pause_3insn_patch_end;
 
-extern void __init per_cpu_patch(void);
-extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
-                                   struct sun4v_1insn_patch_entry *);
-extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
-                                   struct sun4v_2insn_patch_entry *);
-extern void __init sun4v_patch(void);
-extern void __init boot_cpu_id_too_large(int cpu);
+void __init per_cpu_patch(void);
+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+                            struct sun4v_1insn_patch_entry *);
+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+                            struct sun4v_2insn_patch_entry *);
+void __init sun4v_patch(void);
+void __init boot_cpu_id_too_large(int cpu);
 extern unsigned int dcache_parity_tl1_occurred;
 extern unsigned int icache_parity_tl1_occurred;
 
-extern asmlinkage void sparc_breakpoint(struct pt_regs *regs);
-extern void timer_interrupt(int irq, struct pt_regs *regs);
-
-extern void do_notify_resume(struct pt_regs *regs,
-                            unsigned long orig_i0,
-                            unsigned long thread_info_flags);
-
-extern asmlinkage int syscall_trace_enter(struct pt_regs *regs);
-extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
-
-extern void bad_trap_tl1(struct pt_regs *regs, long lvl);
-
-extern void do_fpieee(struct pt_regs *regs);
-extern void do_fpother(struct pt_regs *regs);
-extern void do_tof(struct pt_regs *regs);
-extern void do_div0(struct pt_regs *regs);
-extern void do_illegal_instruction(struct pt_regs *regs);
-extern void mem_address_unaligned(struct pt_regs *regs,
-                                 unsigned long sfar,
-                                 unsigned long sfsr);
-extern void sun4v_do_mna(struct pt_regs *regs,
-                        unsigned long addr,
-                        unsigned long type_ctx);
-extern void do_privop(struct pt_regs *regs);
-extern void do_privact(struct pt_regs *regs);
-extern void do_cee(struct pt_regs *regs);
-extern void do_cee_tl1(struct pt_regs *regs);
-extern void do_dae_tl1(struct pt_regs *regs);
-extern void do_iae_tl1(struct pt_regs *regs);
-extern void do_div0_tl1(struct pt_regs *regs);
-extern void do_fpdis_tl1(struct pt_regs *regs);
-extern void do_fpieee_tl1(struct pt_regs *regs);
-extern void do_fpother_tl1(struct pt_regs *regs);
-extern void do_ill_tl1(struct pt_regs *regs);
-extern void do_irq_tl1(struct pt_regs *regs);
-extern void do_lddfmna_tl1(struct pt_regs *regs);
-extern void do_stdfmna_tl1(struct pt_regs *regs);
-extern void do_paw(struct pt_regs *regs);
-extern void do_paw_tl1(struct pt_regs *regs);
-extern void do_vaw(struct pt_regs *regs);
-extern void do_vaw_tl1(struct pt_regs *regs);
-extern void do_tof_tl1(struct pt_regs *regs);
-extern void do_getpsr(struct pt_regs *regs);
-
-extern void spitfire_insn_access_exception(struct pt_regs *regs,
-                                          unsigned long sfsr,
-                                          unsigned long sfar);
-extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
-                                              unsigned long sfsr,
-                                              unsigned long sfar);
-extern void spitfire_data_access_exception(struct pt_regs *regs,
-                                          unsigned long sfsr,
-                                          unsigned long sfar);
-extern void spitfire_data_access_exception_tl1(struct pt_regs *regs,
-                                              unsigned long sfsr,
-                                              unsigned long sfar);
-extern void spitfire_access_error(struct pt_regs *regs,
-                                 unsigned long status_encoded,
-                                 unsigned long afar);
-
-extern void cheetah_fecc_handler(struct pt_regs *regs,
-                                unsigned long afsr,
-                                unsigned long afar);
-extern void cheetah_cee_handler(struct pt_regs *regs,
-                               unsigned long afsr,
-                               unsigned long afar);
-extern void cheetah_deferred_handler(struct pt_regs *regs,
-                                    unsigned long afsr,
-                                    unsigned long afar);
-extern void cheetah_plus_parity_error(int type, struct pt_regs *regs);
-
-extern void sun4v_insn_access_exception(struct pt_regs *regs,
-                                       unsigned long addr,
-                                       unsigned long type_ctx);
-extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
-                                           unsigned long addr,
-                                           unsigned long type_ctx);
-extern void sun4v_data_access_exception(struct pt_regs *regs,
-                                       unsigned long addr,
-                                       unsigned long type_ctx);
-extern void sun4v_data_access_exception_tl1(struct pt_regs *regs,
-                                           unsigned long addr,
-                                           unsigned long type_ctx);
-extern void sun4v_resum_error(struct pt_regs *regs,
-                             unsigned long offset);
-extern void sun4v_resum_overflow(struct pt_regs *regs);
-extern void sun4v_nonresum_error(struct pt_regs *regs,
-                                unsigned long offset);
-extern void sun4v_nonresum_overflow(struct pt_regs *regs);
+asmlinkage void sparc_breakpoint(struct pt_regs *regs);
+void timer_interrupt(int irq, struct pt_regs *regs);
+
+void do_notify_resume(struct pt_regs *regs,
+                     unsigned long orig_i0,
+                     unsigned long thread_info_flags);
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void syscall_trace_leave(struct pt_regs *regs);
+
+void bad_trap_tl1(struct pt_regs *regs, long lvl);
+
+void do_fpieee(struct pt_regs *regs);
+void do_fpother(struct pt_regs *regs);
+void do_tof(struct pt_regs *regs);
+void do_div0(struct pt_regs *regs);
+void do_illegal_instruction(struct pt_regs *regs);
+void mem_address_unaligned(struct pt_regs *regs,
+                          unsigned long sfar,
+                          unsigned long sfsr);
+void sun4v_do_mna(struct pt_regs *regs,
+                 unsigned long addr,
+                 unsigned long type_ctx);
+void do_privop(struct pt_regs *regs);
+void do_privact(struct pt_regs *regs);
+void do_cee(struct pt_regs *regs);
+void do_cee_tl1(struct pt_regs *regs);
+void do_dae_tl1(struct pt_regs *regs);
+void do_iae_tl1(struct pt_regs *regs);
+void do_div0_tl1(struct pt_regs *regs);
+void do_fpdis_tl1(struct pt_regs *regs);
+void do_fpieee_tl1(struct pt_regs *regs);
+void do_fpother_tl1(struct pt_regs *regs);
+void do_ill_tl1(struct pt_regs *regs);
+void do_irq_tl1(struct pt_regs *regs);
+void do_lddfmna_tl1(struct pt_regs *regs);
+void do_stdfmna_tl1(struct pt_regs *regs);
+void do_paw(struct pt_regs *regs);
+void do_paw_tl1(struct pt_regs *regs);
+void do_vaw(struct pt_regs *regs);
+void do_vaw_tl1(struct pt_regs *regs);
+void do_tof_tl1(struct pt_regs *regs);
+void do_getpsr(struct pt_regs *regs);
+
+void spitfire_insn_access_exception(struct pt_regs *regs,
+                                   unsigned long sfsr,
+                                   unsigned long sfar);
+void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
+                                       unsigned long sfsr,
+                                       unsigned long sfar);
+void spitfire_data_access_exception(struct pt_regs *regs,
+                                   unsigned long sfsr,
+                                   unsigned long sfar);
+void spitfire_data_access_exception_tl1(struct pt_regs *regs,
+                                       unsigned long sfsr,
+                                       unsigned long sfar);
+void spitfire_access_error(struct pt_regs *regs,
+                          unsigned long status_encoded,
+                          unsigned long afar);
+
+void cheetah_fecc_handler(struct pt_regs *regs,
+                         unsigned long afsr,
+                         unsigned long afar);
+void cheetah_cee_handler(struct pt_regs *regs,
+                        unsigned long afsr,
+                        unsigned long afar);
+void cheetah_deferred_handler(struct pt_regs *regs,
+                             unsigned long afsr,
+                             unsigned long afar);
+void cheetah_plus_parity_error(int type, struct pt_regs *regs);
+
+void sun4v_insn_access_exception(struct pt_regs *regs,
+                                unsigned long addr,
+                                unsigned long type_ctx);
+void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
+                                    unsigned long addr,
+                                    unsigned long type_ctx);
+void sun4v_data_access_exception(struct pt_regs *regs,
+                                unsigned long addr,
+                                unsigned long type_ctx);
+void sun4v_data_access_exception_tl1(struct pt_regs *regs,
+                                    unsigned long addr,
+                                    unsigned long type_ctx);
+void sun4v_resum_error(struct pt_regs *regs,
+                      unsigned long offset);
+void sun4v_resum_overflow(struct pt_regs *regs);
+void sun4v_nonresum_error(struct pt_regs *regs,
+                         unsigned long offset);
+void sun4v_nonresum_overflow(struct pt_regs *regs);
 
 extern unsigned long sun4v_err_itlb_vaddr;
 extern unsigned long sun4v_err_itlb_ctx;
 extern unsigned long sun4v_err_itlb_pte;
 extern unsigned long sun4v_err_itlb_error;
 
-extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
+void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
 
 extern unsigned long sun4v_err_dtlb_vaddr;
 extern unsigned long sun4v_err_dtlb_ctx;
 extern unsigned long sun4v_err_dtlb_pte;
 extern unsigned long sun4v_err_dtlb_error;
 
-extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
-extern void hypervisor_tlbop_error(unsigned long err,
-                                  unsigned long op);
-extern void hypervisor_tlbop_error_xcall(unsigned long err,
-                                        unsigned long op);
+void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
+void hypervisor_tlbop_error(unsigned long err,
+                           unsigned long op);
+void hypervisor_tlbop_error_xcall(unsigned long err,
+                                 unsigned long op);
 
 /* WARNING: The error trap handlers in assembly know the precise
  *         layout of the following structure.
@@ -248,8 +247,8 @@ struct ino_bucket {
 extern struct ino_bucket *ivector_table;
 extern unsigned long ivector_table_pa;
 
-extern void init_irqwork_curcpu(void);
-extern void sun4v_register_mondo_queues(int this_cpu);
+void init_irqwork_curcpu(void);
+void sun4v_register_mondo_queues(int this_cpu);
 
 #endif /* CONFIG_SPARC32 */
 #endif /* _ENTRY_H */
index 76663b0..bfa4d0c 100644
@@ -21,6 +21,7 @@
 #include <asm/iommu.h>
 
 #include "iommu_common.h"
+#include "kernel.h"
 
 #define STC_CTXMATCH_ADDR(STC, CTX)    \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
@@ -840,8 +841,6 @@ static struct dma_map_ops sun4u_dma_ops = {
 struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
-
 int dma_supported(struct device *dev, u64 device_mask)
 {
        struct iommu *iommu = dev->archdata.iommu;
index 591f587..1ec0de4 100644
@@ -48,12 +48,12 @@ static inline int is_span_boundary(unsigned long entry,
        return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }
 
-extern unsigned long iommu_range_alloc(struct device *dev,
-                                      struct iommu *iommu,
-                                      unsigned long npages,
-                                      unsigned long *handle);
-extern void iommu_range_free(struct iommu *iommu,
-                            dma_addr_t dma_addr,
-                            unsigned long npages);
+unsigned long iommu_range_alloc(struct device *dev,
+                               struct iommu *iommu,
+                               unsigned long npages,
+                               unsigned long *handle);
+void iommu_range_free(struct iommu *iommu,
+                     dma_addr_t dma_addr,
+                     unsigned long npages);
 
 #endif /* _IOMMU_COMMON_H */
index e7e215d..7f08ec8 100644
@@ -186,7 +186,7 @@ static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
 
        if (name == NULL) name = "???";
 
-       if ((xres = xres_alloc()) != 0) {
+       if ((xres = xres_alloc()) != NULL) {
                tack = xres->xname;
                res = &xres->xres;
        } else {
@@ -400,7 +400,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        BUG();
 }
 
-struct dma_map_ops sbus_dma_ops = {
+static struct dma_map_ops sbus_dma_ops = {
        .alloc                  = sbus_alloc_coherent,
        .free                   = sbus_free_coherent,
        .map_page               = sbus_map_page,
@@ -681,7 +681,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v)
        const char *nm;
 
        for (r = root->child; r != NULL; r = r->sibling) {
-               if ((nm = r->name) == 0) nm = "???";
+               if ((nm = r->name) == NULL) nm = "???";
                seq_printf(m, "%016llx-%016llx: %s\n",
                                (unsigned long long)r->start,
                                (unsigned long long)r->end, nm);
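
Two of the ioport.c fixups replace integer-style tests on pointers with explicit NULL comparisons. The generated code is identical; the NULL form states the intent and silences sparse's "using plain integer as NULL pointer" warning. For instance:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            char *p = malloc(16);

            if (p != NULL)          /* preferred: clearly a pointer test */
                    puts("allocated");
            if (p != 0)             /* legal C, but sparse complains */
                    puts("allocated, with a style warning");
            free(p);
            return 0;
    }
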
index b66b6aa..70a0b8d 100644
@@ -82,11 +82,20 @@ void handler_irq(unsigned int pil, struct pt_regs *regs);
 
 unsigned long leon_get_irqmask(unsigned int irq);
 
+/* irq_32.c */
+void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs);
+
+/* sun4m_irq.c */
+void sun4m_nmi(struct pt_regs *regs);
+
+/* sun4d_irq.c */
+void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs);
+
 #ifdef CONFIG_SMP
 
 /* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
 #define SUN4D_IPI_IRQ 13
 
-extern void sun4d_ipi_interrupt(void);
+void sun4d_ipi_interrupt(void);
 
 #endif
index c145f6f..a979e99 100644
@@ -17,6 +17,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cpudata.h>
+#include <asm/setup.h>
 #include <asm/pcic.h>
 #include <asm/leon.h>
 
index a702d9a..e7f652b 100644
@@ -2,6 +2,7 @@
 #define __SPARC_KERNEL_H
 
 #include <linux/interrupt.h>
+#include <linux/ftrace.h>
 
 #include <asm/traps.h>
 #include <asm/head.h>
@@ -15,62 +16,111 @@ extern int ncpus_probed;
 #ifdef CONFIG_SPARC64
 /* setup_64.c */
 struct seq_file;
-extern void cpucap_info(struct seq_file *);
+void cpucap_info(struct seq_file *);
 
-static inline unsigned long kimage_addr_to_ra(const char *p)
+static inline unsigned long kimage_addr_to_ra(const void *p)
 {
        unsigned long val = (unsigned long) p;
 
        return kern_base + (val - KERNBASE);
 }
+
+/* sys_sparc_64.c */
+asmlinkage long sys_kern_features(void);
+
+/* unaligned_64.c */
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+int handle_popc(u32 insn, struct pt_regs *regs);
+void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr);
+void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr);
+
+/* smp_64.c */
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
+
+/* kgdb_64.c */
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs);
+
+/* pci.c */
+int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+
+/* signal32.c */
+void do_sigreturn32(struct pt_regs *regs);
+asmlinkage void do_rt_sigreturn32(struct pt_regs *regs);
+void do_signal32(struct pt_regs * regs);
+asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
+
+/* compat_audit.c */
+extern unsigned sparc32_dir_class[];
+extern unsigned sparc32_chattr_class[];
+extern unsigned sparc32_write_class[];
+extern unsigned sparc32_read_class[];
+extern unsigned sparc32_signal_class[];
+int sparc32_classify_syscall(unsigned syscall);
 #endif
 
 #ifdef CONFIG_SPARC32
 /* setup_32.c */
+struct linux_romvec;
 void sparc32_start_kernel(struct linux_romvec *rp);
 
 /* cpu.c */
-extern void cpu_probe(void);
+void cpu_probe(void);
 
 /* traps_32.c */
-extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
-                              unsigned long npc, unsigned long psr);
+void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
+                       unsigned long npc, unsigned long psr);
 /* irq_32.c */
 extern struct irqaction static_irqaction[];
 extern int static_irq_count;
 extern spinlock_t irq_action_lock;
 
-extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
-extern void init_IRQ(void);
+void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
+void init_IRQ(void);
 
 /* sun4m_irq.c */
-extern void sun4m_init_IRQ(void);
-extern void sun4m_unmask_profile_irq(void);
-extern void sun4m_clear_profile_irq(int cpu);
+void sun4m_init_IRQ(void);
+void sun4m_unmask_profile_irq(void);
+void sun4m_clear_profile_irq(int cpu);
 
 /* sun4m_smp.c */
 void sun4m_cpu_pre_starting(void *arg);
 void sun4m_cpu_pre_online(void *arg);
+void __init smp4m_boot_cpus(void);
+int smp4m_boot_one_cpu(int i, struct task_struct *idle);
+void __init smp4m_smp_done(void);
+void smp4m_cross_call_irq(void);
+void smp4m_percpu_timer_interrupt(struct pt_regs *regs);
 
 /* sun4d_irq.c */
 extern spinlock_t sun4d_imsk_lock;
 
-extern void sun4d_init_IRQ(void);
-extern int sun4d_request_irq(unsigned int irq,
-                             irq_handler_t handler,
-                             unsigned long irqflags,
-                             const char *devname, void *dev_id);
-extern int show_sun4d_interrupts(struct seq_file *, void *);
-extern void sun4d_distribute_irqs(void);
-extern void sun4d_free_irq(unsigned int irq, void *dev_id);
+void sun4d_init_IRQ(void);
+int sun4d_request_irq(unsigned int irq,
+                      irq_handler_t handler,
+                      unsigned long irqflags,
+                      const char *devname, void *dev_id);
+int show_sun4d_interrupts(struct seq_file *, void *);
+void sun4d_distribute_irqs(void);
+void sun4d_free_irq(unsigned int irq, void *dev_id);
 
 /* sun4d_smp.c */
 void sun4d_cpu_pre_starting(void *arg);
 void sun4d_cpu_pre_online(void *arg);
+void __init smp4d_boot_cpus(void);
+int smp4d_boot_one_cpu(int i, struct task_struct *idle);
+void __init smp4d_smp_done(void);
+void smp4d_cross_call_irq(void);
+void smp4d_percpu_timer_interrupt(struct pt_regs *regs);
 
 /* leon_smp.c */
 void leon_cpu_pre_starting(void *arg);
 void leon_cpu_pre_online(void *arg);
+void leonsmp_ipi_interrupt(void);
+void leon_cross_call_irq(void);
 
 /* head_32.S */
 extern unsigned int t_nmi[];
@@ -89,12 +139,48 @@ extern unsigned int real_irq_entry[];
 extern unsigned int smp4d_ticker[];
 extern unsigned int patchme_maybe_smp_msg[];
 
-extern void floppy_hardint(void);
+void floppy_hardint(void);
 
 /* trampoline_32.S */
 extern unsigned long sun4m_cpu_startup;
 extern unsigned long sun4d_cpu_startup;
 
+/* process_32.c */
+asmlinkage int sparc_do_fork(unsigned long clone_flags,
+                             unsigned long stack_start,
+                             struct pt_regs *regs,
+                             unsigned long stack_size);
+
+/* signal_32.c */
+asmlinkage void do_sigreturn(struct pt_regs *regs);
+asmlinkage void do_rt_sigreturn(struct pt_regs *regs);
+void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
+                      unsigned long thread_info_flags);
+asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
+                               struct sigstack __user *ossptr,
+                               unsigned long sp);
+
+/* ptrace_32.c */
+asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p);
+
+/* unaligned_32.c */
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+
+/* windows.c */
+void try_to_clear_window_buffer(struct pt_regs *regs, int who);
+
+/* auxio_32.c */
+void __init auxio_probe(void);
+void __init auxio_power_probe(void);
+
+/* pcic.c */
+extern void __iomem *pcic_regs;
+void pcic_nmi(unsigned int pend, struct pt_regs *regs);
+
+/* time_32.c */
+void __init time_init(void);
+
 #else /* CONFIG_SPARC32 */
 #endif /* CONFIG_SPARC32 */
 #endif /* !(__SPARC_KERNEL_H) */
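
Note the bare `struct linux_romvec;` added near the top of the kernel.h hunk: an incomplete type is all a prototype needs for a pointer parameter, so the header avoids pulling in whatever header defines the full structure. A self-contained sketch with illustrative names:

    #include <stdio.h>

    struct romvec;                          /* forward declaration is enough... */
    void start_kernel(struct romvec *rp);   /* ...for a pointer parameter */

    struct romvec { int version; };         /* full definition lives elsewhere */

    void start_kernel(struct romvec *rp)
    {
            printf("romvec version %d\n", rp->version);
    }

    int main(void)
    {
            struct romvec rv = { .version = 3 };
            start_kernel(&rv);
            return 0;
    }
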
index b45fe3f..cbf21d0 100644
@@ -13,6 +13,8 @@
 #include <asm/ptrace.h>
 #include <asm/irq.h>
 
+#include "kernel.h"
+
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
        struct reg_window *win;
index 1b09735..98d7128 100644
@@ -512,7 +512,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 /*
  * Called when the probe at kretprobe trampoline is hit
  */
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+                                             struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
@@ -576,7 +577,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
        return 1;
 }
 
-void kretprobe_trampoline_holder(void)
+static void __used kretprobe_trampoline_holder(void)
 {
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline:\n"
index b7c6897..683c4af 100644
@@ -32,12 +32,12 @@ struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base addr
 
 int leondebug_irq_disable;
 int leon_debug_irqout;
-static int dummy_master_l10_counter;
+static volatile u32 dummy_master_l10_counter;
 unsigned long amba_system_id;
 static DEFINE_SPINLOCK(leon_irq_lock);
 
+static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
-unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
 unsigned int sparc_leon_eirq;
 #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
 #define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -65,7 +65,7 @@ static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
 }
 
 /* The extended IRQ controller has been found, this function registers it */
-void leon_eirq_setup(unsigned int eirq)
+static void leon_eirq_setup(unsigned int eirq)
 {
        unsigned long mask, oldmask;
        unsigned int veirq;
@@ -270,7 +270,7 @@ static u32 leon_cycles_offset(void)
 #ifdef CONFIG_SMP
 
 /* smp clockevent irq */
-irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
+static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
 {
        struct clock_event_device *ce;
        int cpu = smp_processor_id();
@@ -313,7 +313,7 @@ void __init leon_init_timers(void)
 
        leondebug_irq_disable = 0;
        leon_debug_irqout = 0;
-       master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
+       master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter;
        dummy_master_l10_counter = 0;
 
        rootnp = of_find_node_by_path("/ambapp0");
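
dummy_master_l10_counter becomes a volatile u32 because it is read through the same `master_l10_counter` pointer used for a live hardware counter; volatile forces the compiler to perform every load rather than reuse a cached value. In miniature:

    #include <stdio.h>

    static volatile unsigned int counter;   /* stands in for dummy_master_l10_counter */

    int main(void)
    {
            unsigned int first  = counter;  /* each access is a real load; */
            unsigned int second = counter;  /* the two reads cannot be folded */

            printf("%u %u\n", first, second);
            return 0;
    }
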
index e16c415..899b720 100644
@@ -98,82 +98,3 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 {
        return res->start;
 }
-
-/* in/out routines taken from pcic.c
- *
- * This probably belongs here rather than ioport.c because
- * we do not want this crud linked into SBus kernels.
- * Also, think for a moment about likes of floppy.c that
- * include architecture specific parts. They may want to redefine ins/outs.
- *
- * We do not use horrible macros here because we want to
- * advance pointer by sizeof(size).
- */
-void outsb(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 1;
-               outb(*(const char *)src, addr);
-               src += 1;
-               /* addr += 1; */
-       }
-}
-EXPORT_SYMBOL(outsb);
-
-void outsw(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 2;
-               outw(*(const short *)src, addr);
-               src += 2;
-               /* addr += 2; */
-       }
-}
-EXPORT_SYMBOL(outsw);
-
-void outsl(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 4;
-               outl(*(const long *)src, addr);
-               src += 4;
-               /* addr += 4; */
-       }
-}
-EXPORT_SYMBOL(outsl);
-
-void insb(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 1;
-               *(unsigned char *)dst = inb(addr);
-               dst += 1;
-               /* addr += 1; */
-       }
-}
-EXPORT_SYMBOL(insb);
-
-void insw(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 2;
-               *(unsigned short *)dst = inw(addr);
-               dst += 2;
-               /* addr += 2; */
-       }
-}
-EXPORT_SYMBOL(insw);
-
-void insl(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 4;
-               /*
-                * XXX I am sure we are in for an unaligned trap here.
-                */
-               *(unsigned long *)dst = inl(addr);
-               dst += 4;
-               /* addr += 4; */
-       }
-}
-EXPORT_SYMBOL(insl);
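
The insb()/outsb() family deleted above duplicated generic port string I/O: each routine moves a whole buffer against one fixed port address, advancing only the memory pointer. A simplified userspace sketch of the outsw() shape (outw_stub() is a stand-in for the real port write, and count here is an element count):

    #include <stdio.h>
    #include <stdint.h>

    static void outw_stub(uint16_t v, unsigned long port)
    {
            printf("port %#lx <- %#06x\n", port, v);
    }

    static void outsw_demo(unsigned long port, const void *src, unsigned long count)
    {
            const uint16_t *p = src;

            while (count--) {
                    outw_stub(*p, port);    /* the port stays fixed... */
                    p++;                    /* ...the memory pointer advances */
            }
    }

    int main(void)
    {
            uint16_t buf[] = { 0x1122, 0x3344, 0x5566 };

            outsw_demo(0x3f8, buf, 3);
            return 0;
    }
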
index 6df26e3..c8bf26e 100644
@@ -80,7 +80,7 @@ struct grpci1_regs {
 
 struct grpci1_priv {
        struct leon_pci_info    info; /* must be on top of this structure */
-       struct grpci1_regs      *regs;          /* GRPCI register map */
+       struct grpci1_regs __iomem *regs;               /* GRPCI register map */
        struct device           *dev;
        int                     pci_err_mask;   /* STATUS register error mask */
        int                     irq;            /* LEON irqctrl GRPCI IRQ */
@@ -101,7 +101,7 @@ static struct grpci1_priv *grpci1priv;
 static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
                                unsigned int devfn, int where, u32 val);
 
-int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        struct grpci1_priv *priv = dev->bus->sysdata;
        int irq_group;
@@ -144,7 +144,7 @@ static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus,
                grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp);
        } else {
                /* Bus always little endian (unaffected by byte-swapping) */
-               *val = flip_dword(tmp);
+               *val = swab32(tmp);
        }
 
        return 0;
@@ -197,7 +197,7 @@ static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
 
        pci_conf = (unsigned int *) (priv->pci_conf |
                                                (devfn << 8) | (where & 0xfc));
-       LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
+       LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
 
        return 0;
 }
@@ -417,10 +417,10 @@ out:
  *  BAR1: peripheral DMA to host's memory (size at least 256MByte)
  *  BAR2..BAR5: not implemented in hardware
  */
-void grpci1_hw_init(struct grpci1_priv *priv)
+static void grpci1_hw_init(struct grpci1_priv *priv)
 {
        u32 ahbadr, bar_sz, data, pciadr;
-       struct grpci1_regs *regs = priv->regs;
+       struct grpci1_regs __iomem *regs = priv->regs;
 
        /* set 1:1 mapping between AHB -> PCI memory space */
        REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000);
@@ -509,7 +509,7 @@ static irqreturn_t grpci1_err_interrupt(int irq, void *arg)
 
 static int grpci1_of_probe(struct platform_device *ofdev)
 {
-       struct grpci1_regs *regs;
+       struct grpci1_regs __iomem *regs;
        struct grpci1_priv *priv;
        int err, len;
        const int *tmp;
@@ -690,7 +690,7 @@ err3:
 err2:
        release_resource(&priv->info.mem_space);
 err1:
-       iounmap((void *)priv->pci_io_va);
+       iounmap((void __iomem *)priv->pci_io_va);
        grpci1priv = NULL;
        return err;
 }
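
grpci1 (and grpci2 below) drop the driver-private flip_dword() for the kernel's generic swab32(). The byte swap itself is unchanged: PCI configuration space is little-endian, so the big-endian sparc CPU swaps each 32-bit word on the way in and out. A userspace equivalent of the operation:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t swab32_demo(uint32_t x)
    {
            return ((x & 0x000000ffu) << 24) |
                   ((x & 0x0000ff00u) <<  8) |
                   ((x & 0x00ff0000u) >>  8) |
                   ((x & 0xff000000u) >> 24);
    }

    int main(void)
    {
            /* prints 0x12345678 -> 0x78563412 */
            printf("0x%08x -> 0x%08x\n", 0x12345678u, swab32_demo(0x12345678u));
            return 0;
    }
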
index 24d6a44..e433a4d 100644
@@ -191,7 +191,7 @@ struct grpci2_cap_first {
 
 struct grpci2_priv {
        struct leon_pci_info    info; /* must be on top of this structure */
-       struct grpci2_regs      *regs;
+       struct grpci2_regs __iomem *regs;
        char                    irq;
        char                    irq_mode; /* IRQ Mode from CAPSTS REG */
        char                    bt_enabled;
@@ -215,10 +215,10 @@ struct grpci2_priv {
        struct grpci2_barcfg    tgtbars[6];
 };
 
-DEFINE_SPINLOCK(grpci2_dev_lock);
-struct grpci2_priv *grpci2priv;
+static DEFINE_SPINLOCK(grpci2_dev_lock);
+static struct grpci2_priv *grpci2priv;
 
-int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        struct grpci2_priv *priv = dev->bus->sysdata;
        int irq_group;
@@ -270,7 +270,7 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
                *val = 0xffffffff;
        } else {
                /* Bus always little endian (unaffected by byte-swapping) */
-               *val = flip_dword(tmp);
+               *val = swab32(tmp);
        }
 
        return 0;
@@ -328,7 +328,7 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
 
        pci_conf = (unsigned int *) (priv->pci_conf |
                                                (devfn << 8) | (where & 0xfc));
-       LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
+       LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
 
        /* Wait until GRPCI2 signals that CFG access is done, it should be
         * done instantaneously unless a DMA operation is ongoing...
@@ -561,10 +561,10 @@ out:
        return virq;
 }
 
-void grpci2_hw_init(struct grpci2_priv *priv)
+static void grpci2_hw_init(struct grpci2_priv *priv)
 {
        u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
-       struct grpci2_regs *regs = priv->regs;
+       struct grpci2_regs __iomem *regs = priv->regs;
        int i;
        struct grpci2_barcfg *barcfg = priv->tgtbars;
 
@@ -655,7 +655,7 @@ static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
 static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
 {
        struct grpci2_priv *priv = arg;
-       struct grpci2_regs *regs = priv->regs;
+       struct grpci2_regs __iomem *regs = priv->regs;
        unsigned int status;
 
        status = REGLOAD(regs->sts_cap);
@@ -682,7 +682,7 @@ static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
 
 static int grpci2_of_probe(struct platform_device *ofdev)
 {
-       struct grpci2_regs *regs;
+       struct grpci2_regs __iomem *regs;
        struct grpci2_priv *priv;
        int err, i, len;
        const int *tmp;
@@ -878,7 +878,7 @@ err4:
        release_resource(&priv->info.mem_space);
 err3:
        err = -ENOMEM;
-       iounmap((void *)priv->pci_io_va);
+       iounmap((void __iomem *)priv->pci_io_va);
 err2:
        kfree(priv);
 err1:
index b0b3967..ddcf950 100644
 #include <asm/processor.h>
 
 /* List of Systems that need fixup instructions around power-down instruction */
-unsigned int pmc_leon_fixup_ids[] = {
+static unsigned int pmc_leon_fixup_ids[] = {
        AEROFLEX_UT699,
        GAISLER_GR712RC,
        LEON4_NEXTREME1,
        0
 };
 
-int pmc_leon_need_fixup(void)
+static int pmc_leon_need_fixup(void)
 {
        unsigned int systemid = amba_system_id >> 16;
        unsigned int *id;
@@ -38,7 +38,7 @@ int pmc_leon_need_fixup(void)
  * CPU idle callback function for systems that need some extra handling
  * See .../arch/sparc/kernel/process.c
  */
-void pmc_leon_idle_fixup(void)
+static void pmc_leon_idle_fixup(void)
 {
        /* Prepare an address to a non-cachable region. APB is always
         * none-cachable. One instruction is executed after the Sleep
@@ -62,7 +62,7 @@ void pmc_leon_idle_fixup(void)
  * CPU idle callback function
  * See .../arch/sparc/kernel/process.c
  */
-void pmc_leon_idle(void)
+static void pmc_leon_idle(void)
 {
        /* Interrupts need to be enabled to not hang the CPU */
        local_irq_enable();
index 6edf955..018ef11 100644
@@ -130,7 +130,7 @@ void leon_configure_cache_smp(void)
        local_ops->tlb_all();
 }
 
-void leon_smp_setbroadcast(unsigned int mask)
+static void leon_smp_setbroadcast(unsigned int mask)
 {
        int broadcast =
            ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
@@ -148,13 +148,6 @@ void leon_smp_setbroadcast(unsigned int mask)
        LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
 }
 
-unsigned int leon_smp_getbroadcast(void)
-{
-       unsigned int mask;
-       mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast));
-       return mask;
-}
-
 int leon_smp_nrcpus(void)
 {
        int nrcpu =
@@ -266,10 +259,6 @@ void __init leon_smp_done(void)
 
 }
 
-void leon_irq_rotate(int cpu)
-{
-}
-
 struct leon_ipi_work {
        int single;
        int msk;
index 3241f56..de0ee39 100644
@@ -5,8 +5,10 @@
 #include <linux/mod_devicetable.h>
 #include <linux/errno.h>
 #include <linux/irq.h>
-#include <linux/of_device.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
 
 #include "of_device_common.h"
 
index 857ad77..539babf 100644
@@ -28,6 +28,7 @@
 #include <asm/apb.h>
 
 #include "pci_impl.h"
+#include "kernel.h"
 
 /* List of all PCI controllers found in the system. */
 struct pci_pbm_info *pci_pbm_root = NULL;
index 5f68853..75803c7 100644
@@ -48,8 +48,8 @@ struct sparc64_msiq_ops {
                              unsigned long devino);
 };
 
-extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
-                                const struct sparc64_msiq_ops *ops);
+void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
+                         const struct sparc64_msiq_ops *ops);
 
 struct sparc64_msiq_cookie {
        struct pci_pbm_info *pbm;
@@ -158,23 +158,23 @@ extern struct pci_pbm_info *pci_pbm_root;
 extern int pci_num_pbms;
 
 /* PCI bus scanning and fixup support. */
-extern void pci_get_pbm_props(struct pci_pbm_info *pbm);
-extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
-                                       struct device *parent);
-extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
+void pci_get_pbm_props(struct pci_pbm_info *pbm);
+struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
+                                struct device *parent);
+void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
 
 /* Error reporting support. */
-extern void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *);
-extern void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *);
-extern void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *);
 
 /* Configuration space access. */
-extern void pci_config_read8(u8 *addr, u8 *ret);
-extern void pci_config_read16(u16 *addr, u16 *ret);
-extern void pci_config_read32(u32 *addr, u32 *ret);
-extern void pci_config_write8(u8 *addr, u8 val);
-extern void pci_config_write16(u16 *addr, u16 val);
-extern void pci_config_write32(u32 *addr, u32 val);
+void pci_config_read8(u8 *addr, u8 *ret);
+void pci_config_read16(u16 *addr, u16 *ret);
+void pci_config_read32(u32 *addr, u32 *ret);
+void pci_config_write8(u8 *addr, u8 val);
+void pci_config_write16(u16 *addr, u16 val);
+void pci_config_write32(u32 *addr, u32 val);
 
 extern struct pci_ops sun4u_pci_ops;
 extern struct pci_ops sun4v_pci_ops;
index 8e9fc3a..5642212 100644
@@ -6,87 +6,87 @@
 #ifndef _PCI_SUN4V_H
 #define _PCI_SUN4V_H
 
-extern long pci_sun4v_iommu_map(unsigned long devhandle,
-                               unsigned long tsbid,
-                               unsigned long num_ttes,
-                               unsigned long io_attributes,
-                               unsigned long io_page_list_pa);
-extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
-                                          unsigned long tsbid,
-                                          unsigned long num_ttes);
-extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
-                                           unsigned long tsbid,
-                                           unsigned long *io_attributes,
-                                           unsigned long *real_address);
-extern unsigned long pci_sun4v_config_get(unsigned long devhandle,
-                                         unsigned long pci_device,
-                                         unsigned long config_offset,
-                                         unsigned long size);
-extern int pci_sun4v_config_put(unsigned long devhandle,
-                               unsigned long pci_device,
-                               unsigned long config_offset,
-                               unsigned long size,
-                               unsigned long data);
+long pci_sun4v_iommu_map(unsigned long devhandle,
+                        unsigned long tsbid,
+                        unsigned long num_ttes,
+                        unsigned long io_attributes,
+                        unsigned long io_page_list_pa);
+unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
+                                   unsigned long tsbid,
+                                   unsigned long num_ttes);
+unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
+                                    unsigned long tsbid,
+                                    unsigned long *io_attributes,
+                                    unsigned long *real_address);
+unsigned long pci_sun4v_config_get(unsigned long devhandle,
+                                  unsigned long pci_device,
+                                  unsigned long config_offset,
+                                  unsigned long size);
+int pci_sun4v_config_put(unsigned long devhandle,
+                        unsigned long pci_device,
+                        unsigned long config_offset,
+                        unsigned long size,
+                        unsigned long data);
 
-extern unsigned long pci_sun4v_msiq_conf(unsigned long devhandle,
+unsigned long pci_sun4v_msiq_conf(unsigned long devhandle,
                                         unsigned long msiqid,
                                         unsigned long msiq_paddr,
                                         unsigned long num_entries);
-extern unsigned long pci_sun4v_msiq_info(unsigned long devhandle,
-                                        unsigned long msiqid,
-                                        unsigned long *msiq_paddr,
-                                        unsigned long *num_entries);
-extern unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long *valid);
-extern unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long valid);
-extern unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long *state);
-extern unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long state);
-extern unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long *head);
-extern unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long head);
-extern unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long *head);
-extern unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long *valid);
-extern unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long valid);
-extern unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle,
-                                          unsigned long msinum,
-                                          unsigned long *msiq);
-extern unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle,
-                                          unsigned long msinum,
-                                          unsigned long msiq,
-                                          unsigned long msitype);
-extern unsigned long pci_sun4v_msi_getstate(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long *state);
-extern unsigned long pci_sun4v_msi_setstate(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long state);
-extern unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle,
-                                          unsigned long msinum,
-                                          unsigned long *msiq);
-extern unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle,
-                                          unsigned long msinum,
-                                          unsigned long msiq);
-extern unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long *valid);
-extern unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long valid);
+unsigned long pci_sun4v_msiq_info(unsigned long devhandle,
+                                 unsigned long msiqid,
+                                 unsigned long *msiq_paddr,
+                                 unsigned long *num_entries);
+unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long *valid);
+unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long valid);
+unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long *state);
+unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long state);
+unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle,
+                                    unsigned long msiqid,
+                                    unsigned long *head);
+unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle,
+                                    unsigned long msiqid,
+                                    unsigned long head);
+unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle,
+                                    unsigned long msiqid,
+                                    unsigned long *head);
+unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long *valid);
+unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long valid);
+unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle,
+                                   unsigned long msinum,
+                                   unsigned long *msiq);
+unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle,
+                                   unsigned long msinum,
+                                   unsigned long msiq,
+                                   unsigned long msitype);
+unsigned long pci_sun4v_msi_getstate(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long *state);
+unsigned long pci_sun4v_msi_setstate(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long state);
+unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle,
+                                   unsigned long msinum,
+                                   unsigned long *msiq);
+unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle,
+                                   unsigned long msinum,
+                                   unsigned long msiq);
+unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long *valid);
+unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long valid);
 
 #endif /* !(_PCI_SUN4V_H) */
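
The churn above is mechanical: `extern` is implicit on function declarations, so dropping it from header prototypes changes nothing for the compiler. A minimal illustration (the helper name is hypothetical, not from the patch):

    extern int frob_widget(int id);  /* old style: redundant storage-class keyword */
    int frob_widget(int id);         /* new style: identical meaning to the compiler */
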
index 09f4fdd..6cc78c2 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/uaccess.h>
 #include <asm/irq_regs.h>
 
+#include "kernel.h"
 #include "irq.h"
 
 /*
@@ -162,8 +163,8 @@ static int pcic0_up;
 static struct linux_pcic pcic0;
 
 void __iomem *pcic_regs;
-volatile int pcic_speculative;
-volatile int pcic_trapped;
+static volatile int pcic_speculative;
+static volatile int pcic_trapped;
 
 /* forward */
 unsigned int pcic_build_device_irq(struct platform_device *op,
@@ -329,7 +330,7 @@ int __init pcic_probe(void)
 
        pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
        if ((pcic->pcic_config_space_addr =
-           ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) {
+           ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == NULL) {
                prom_printf("PCIC: Error, cannot map "
                            "PCI Configuration Space Address.\n");
                prom_halt();
@@ -341,7 +342,7 @@ int __init pcic_probe(void)
         */
        pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
        if ((pcic->pcic_config_space_data =
-           ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) {
+           ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == NULL) {
                prom_printf("PCIC: Error, cannot map "
                            "PCI Configuration Space Data.\n");
                prom_halt();
@@ -353,7 +354,6 @@ int __init pcic_probe(void)
        strcpy(pbm->prom_name, namebuf);
 
        {
-               extern volatile int t_nmi[4];
                extern int pcic_nmi_trap_patch[4];
 
                t_nmi[0] = pcic_nmi_trap_patch[0];
@@ -536,7 +536,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
                prom_getstring(node, "name", namebuf, sizeof(namebuf));
        }
 
-       if ((p = pcic->pcic_imap) == 0) {
+       if ((p = pcic->pcic_imap) == NULL) {
                dev->irq = 0;
                return;
        }
@@ -670,30 +670,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
        }
 }
 
-/*
- * pcic_pin_to_irq() is exported to bus probing code
- */
-unsigned int
-pcic_pin_to_irq(unsigned int pin, const char *name)
-{
-       struct linux_pcic *pcic = &pcic0;
-       unsigned int irq;
-       unsigned int ivec;
-
-       if (pin < 4) {
-               ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
-               irq = ivec >> (pin << 2) & 0xF;
-       } else if (pin < 8) {
-               ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
-               irq = ivec >> ((pin-4) << 2) & 0xF;
-       } else {                                        /* Corrupted map */
-               printk("PCIC: BAD PIN %d FOR %s\n", pin, name);
-               for (;;) {}     /* XXX Cannot panic properly in case of PROLL */
-       }
-/* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */
-       return irq;
-}
-
 /* Makes compiler happy */
 static volatile int pcic_timer_dummy;
 
@@ -783,7 +759,7 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
 void pcic_nmi(unsigned int pend, struct pt_regs *regs)
 {
 
-       pend = flip_dword(pend);
+       pend = swab32(pend);
 
        if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
                /*
@@ -875,82 +851,4 @@ void __init sun4m_pci_init_IRQ(void)
        sparc_config.load_profile_irq = pcic_load_profile_irq;
 }
 
-/*
- * This probably belongs here rather than ioport.c because
- * we do not want this crud linked into SBus kernels.
- * Also, think for a moment about likes of floppy.c that
- * include architecture specific parts. They may want to redefine ins/outs.
- *
- * We do not use horrible macros here because we want to
- * advance pointer by sizeof(size).
- */
-void outsb(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 1;
-               outb(*(const char *)src, addr);
-               src += 1;
-               /* addr += 1; */
-       }
-}
-EXPORT_SYMBOL(outsb);
-
-void outsw(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 2;
-               outw(*(const short *)src, addr);
-               src += 2;
-               /* addr += 2; */
-       }
-}
-EXPORT_SYMBOL(outsw);
-
-void outsl(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 4;
-               outl(*(const long *)src, addr);
-               src += 4;
-               /* addr += 4; */
-       }
-}
-EXPORT_SYMBOL(outsl);
-
-void insb(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 1;
-               *(unsigned char *)dst = inb(addr);
-               dst += 1;
-               /* addr += 1; */
-       }
-}
-EXPORT_SYMBOL(insb);
-
-void insw(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 2;
-               *(unsigned short *)dst = inw(addr);
-               dst += 2;
-               /* addr += 2; */
-       }
-}
-EXPORT_SYMBOL(insw);
-
-void insl(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 4;
-               /*
-                * XXX I am sure we are in for an unaligned trap here.
-                */
-               *(unsigned long *)dst = inl(addr);
-               dst += 4;
-               /* addr += 4; */
-       }
-}
-EXPORT_SYMBOL(insl);
-
 subsys_initcall(pcic_init);
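
The pcic.c hunks above replace integer-style tests of ioremap() results with explicit NULL comparisons; ioremap() returns a pointer (void __iomem *), so NULL is the idiomatic failure check. A hedged sketch of the pattern, with a hypothetical helper name:

    static int __init map_example(unsigned long phys_addr, unsigned long size)
    {
            void __iomem *regs = ioremap(phys_addr, size);

            if (regs == NULL)       /* pointer test, not "== 0" */
                    return -ENOMEM;
            /* ... use the mapping ... */
            iounmap(regs);
            return 0;
    }

The same file also swaps the hand-rolled flip_dword() for the generic swab32() byte-reversal helper.
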
index b5c38fa..8efd337 100644 (file)
@@ -110,7 +110,7 @@ struct cpu_hw_events {
 
        unsigned int            group_flag;
 };
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
 /* An event map describes the characteristics of a performance
  * counter event.  In particular it gives the encoding as well as
@@ -1153,7 +1153,7 @@ static void perf_stop_nmi_watchdog(void *unused)
                cpuc->pcr[i] = pcr_ops->read_pcr(i);
 }
 
-void perf_event_grab_pmc(void)
+static void perf_event_grab_pmc(void)
 {
        if (atomic_inc_not_zero(&active_events))
                return;
@@ -1169,7 +1169,7 @@ void perf_event_grab_pmc(void)
        mutex_unlock(&pmc_grab_mutex);
 }
 
-void perf_event_release_pmc(void)
+static void perf_event_release_pmc(void)
 {
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
                if (atomic_read(&nmi_active) == 0)
@@ -1669,7 +1669,7 @@ static bool __init supported_pmu(void)
        return false;
 }
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
        pr_info("Performance events: ");
 
@@ -1742,10 +1742,11 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 
        ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
        do {
-               struct sparc_stackf *usf, sf;
+               struct sparc_stackf __user *usf;
+               struct sparc_stackf sf;
                unsigned long pc;
 
-               usf = (struct sparc_stackf *) ufp;
+               usf = (struct sparc_stackf __user *)ufp;
                if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                        break;
 
@@ -1765,17 +1766,19 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
                unsigned long pc;
 
                if (thread32_stack_is_64bit(ufp)) {
-                       struct sparc_stackf *usf, sf;
+                       struct sparc_stackf __user *usf;
+                       struct sparc_stackf sf;
 
                        ufp += STACK_BIAS;
-                       usf = (struct sparc_stackf *) ufp;
+                       usf = (struct sparc_stackf __user *)ufp;
                        if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                                break;
                        pc = sf.callers_pc & 0xffffffff;
                        ufp = ((unsigned long) sf.fp) & 0xffffffff;
                } else {
-                       struct sparc_stackf32 *usf, sf;
-                       usf = (struct sparc_stackf32 *) ufp;
+                       struct sparc_stackf32 __user *usf;
+                       struct sparc_stackf32 sf;
+                       usf = (struct sparc_stackf32 __user *)ufp;
                        if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                                break;
                        pc = sf.callers_pc;
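
The perf callchain hunks annotate user-space frame pointers with __user and keep dereferences confined to a kernel-side copy fetched via __copy_from_user_inatomic(). A minimal sketch of that pattern, assuming a hypothetical two-word frame layout:

    struct frame { unsigned long fp, pc; };

    static unsigned long user_frame_pc(unsigned long ufp)
    {
            struct frame __user *usf = (struct frame __user *)ufp;
            struct frame sf;

            /* Copy the frame out of user space; never dereference usf directly. */
            if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                    return 0;
            return sf.pc;
    }
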
index 510baec..50e7b62 100644 (file)
@@ -10,6 +10,7 @@
 
 #include <stdarg.h>
 
+#include <linux/elfcore.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -23,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
 
 #include <asm/auxio.h>
 #include <asm/oplib.h>
@@ -38,6 +40,8 @@
 #include <asm/unistd.h>
 #include <asm/setup.h>
 
+#include "kernel.h"
+
 /* 
  * Power management idle function 
  * Set in pm platform drivers (apc.c and pmc.c)
@@ -102,8 +106,12 @@ void machine_restart(char * cmd)
 void machine_power_off(void)
 {
        if (auxio_power_register &&
-           (strcmp(of_console_device->type, "serial") || scons_pwroff))
-               *auxio_power_register |= AUXIO_POWER_OFF;
+           (strcmp(of_console_device->type, "serial") || scons_pwroff)) {
+               u8 power_register = sbus_readb(auxio_power_register);
+               power_register |= AUXIO_POWER_OFF;
+               sbus_writeb(power_register, auxio_power_register);
+       }
+
        machine_halt();
 }
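
machine_power_off() now goes through the SBus MMIO accessors instead of dereferencing the register pointer directly. The idiom is a read-modify-write via sbus_readb()/sbus_writeb(), roughly as below (illustrative helper, not part of the patch):

    static void set_reg_bit(void __iomem *reg, u8 bit)
    {
            u8 val = sbus_readb(reg);   /* fetch the current register value */

            val |= bit;
            sbus_writeb(val, reg);      /* write the modified value back */
    }
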
 
index d7b4967..027e099 100644 (file)
@@ -88,7 +88,7 @@ void arch_cpu_idle(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead()
+void arch_cpu_idle_dead(void)
 {
        sched_preempt_enable_no_resched();
        cpu_play_dead();
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
        }
 }
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        struct thread_info *tp = current_thread_info();
        struct pt_regs *regs = get_irq_regs();
@@ -251,16 +251,22 @@ void arch_trigger_all_cpu_backtrace(void)
 
        spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
 
-       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
-
        this_cpu = raw_smp_processor_id();
 
-       __global_reg_self(tp, regs, this_cpu);
+       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+       if (include_self)
+               __global_reg_self(tp, regs, this_cpu);
 
        smp_fetch_global_regs();
 
        for_each_online_cpu(cpu) {
-               struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;
+               struct global_reg_snapshot *gp;
+
+               if (!include_self && cpu == this_cpu)
+                       continue;
+
+               gp = &global_cpu_snapshot[cpu].reg;
 
                __global_reg_poll(gp);
 
@@ -292,7 +298,7 @@ void arch_trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key)
 {
-       arch_trigger_all_cpu_backtrace();
+       arch_trigger_all_cpu_backtrace(true);
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
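
arch_trigger_all_cpu_backtrace() gains an include_self flag: the caller's own registers are snapshotted only on request, and the polling loop skips this_cpu otherwise so no CPU is reported twice. The control flow, reduced to a hedged sketch (poll_cpu_snapshot() is a hypothetical stand-in for the real register polling):

    static void poll_backtraces(bool include_self, int this_cpu)
    {
            int cpu;

            for_each_online_cpu(cpu) {
                    if (!include_self && cpu == this_cpu)
                            continue;       /* self is handled separately, if at all */
                    poll_cpu_snapshot(cpu); /* hypothetical helper */
            }
    }
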
index cf5fe1c..890281b 100644 (file)
@@ -4,7 +4,7 @@
 #include <linux/spinlock.h>
 #include <asm/prom.h>
 
-extern void of_console_init(void);
+void of_console_init(void);
 
 extern unsigned int prom_early_allocated;
 
index 9a690d3..20cc5d8 100644 (file)
  *      2 of the License, or (at your option) any later version.
  */
 
+#include <linux/memblock.h>
 #include <linux/kernel.h>
-#include <linux/types.h>
 #include <linux/string.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
 #include <linux/mm.h>
-#include <linux/memblock.h>
 #include <linux/of.h>
 
 #include <asm/prom.h>
index 590b4ed..05a6e30 100644 (file)
@@ -30,19 +30,19 @@ enum psycho_error_type {
        UE_ERR, CE_ERR, PCI_ERR
 };
 
-extern void psycho_check_iommu_error(struct pci_pbm_info *pbm,
-                                    unsigned long afsr,
-                                    unsigned long afar,
-                                    enum psycho_error_type type);
+void psycho_check_iommu_error(struct pci_pbm_info *pbm,
+                             unsigned long afsr,
+                             unsigned long afar,
+                             enum psycho_error_type type);
 
-extern irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);
+irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);
 
-extern int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
-                            u32 dvma_offset, u32 dma_mask,
-                            unsigned long write_complete_offset);
+int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
+                     u32 dvma_offset, u32 dma_mask,
+                     unsigned long write_complete_offset);
 
-extern void psycho_pbm_init_common(struct pci_pbm_info *pbm,
-                                  struct platform_device *op,
-                                  const char *chip_name, int chip_type);
+void psycho_pbm_init_common(struct pci_pbm_info *pbm,
+                           struct platform_device *op,
+                           const char *chip_name, int chip_type);
 
 #endif /* _PSYCHO_COMMON_H */
index 896ba7c..a331fdc 100644 (file)
@@ -26,6 +26,8 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 
+#include "kernel.h"
+
 /* #define ALLOW_INIT_TRACING */
 
 /*
index 1434526..baef495 100644 (file)
@@ -267,7 +267,7 @@ static __init void leon_patch(void)
 }
 
 struct tt_entry *sparc_ttable;
-struct pt_regs fake_swapper_regs;
+static struct pt_regs fake_swapper_regs;
 
 /* Called from head_32.S - before we have setup anything
  * in the kernel. Be very careful with what you do here.
@@ -365,7 +365,7 @@ void __init setup_arch(char **cmdline_p)
 
        prom_setsync(prom_sync_me);
 
-       if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) && 
+       if((boot_flags & BOOTME_DEBUG) && (linux_dbvec != NULL) &&
           ((*(short *)linux_dbvec) != -1)) {
                printk("Booted under KADB. Syncing trap table.\n");
                (*(linux_dbvec->teach_debugger))();
index ee789d2..62deba7 100644 (file)
@@ -31,6 +31,7 @@
 #include <asm/switch_to.h>
 
 #include "sigutil.h"
+#include "kernel.h"
 
 /* This magic should be in g_upper[0] for all upper parts
  * to be valid.
@@ -145,7 +146,7 @@ void do_sigreturn32(struct pt_regs *regs)
        unsigned int psr;
        unsigned pc, npc;
        sigset_t set;
-       unsigned seta[_COMPAT_NSIG_WORDS];
+       compat_sigset_t seta;
        int err, i;
        
        /* Always make any pending restarted system calls return -EINTR */
@@ -209,17 +210,13 @@ void do_sigreturn32(struct pt_regs *regs)
                if (restore_rwin_state(compat_ptr(rwin_save)))
                        goto segv;
        }
-       err |= __get_user(seta[0], &sf->info.si_mask);
-       err |= copy_from_user(seta+1, &sf->extramask,
+       err |= __get_user(seta.sig[0], &sf->info.si_mask);
+       err |= copy_from_user(&seta.sig[1], &sf->extramask,
                              (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
        if (err)
                goto segv;
-       switch (_NSIG_WORDS) {
-               case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
-               case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
-               case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
-               case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
-       }
+
+       set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
        set_current_blocked(&set);
        return;
 
@@ -303,12 +300,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
                        goto segv;
        }
 
-       switch (_NSIG_WORDS) {
-               case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
-               case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
-               case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
-               case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
-       }
+       set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
        set_current_blocked(&set);
        return;
 segv:
@@ -417,7 +409,7 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
        void __user *tail;
        int sigframe_size;
        u32 psr;
-       unsigned int seta[_COMPAT_NSIG_WORDS];
+       compat_sigset_t seta;
 
        /* 1. Make sure everything is clean */
        synchronize_user_stack();
@@ -481,18 +473,14 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
                err |= __put_user(0, &sf->rwin_save);
        }
 
-       switch (_NSIG_WORDS) {
-       case 4: seta[7] = (oldset->sig[3] >> 32);
-               seta[6] = oldset->sig[3];
-       case 3: seta[5] = (oldset->sig[2] >> 32);
-               seta[4] = oldset->sig[2];
-       case 2: seta[3] = (oldset->sig[1] >> 32);
-               seta[2] = oldset->sig[1];
-       case 1: seta[1] = (oldset->sig[0] >> 32);
-               seta[0] = oldset->sig[0];
-       }
-       err |= __put_user(seta[0], &sf->info.si_mask);
-       err |= __copy_to_user(sf->extramask, seta + 1,
+	/* If these change we need to know - assignments to seta rely on these sizes */
+       BUILD_BUG_ON(_NSIG_WORDS != 1);
+       BUILD_BUG_ON(_COMPAT_NSIG_WORDS != 2);
+       seta.sig[1] = (oldset->sig[0] >> 32);
+       seta.sig[0] = oldset->sig[0];
+
+       err |= __put_user(seta.sig[0], &sf->info.si_mask);
+       err |= __copy_to_user(sf->extramask, &seta.sig[1],
                              (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
 
        if (!wsaved) {
@@ -622,16 +610,8 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
        /* Setup sigaltstack */
        err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
 
-       switch (_NSIG_WORDS) {
-       case 4: seta.sig[7] = (oldset->sig[3] >> 32);
-               seta.sig[6] = oldset->sig[3];
-       case 3: seta.sig[5] = (oldset->sig[2] >> 32);
-               seta.sig[4] = oldset->sig[2];
-       case 2: seta.sig[3] = (oldset->sig[1] >> 32);
-               seta.sig[2] = oldset->sig[1];
-       case 1: seta.sig[1] = (oldset->sig[0] >> 32);
-               seta.sig[0] = oldset->sig[0];
-       }
+       seta.sig[1] = (oldset->sig[0] >> 32);
+       seta.sig[0] = oldset->sig[0];
        err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
 
        if (!wsaved) {
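
With _NSIG_WORDS fixed at 1 on sparc64 and _COMPAT_NSIG_WORDS at 2, the old switch statements collapse to a single word-combine, and the new BUILD_BUG_ON()s turn those assumptions into compile-time checks. A hedged sketch of the conversion as a standalone helper (the patch open-codes it; `|` is used here instead of `+`, which is equivalent since the two halves do not overlap):

    static void compat_to_native_sigset(sigset_t *set, const compat_sigset_t *seta)
    {
            BUILD_BUG_ON(_NSIG_WORDS != 1);         /* one 64-bit word natively */
            BUILD_BUG_ON(_COMPAT_NSIG_WORDS != 2);  /* two 32-bit words in compat */

            set->sig[0] = seta->sig[0] | ((unsigned long)seta->sig[1] << 32);
    }
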
index 7d5d8e1..9ee72fc 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/switch_to.h>
 
 #include "sigutil.h"
+#include "kernel.h"
 
 extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
@@ -341,7 +342,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
        err |= __put_user(0, &sf->extra_size);
 
        if (psr & PSR_EF) {
-               __siginfo_fpu_t *fp = tail;
+               __siginfo_fpu_t __user *fp = tail;
                tail += sizeof(*fp);
                err |= save_fpu_state(regs, fp);
                err |= __put_user(fp, &sf->fpu_save);
@@ -349,7 +350,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
                err |= __put_user(0, &sf->fpu_save);
        }
        if (wsaved) {
-               __siginfo_rwin_t *rwp = tail;
+               __siginfo_rwin_t __user *rwp = tail;
                tail += sizeof(*rwp);
                err |= save_rwin_state(wsaved, rwp);
                err |= __put_user(rwp, &sf->rwin_save);
@@ -517,9 +518,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
        }
 }
 
-asmlinkage int
-do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr,
-               unsigned long sp)
+asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
+                               struct sigstack __user *ossptr,
+                               unsigned long sp)
 {
        int ret = -EFAULT;
 
index cd91d01..1a69998 100644 (file)
 #include <asm/switch_to.h>
 #include <asm/cacheflush.h>
 
-#include "entry.h"
-#include "systbls.h"
 #include "sigutil.h"
+#include "systbls.h"
+#include "kernel.h"
+#include "entry.h"
 
 /* {set, get}context() needed for 64-bit SparcLinux userland. */
 asmlinkage void sparc64_set_context(struct pt_regs *regs)
@@ -492,7 +493,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 
 #ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
-               extern void do_signal32(struct pt_regs *);
                do_signal32(regs);
                return;
        }
index a102bfb..7958242 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/seq_file.h>
 #include <linux/cache.h>
 #include <linux/delay.h>
+#include <linux/profile.h>
 #include <linux/cpu.h>
 
 #include <asm/ptrace.h>
@@ -75,8 +76,6 @@ void smp_store_cpu_info(int id)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-       extern void smp4m_smp_done(void);
-       extern void smp4d_smp_done(void);
        unsigned long bogosum = 0;
        int cpu, num = 0;
 
@@ -183,8 +182,6 @@ int setup_profiling_timer(unsigned int multiplier)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       extern void __init smp4m_boot_cpus(void);
-       extern void __init smp4d_boot_cpus(void);
        int i, cpuid, extra;
 
        printk("Entering SMP Mode...\n");
@@ -261,8 +258,6 @@ void __init smp_prepare_boot_cpu(void)
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-       extern int smp4m_boot_one_cpu(int, struct task_struct *);
-       extern int smp4d_boot_one_cpu(int, struct task_struct *);
        int ret=0;
 
        switch(sparc_cpu_model) {
@@ -297,7 +292,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        return ret;
 }
 
-void arch_cpu_pre_starting(void *arg)
+static void arch_cpu_pre_starting(void *arg)
 {
        local_ops->cache_all();
        local_ops->tlb_all();
@@ -317,7 +312,7 @@ void arch_cpu_pre_starting(void *arg)
        }
 }
 
-void arch_cpu_pre_online(void *arg)
+static void arch_cpu_pre_online(void *arg)
 {
        unsigned int cpuid = hard_smp_processor_id();
 
@@ -344,7 +339,7 @@ void arch_cpu_pre_online(void *arg)
        }
 }
 
-void sparc_start_secondary(void *arg)
+static void sparc_start_secondary(void *arg)
 {
        unsigned int cpu;
 
index 745a363..41aa247 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
+#include <linux/kgdb.h>
 
 #include <asm/head.h>
 #include <asm/ptrace.h>
@@ -35,6 +36,7 @@
 #include <asm/hvtramp.h>
 #include <asm/io.h>
 #include <asm/timer.h>
+#include <asm/setup.h>
 
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
@@ -52,6 +54,7 @@
 #include <asm/pcr.h>
 
 #include "cpumap.h"
+#include "kernel.h"
 
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
@@ -272,14 +275,6 @@ static void smp_synchronize_one_tick(int cpu)
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
-/* XXX Put this in some common place. XXX */
-static unsigned long kimage_addr_to_ra(void *p)
-{
-       unsigned long val = (unsigned long) p;
-
-       return kern_base + (val - KERNBASE);
-}
-
 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
                                void **descrp)
 {
@@ -867,11 +862,6 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
 #endif
 extern unsigned long xcall_flush_dcache_page_spitfire;
 
-#ifdef CONFIG_DEBUG_DCFLUSH
-extern atomic_t dcpage_flushes;
-extern atomic_t dcpage_flushes_xcall;
-#endif
-
 static inline void __local_flush_dcache_page(struct page *page)
 {
 #ifdef DCACHE_ALIASING_POSSIBLE
index f8933be..a1bb267 100644 (file)
@@ -143,7 +143,7 @@ static void sun4d_sbus_handler_irq(int sbusl)
        }
 }
 
-void sun4d_handler_irq(int pil, struct pt_regs *regs)
+void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
        /* SBUS IRQ level (1 - 7) */
@@ -236,7 +236,7 @@ static void sun4d_shutdown_irq(struct irq_data *data)
        irq_unlink(data->irq);
 }
 
-struct irq_chip sun4d_irq = {
+static struct irq_chip sun4d_irq = {
        .name           = "sun4d",
        .irq_startup    = sun4d_startup_irq,
        .irq_shutdown   = sun4d_shutdown_irq,
@@ -285,9 +285,9 @@ static void __init sun4d_load_profile_irqs(void)
        }
 }
 
-unsigned int _sun4d_build_device_irq(unsigned int real_irq,
-                                     unsigned int pil,
-                                     unsigned int board)
+static unsigned int _sun4d_build_device_irq(unsigned int real_irq,
+                                            unsigned int pil,
+                                            unsigned int board)
 {
        struct sun4d_handler_data *handler_data;
        unsigned int irq;
@@ -320,8 +320,8 @@ err_out:
 
 
 
-unsigned int sun4d_build_device_irq(struct platform_device *op,
-                                    unsigned int real_irq)
+static unsigned int sun4d_build_device_irq(struct platform_device *op,
+                                           unsigned int real_irq)
 {
        struct device_node *dp = op->dev.of_node;
        struct device_node *board_parent, *bus = dp->parent;
@@ -383,7 +383,8 @@ err_out:
        return irq;
 }
 
-unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq)
+static unsigned int sun4d_build_timer_irq(unsigned int board,
+                                          unsigned int real_irq)
 {
        return _sun4d_build_device_irq(real_irq, real_irq, board);
 }
index d066eb1..f834224 100644 (file)
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
 
        .globl          sys32_mmap2
 sys32_mmap2:
index 7136885..022c30c 100644 (file)
@@ -49,6 +49,8 @@
 #include <asm/mmu_context.h>
 #include <asm/compat_signal.h>
 
+#include "systbls.h"
+
 asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
 {
        if ((int)high < 0)
index 3a8d184..646988d 100644 (file)
@@ -24,6 +24,8 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
+#include "systbls.h"
+
 /* #define DEBUG_UNIMP_SYSCALL */
 
 /* XXX Make this per-binary type, this way we can detect the type of
@@ -68,7 +70,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way unix traditionally does this, though.
  */
-asmlinkage int sparc_pipe(struct pt_regs *regs)
+asmlinkage long sparc_pipe(struct pt_regs *regs)
 {
        int fd[2];
        int error;
@@ -93,7 +95,7 @@ int sparc_mmap_check(unsigned long addr, unsigned long len)
 
 /* Linux version of mmap */
 
-asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long pgoff)
 {
@@ -103,7 +105,7 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
                              pgoff >> (PAGE_SHIFT - 12));
 }
 
-asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long off)
 {
@@ -197,7 +199,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
        return ret;
 }
 
-asmlinkage int sys_getdomainname(char __user *name, int len)
+asmlinkage long sys_getdomainname(char __user *name, int len)
 {
        int nlen, err;
        
index beb0b5a..c85403d 100644 (file)
@@ -31,6 +31,7 @@
 #include <asm/unistd.h>
 
 #include "entry.h"
+#include "kernel.h"
 #include "systbls.h"
 
 /* #define DEBUG_UNIMP_SYSCALL */
index 26e6dd7..2dab823 100644 (file)
 #ifndef _SYSTBLS_H
 #define _SYSTBLS_H
 
+#include <linux/signal.h>
 #include <linux/kernel.h>
+#include <linux/compat.h>
 #include <linux/types.h>
-#include <linux/signal.h>
+
 #include <asm/utrap.h>
 
-extern asmlinkage unsigned long sys_getpagesize(void);
-extern asmlinkage long sparc_pipe(struct pt_regs *regs);
-extern asmlinkage long sys_sparc_ipc(unsigned int call, int first,
-                              unsigned long second,
-                              unsigned long third,
-                              void __user *ptr, long fifth);
-extern asmlinkage long sparc64_personality(unsigned long personality);
-extern asmlinkage long sys64_munmap(unsigned long addr, size_t len);
-extern asmlinkage unsigned long sys64_mremap(unsigned long addr,
-                                            unsigned long old_len,
-                                            unsigned long new_len,
-                                            unsigned long flags,
-                                            unsigned long new_addr);
-extern asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
-extern asmlinkage long sys_getdomainname(char __user *name, int len);
-extern asmlinkage long sys_utrap_install(utrap_entry_t type,
-                                        utrap_handler_t new_p,
-                                        utrap_handler_t new_d,
-                                        utrap_handler_t __user *old_p,
-                                        utrap_handler_t __user *old_d);
-extern asmlinkage long sparc_memory_ordering(unsigned long model,
-                                            struct pt_regs *regs);
-extern asmlinkage long sys_rt_sigaction(int sig,
-                                       const struct sigaction __user *act,
-                                       struct sigaction __user *oact,
-                                       void __user *restorer,
-                                       size_t sigsetsize);
+asmlinkage unsigned long sys_getpagesize(void);
+asmlinkage long sparc_pipe(struct pt_regs *regs);
+asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
+asmlinkage long sys_getdomainname(char __user *name, int len);
+void do_rt_sigreturn(struct pt_regs *regs);
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
+                        unsigned long prot, unsigned long flags,
+                        unsigned long fd, unsigned long off);
+asmlinkage void sparc_breakpoint(struct pt_regs *regs);
+
+#ifdef CONFIG_SPARC32
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+                         unsigned long prot, unsigned long flags,
+                         unsigned long fd, unsigned long pgoff);
+long sparc_remap_file_pages(unsigned long start, unsigned long size,
+                           unsigned long prot, unsigned long pgoff,
+                           unsigned long flags);
 
-extern asmlinkage void sparc64_set_context(struct pt_regs *regs);
-extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
-extern void do_rt_sigreturn(struct pt_regs *regs);
+#endif /* CONFIG_SPARC32 */
 
+#ifdef CONFIG_SPARC64
+asmlinkage long sys_sparc_ipc(unsigned int call, int first,
+                             unsigned long second,
+                             unsigned long third,
+                             void __user *ptr, long fifth);
+asmlinkage long sparc64_personality(unsigned long personality);
+asmlinkage long sys64_munmap(unsigned long addr, size_t len);
+asmlinkage unsigned long sys64_mremap(unsigned long addr,
+                                     unsigned long old_len,
+                                     unsigned long new_len,
+                                     unsigned long flags,
+                                     unsigned long new_addr);
+asmlinkage long sys_utrap_install(utrap_entry_t type,
+                                 utrap_handler_t new_p,
+                                 utrap_handler_t new_d,
+                                 utrap_handler_t __user *old_p,
+                                 utrap_handler_t __user *old_d);
+asmlinkage long sparc_memory_ordering(unsigned long model,
+                                     struct pt_regs *regs);
+asmlinkage void sparc64_set_context(struct pt_regs *regs);
+asmlinkage void sparc64_get_context(struct pt_regs *regs);
+asmlinkage long sys32_truncate64(const char __user * path,
+                                unsigned long high,
+                                unsigned long low);
+asmlinkage long sys32_ftruncate64(unsigned int fd,
+                                 unsigned long high,
+                                 unsigned long low);
+struct compat_stat64;
+asmlinkage long compat_sys_stat64(const char __user * filename,
+                                 struct compat_stat64 __user *statbuf);
+asmlinkage long compat_sys_lstat64(const char __user * filename,
+                                  struct compat_stat64 __user *statbuf);
+asmlinkage long compat_sys_fstat64(unsigned int fd,
+                                  struct compat_stat64 __user * statbuf);
+asmlinkage long compat_sys_fstatat64(unsigned int dfd,
+                                    const char __user *filename,
+                                    struct compat_stat64 __user * statbuf, int flag);
+asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
+                                       char __user *ubuf,
+                                       compat_size_t count,
+                                       unsigned long poshi,
+                                       unsigned long poslo);
+asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
+                                        char __user *ubuf,
+                                        compat_size_t count,
+                                        unsigned long poshi,
+                                        unsigned long poslo);
+asmlinkage long compat_sys_readahead(int fd,
+                                    unsigned long offhi,
+                                    unsigned long offlo,
+                                    compat_size_t count);
+long compat_sys_fadvise64(int fd,
+                         unsigned long offhi,
+                         unsigned long offlo,
+                         compat_size_t len, int advice);
+long compat_sys_fadvise64_64(int fd,
+                            unsigned long offhi, unsigned long offlo,
+                            unsigned long lenhi, unsigned long lenlo,
+                            int advice);
+long sys32_sync_file_range(unsigned int fd,
+                          unsigned long off_high, unsigned long off_low,
+                          unsigned long nb_high, unsigned long nb_low,
+                          unsigned int flags);
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+                                    u32 lenhi, u32 lenlo);
+#endif /* CONFIG_SPARC64 */
 #endif /* _SYSTBLS_H */
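
The rebuilt systbls.h groups prototypes by the kernel flavour that links them: a common block first, then CONFIG_SPARC32- and CONFIG_SPARC64-only sections, so each translation unit sees only declarations it can actually resolve. Reduced to a skeleton:

    /* common to both flavours */
    asmlinkage long sparc_pipe(struct pt_regs *regs);

    #ifdef CONFIG_SPARC32
    asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
                              unsigned long prot, unsigned long flags,
                              unsigned long fd, unsigned long pgoff);
    #endif

    #ifdef CONFIG_SPARC64
    asmlinkage long sparc64_personality(unsigned long personality);
    #endif
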
index 151ace8..85fe9b1 100644 (file)
@@ -86,3 +86,4 @@ sys_call_table:
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+/*345*/        .long sys_renameat2
index 4bd4e2b..33ecba2 100644 (file)
@@ -87,6 +87,7 @@ sys_call_table32:
 /*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+       .word sys32_renameat2
 
 #endif /* CONFIG_COMPAT */
 
@@ -165,3 +166,4 @@ sys_call_table:
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+       .word sys_renameat2
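
The renameat2 wiring follows the usual compat pattern: the 64-bit tables get sys_renameat2 directly, while the 32-bit table routes through sys32_renameat2, whose SIGN2 annotation re-sign-extends the int arguments (%o0 and %o2) that a 32-bit caller may leave with an undefined upper half. In C terms, the fixup each SIGN slot performs is roughly:

    static inline long sign_extend_syscall_arg(unsigned long reg)
    {
            return (long)(int)reg;  /* keep bit 31, discard the stale upper 32 bits */
    }
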
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
deleted file mode 100644 (file)
index 9aba8bd..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/* tadpole.c: Probing for the tadpole clock stopping h/w at boot time.
- *
- * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
- */
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/asi.h>
-#include <asm/oplib.h>
-#include <asm/io.h>
-
-#define MACIO_SCSI_CSR_ADDR    0x78400000
-#define MACIO_EN_DMA           0x00000200
-#define CLOCK_INIT_DONE                1
-
-static int clk_state;
-static volatile unsigned char *clk_ctrl;
-void (*cpu_pwr_save)(void);
-
-static inline unsigned int ldphys(unsigned int addr)
-{
-       unsigned long data;
-    
-       __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" : 
-                            "=r" (data) :
-                            "r" (addr), "i" (ASI_M_BYPASS));
-       return data;
-}
-
-static void clk_init(void)
-{
-       __asm__ __volatile__("mov 0x6c, %%g1\n\t"
-                            "mov 0x4c, %%g2\n\t"
-                            "mov 0xdf, %%g3\n\t"
-                            "stb %%g1, [%0+3]\n\t"
-                            "stb %%g2, [%0+3]\n\t"
-                            "stb %%g3, [%0+3]\n\t" : :
-                            "r" (clk_ctrl) :
-                            "g1", "g2", "g3");
-}
-
-static void clk_slow(void)
-{
-       __asm__ __volatile__("mov 0xcc, %%g2\n\t"
-                            "mov 0x4c, %%g3\n\t"
-                            "mov 0xcf, %%g4\n\t"
-                            "mov 0xdf, %%g5\n\t"
-                            "stb %%g2, [%0+3]\n\t"
-                            "stb %%g3, [%0+3]\n\t"
-                            "stb %%g4, [%0+3]\n\t"
-                            "stb %%g5, [%0+3]\n\t" : :
-                            "r" (clk_ctrl) :
-                            "g2", "g3", "g4", "g5");
-}
-
-/*
- * Tadpole is guaranteed to be UP, using local_irq_save.
- */
-static void tsu_clockstop(void)
-{
-       unsigned int mcsr;
-       unsigned long flags;
-
-       if (!clk_ctrl)
-               return;
-       if (!(clk_state & CLOCK_INIT_DONE)) {
-               local_irq_save(flags);
-               clk_init();
-               clk_state |= CLOCK_INIT_DONE;       /* all done */
-               local_irq_restore(flags);
-               return;
-       }
-       if (!(clk_ctrl[2] & 1))
-               return;               /* no speed up yet */
-
-       local_irq_save(flags);
-
-       /* if SCSI DMA in progress, don't slow clock */
-       mcsr = ldphys(MACIO_SCSI_CSR_ADDR);
-       if ((mcsr&MACIO_EN_DMA) != 0) {
-               local_irq_restore(flags);
-               return;
-       }
-       /* TODO... the minimum clock setting ought to increase the
-        * memory refresh interval..
-        */
-       clk_slow();
-       local_irq_restore(flags);
-}
-
-static void swift_clockstop(void)
-{
-       if (!clk_ctrl)
-               return;
-       clk_ctrl[0] = 0;
-}
-
-void __init clock_stop_probe(void)
-{
-       phandle node, clk_nd;
-       char name[20];
-    
-       prom_getstring(prom_root_node, "name", name, sizeof(name));
-       if (strncmp(name, "Tadpole", 7))
-               return;
-       node = prom_getchild(prom_root_node);
-       node = prom_searchsiblings(node, "obio");
-       node = prom_getchild(node);
-       clk_nd = prom_searchsiblings(node, "clk-ctrl");
-       if (!clk_nd)
-               return;
-       printk("Clock Stopping h/w detected... ");
-       clk_ctrl = (char *) prom_getint(clk_nd, "address");
-       clk_state = 0;
-       if (name[10] == '\0') {
-               cpu_pwr_save = tsu_clockstop;
-               printk("enabled (S3)\n");
-       } else if ((name[10] == 'X') || (name[10] == 'G')) {
-               cpu_pwr_save = swift_clockstop;
-               printk("enabled (%s)\n",name+7);
-       } else
-               printk("disabled %s\n",name+7);
-}
index c4c27b0..5923d1e 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 
+#include <asm/mc146818rtc.h>
 #include <asm/oplib.h>
 #include <asm/timex.h>
 #include <asm/timer.h>
@@ -47,6 +48,7 @@
 #include <asm/irq_regs.h>
 #include <asm/setup.h>
 
+#include "kernel.h"
 #include "irq.h"
 
 static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
@@ -83,7 +85,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 
 EXPORT_SYMBOL(profile_pc);
 
-__volatile__ unsigned int *master_l10_counter;
+volatile u32 __iomem *master_l10_counter;
 
 int update_persistent_clock(struct timespec now)
 {
@@ -143,9 +145,9 @@ static __init void setup_timer_ce(void)
 
 static unsigned int sbus_cycles_offset(void)
 {
-       unsigned int val, offset;
+       u32 val, offset;
 
-       val = *master_l10_counter;
+       val = sbus_readl(master_l10_counter);
        offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
 
        /* Limit hit? */
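
master_l10_counter is now typed as an __iomem pointer and read through sbus_readl() rather than through a bare volatile dereference, with the limit-bit and value-field extraction unchanged. A sketch of the accessor-based read, reusing the TIMER_VALUE_* macros from the hunk above:

    static u32 l10_counter_offset(const volatile u32 __iomem *counter)
    {
            u32 val = sbus_readl(counter);  /* MMIO read via accessor */

            return (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
    }
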
index 6629829..6fd386c 100644 (file)
@@ -44,7 +44,7 @@ static void instruction_dump(unsigned long *pc)
 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
 
-void die_if_kernel(char *str, struct pt_regs *regs)
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
 {
        static int die_counter;
        int count = 0;
@@ -219,8 +219,6 @@ static unsigned long fake_fsr;
 static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
 static unsigned long fake_depth;
 
-extern int do_mathemu(struct pt_regs *, struct task_struct *);
-
 void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                 unsigned long psr)
 {
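
die_if_kernel() picks up __noreturn here and in its 64-bit counterpart below; the attribute records that the function ends in a panic/exit path, which silences "missing return" warnings in callers and lets the compiler drop unreachable code after calls to it. A minimal hypothetical example of the annotation:

    void __noreturn fatal_error(const char *msg)
    {
            panic("%s", msg);       /* panic() itself never returns */
    }
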
index 4ced92f..fb6640e 100644 (file)
 #include <asm/prom.h>
 #include <asm/memctrl.h>
 #include <asm/cacheflush.h>
+#include <asm/setup.h>
 
 #include "entry.h"
+#include "kernel.h"
 #include "kstack.h"
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
@@ -2209,8 +2211,6 @@ out:
        exception_exit(prev_state);
 }
 
-extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
-
 void do_fpother(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -2383,7 +2383,7 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
        return (struct reg_window *) (fp + STACK_BIAS);
 }
 
-void die_if_kernel(char *str, struct pt_regs *regs)
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
 {
        static int die_counter;
        int count = 0;
@@ -2433,9 +2433,6 @@ EXPORT_SYMBOL(die_if_kernel);
 #define VIS_OPCODE_MASK        ((0x3 << 30) | (0x3f << 19))
 #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
 
-extern int handle_popc(u32 insn, struct pt_regs *regs);
-extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
-
 void do_illegal_instruction(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -2486,8 +2483,6 @@ out:
        exception_exit(prev_state);
 }
 
-extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
-
 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
        enum ctx_state prev_state = exception_enter();
index c0ec897..c5c61b3 100644 (file)
 #include <linux/smp.h>
 #include <linux/perf_event.h>
 
+#include <asm/setup.h>
+
+#include "kernel.h"
+
 enum direction {
        load,    /* ld, ldd, ldh, ldsh */
        store,   /* st, std, sth, stsh */
index 35ab8b6..62098a8 100644 (file)
 #include <linux/context_tracking.h>
 #include <asm/fpumacro.h>
 #include <asm/cacheflush.h>
+#include <asm/setup.h>
 
 #include "entry.h"
+#include "kernel.h"
 
 enum direction {
        load,    /* ld, ldd, ldh, ldsh */
index 3107381..87bab0a 100644 (file)
 #include <linux/mm.h>
 #include <linux/smp.h>
 
+#include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 
+#include "kernel.h"
+
 /* Do save's until all user register windows are out of the cpu. */
 void flush_user_windows(void)
 {
index dbe119b..3269b02 100644 (file)
@@ -41,7 +41,7 @@ lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
 lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
 lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
 
-obj-y                 += iomap.o
+obj-$(CONFIG_SPARC64) += iomap.o
 obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
 obj-y                 += ksyms.o
 obj-$(CONFIG_SPARC64) += PeeCeeI.o
index 3ad6cbd..0b0ed4d 100644 (file)
@@ -24,10 +24,7 @@ mcount:
 #ifdef CONFIG_DYNAMIC_FTRACE
        /* Do nothing, the retl/nop below is all we need.  */
 #else
-       sethi           %hi(function_trace_stop), %g1
-       lduw            [%g1 + %lo(function_trace_stop)], %g2
-       brnz,pn         %g2, 2f
-        sethi          %hi(ftrace_trace_function), %g1
+       sethi           %hi(ftrace_trace_function), %g1
        sethi           %hi(ftrace_stub), %g2
        ldx             [%g1 + %lo(ftrace_trace_function)], %g1
        or              %g2, %lo(ftrace_stub), %g2
@@ -80,11 +77,8 @@ ftrace_stub:
        .globl          ftrace_caller
        .type           ftrace_caller,#function
 ftrace_caller:
-       sethi           %hi(function_trace_stop), %g1
        mov             %i7, %g2
-       lduw            [%g1 + %lo(function_trace_stop)], %g1
-       brnz,pn         %g1, ftrace_stub
-        mov            %fp, %g3
+       mov             %fp, %g3
        save            %sp, -176, %sp
        mov             %g2, %o1
        mov             %g2, %l0
index d1b2aff..bb587d5 100644 (file)
@@ -4,20 +4,20 @@
 #include <asm/byteorder.h>
 
 #define add_ssaaaa(sh, sl, ah, al, bh, bl)                             \
-  __asm__ ("addcc %r4,%5,%1\n\t"                                               \
+  __asm__ ("addcc %r4,%5,%1\n\t"                                       \
           "addx %r2,%3,%0\n"                                           \
-          : "=r" ((USItype)(sh)),                                      \
-            "=&r" ((USItype)(sl))                                      \
+          : "=r" (sh),                                                 \
+            "=&r" (sl)                                                 \
           : "%rJ" ((USItype)(ah)),                                     \
             "rI" ((USItype)(bh)),                                      \
             "%rJ" ((USItype)(al)),                                     \
             "rI" ((USItype)(bl))                                       \
           : "cc")
 #define sub_ddmmss(sh, sl, ah, al, bh, bl)                             \
-  __asm__ ("subcc %r4,%5,%1\n\t"                                               \
+  __asm__ ("subcc %r4,%5,%1\n\t"                                       \
           "subx %r2,%3,%0\n"                                           \
-          : "=r" ((USItype)(sh)),                                      \
-            "=&r" ((USItype)(sl))                                      \
+          : "=r" (sh),                                                 \
+            "=&r" (sl)                                                 \
           : "rJ" ((USItype)(ah)),                                      \
             "rI" ((USItype)(bh)),                                      \
             "rJ" ((USItype)(al)),                                      \
@@ -65,8 +65,8 @@
        "mulscc %%g1,0,%%g1\n\t"                                        \
        "add    %%g1,%%g2,%0\n\t"                                       \
        "rd     %%y,%1\n"                                               \
-          : "=r" ((USItype)(w1)),                                      \
-            "=r" ((USItype)(w0))                                       \
+          : "=r" (w1),                                                 \
+            "=r" (w0)                                                  \
           : "%rI" ((USItype)(u)),                                      \
             "r" ((USItype)(v))                                         \
           : "%g1", "%g2", "cc")
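
These longlong.h edits strip the (USItype) casts from the asm output operands. An output constraint must name an lvalue, and a cast expression is not one — newer GCC rejects the old form outright — while casts on input operands remain legal and are kept. A standalone sketch of a well-formed SPARC asm output (hypothetical helper):

    static inline unsigned int add_cc(unsigned int a, unsigned int b)
    {
            unsigned int sum;

            __asm__("addcc %1, %2, %0"
                    : "=r" (sum)            /* output: plain lvalue, no cast */
                    : "r" (a), "rI" (b)     /* inputs may keep casts if needed */
                    : "cc");                /* addcc clobbers the condition codes */
            return sum;
    }
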
@@ -98,8 +98,8 @@
           "sub %1,%2,%1\n\t"                                           \
           "3:  xnor    %0,0,%0\n\t"                                    \
           "! End of inline udiv_qrnnd\n"                               \
-          : "=&r" ((USItype)(q)),                                      \
-            "=&r" ((USItype)(r))                                       \
+          : "=&r" (q),                                                 \
+            "=&r" (r)                                                  \
           : "r" ((USItype)(d)),                                        \
             "1" ((USItype)(n1)),                                       \
             "0" ((USItype)(n0)) : "%g1", "cc")
index 425d3cf..51320a8 100644 (file)
@@ -17,8 +17,8 @@
           "bcs,a,pn %%xcc, 1f\n\t"             \
           "add %0, 1, %0\n"                    \
           "1:"                                 \
-          : "=r" ((UDItype)(sh)),              \
-            "=&r" ((UDItype)(sl))              \
+          : "=r" (sh),                         \
+            "=&r" (sl)                         \
           : "r" ((UDItype)(ah)),               \
             "r" ((UDItype)(bh)),               \
             "r" ((UDItype)(al)),               \
@@ -31,8 +31,8 @@
           "bcs,a,pn %%xcc, 1f\n\t"             \
           "sub %0, 1, %0\n"                    \
           "1:"                                 \
-          : "=r" ((UDItype)(sh)),              \
-            "=&r" ((UDItype)(sl))              \
+          : "=r" (sh),                         \
+            "=&r" (sl)                         \
           : "r" ((UDItype)(ah)),               \
             "r" ((UDItype)(bh)),               \
             "r" ((UDItype)(al)),               \
@@ -64,8 +64,8 @@
                   "sllx %3,32,%3\n\t"                  \
                   "add %1,%3,%1\n\t"                   \
                   "add %5,%2,%0"                       \
-          : "=r" ((UDItype)(wh)),                      \
-            "=&r" ((UDItype)(wl)),                     \
+          : "=r" (wh),                                 \
+            "=&r" (wl),                                \
             "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
           : "r" ((UDItype)(u)),                        \
             "r" ((UDItype)(v))                         \
index 59dbd46..908e8c1 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/openprom.h>
 #include <asm/oplib.h>
+#include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 
-int show_unhandled_signals = 1;
+#include "mm_32.h"
 
-static void unhandled_fault(unsigned long, struct task_struct *,
-               struct pt_regs *) __attribute__ ((noreturn));
+int show_unhandled_signals = 1;
 
 static void __noreturn unhandled_fault(unsigned long address,
                                       struct task_struct *tsk,
@@ -141,9 +141,6 @@ static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
        force_sig_info (sig, &info, current);
 }
 
-extern unsigned long safe_compute_effective_address(struct pt_regs *,
-                                                   unsigned int);
-
 static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
 {
        unsigned int insn;
index 4ced3fc..587cd05 100644 (file)
@@ -32,6 +32,7 @@
 #include <asm/lsu.h>
 #include <asm/sections.h>
 #include <asm/mmu_context.h>
+#include <asm/setup.h>
 
 int show_unhandled_signals = 1;
 
@@ -196,9 +197,6 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
        force_sig_info(sig, &info, current);
 }
 
-extern int handle_ldf_stq(u32, struct pt_regs *);
-extern int handle_ld_nf(u32, struct pt_regs *);
-
 static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
 {
        if (!insn) {
index db69870..eb82871 100644 (file)
 #include <asm/pgtable.h>
 #include <asm/vaddrs.h>
 #include <asm/pgalloc.h>       /* bug in asm-generic/tlb.h: check_pgt_cache */
+#include <asm/setup.h>
 #include <asm/tlb.h>
 #include <asm/prom.h>
 #include <asm/leon.h>
 
+#include "mm_32.h"
+
 unsigned long *sparc_valid_addr_bitmap;
 EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 
@@ -63,7 +66,6 @@ void show_mem(unsigned int filter)
 }
 
 
-extern unsigned long cmdline_memory_size;
 unsigned long last_valid_pfn;
 
 unsigned long calc_highpages(void)
@@ -246,9 +248,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
  * init routine based upon the Sun model type on the Sparc.
  *
  */
-extern void srmmu_paging_init(void);
-extern void device_scan(void);
-
 void __init paging_init(void)
 {
        srmmu_paging_init();
index ed3c969..16b58ff 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/prom.h>
 #include <asm/mdesc.h>
 #include <asm/cpudata.h>
+#include <asm/setup.h>
 #include <asm/irq.h>
 
 #include "init_64.h"
@@ -794,11 +795,11 @@ struct node_mem_mask {
 static struct node_mem_mask node_masks[MAX_NUMNODES];
 static int num_node_masks;
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+
 int numa_cpu_lookup_table[NR_CPUS];
 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
 
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-
 struct mdesc_mblock {
        u64     base;
        u64     size;
@@ -887,17 +888,21 @@ static void __init allocate_node_data(int nid)
 
 static void init_node_masks_nonnuma(void)
 {
+#ifdef CONFIG_NEED_MULTIPLE_NODES
        int i;
+#endif
 
        numadbg("Initializing tables for non-numa.\n");
 
        node_masks[0].mask = node_masks[0].val = 0;
        num_node_masks = 1;
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
        for (i = 0; i < NR_CPUS; i++)
                numa_cpu_lookup_table[i] = 0;
 
        cpumask_setall(&numa_cpumask_lookup_table[0]);
+#endif
 }
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
index 5d3782d..0668b36 100644 (file)
@@ -21,7 +21,7 @@ extern unsigned int sparc64_highest_unlocked_tlb_ent;
 extern unsigned long sparc64_kern_pri_context;
 extern unsigned long sparc64_kern_pri_nuc_bits;
 extern unsigned long sparc64_kern_sec_context;
-extern void mmu_info(struct seq_file *m);
+void mmu_info(struct seq_file *m);
 
 struct linux_prom_translation {
        unsigned long virt;
@@ -36,7 +36,7 @@ extern unsigned int prom_trans_ents;
 /* Exported for SMP bootup purposes. */
 extern unsigned long kern_locked_tte_data;
 
-extern void prom_world(int enter);
+void prom_world(int enter);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #define VMEMMAP_CHUNK_SHIFT    22
index eb99862..f311bf2 100644 (file)
@@ -25,6 +25,8 @@
 #include <asm/dma.h>
 #include <asm/oplib.h>
 
+#include "mm_32.h"
+
 /* #define IOUNIT_DEBUG */
 #ifdef IOUNIT_DEBUG
 #define IOD(x) printk(x)
@@ -38,7 +40,8 @@
 static void __init iounit_iommu_init(struct platform_device *op)
 {
        struct iounit_struct *iounit;
-       iopte_t *xpt, *xptend;
+       iopte_t __iomem *xpt;
+       iopte_t __iomem *xptend;
 
        iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
        if (!iounit) {
@@ -62,10 +65,10 @@ static void __init iounit_iommu_init(struct platform_device *op)
        op->dev.archdata.iommu = iounit;
        iounit->page_table = xpt;
        spin_lock_init(&iounit->lock);
-       
-       for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
-            xpt < xptend;)
-               iopte_val(*xpt++) = 0;
+
+       xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
+       for (; xpt < xptend; xpt++)
+               sbus_writel(0, xpt);
 }
 
 static int __init iounit_init(void)
@@ -130,7 +133,7 @@ nexti:      scan = find_next_zero_bit(iounit->bmap, limit, scan);
        vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
        for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
                set_bit(scan, iounit->bmap);
-               iounit->page_table[scan] = iopte;
+               sbus_writel(iopte, &iounit->page_table[scan]);
        }
        IOD(("%08lx\n", vaddr));
        return vaddr;
@@ -202,7 +205,7 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
        struct iounit_struct *iounit = dev->archdata.iommu;
        unsigned long page, end;
        pgprot_t dvma_prot;
-       iopte_t *iopte;
+       iopte_t __iomem *iopte;
 
        *pba = addr;
 
@@ -224,8 +227,8 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
                        
                        i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
 
-                       iopte = (iopte_t *)(iounit->page_table + i);
-                       *iopte = MKIOPTE(__pa(page));
+                       iopte = iounit->page_table + i;
+                       sbus_writel(MKIOPTE(__pa(page)), iopte);
                }
                addr += PAGE_SIZE;
                va += PAGE_SIZE;
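
Throughout the io-unit hunks above, stores through a plain iopte_t *
become sbus_writel() calls on an iopte_t __iomem *, making the MMIO
accesses explicit and letting sparse type-check them. A hedged sketch of
the general accessor pattern (register names hypothetical, not the
io-unit code):

    void __iomem *regs = ioremap(phys_base, size);  /* MMIO mapping */
    u32 v;

    v = readl(regs + REG_CTRL);    /* never: v = *(u32 *)(regs + REG_CTRL) */
    writel(v | REG_CTRL_ENABLE, regs + REG_CTRL);

sbus_readl()/sbus_writel() are the sparc SBus flavour of the same
helpers, which is why the iommu.c hunks below use them for the control
and base registers as well.
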
index 28f96f2..491511d 100644 (file)
@@ -27,6 +27,8 @@
 #include <asm/iommu.h>
 #include <asm/dma.h>
 
+#include "mm_32.h"
+
 /*
  * This can be sized dynamically, but we will do this
  * only when we have a guidance about actual I/O pressures.
@@ -37,9 +39,6 @@
 #define IOMMU_NPTES    (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 256KB */
 #define IOMMU_ORDER    6                               /* 4096 * (1<<6) */
 
-/* srmmu.c */
-extern int viking_mxcc_present;
-extern int flush_page_for_dma_global;
 static int viking_flush;
 /* viking.S */
 extern void viking_flush_page(unsigned long page);
@@ -59,6 +58,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
        struct iommu_struct *iommu;
        unsigned int impl, vers;
        unsigned long *bitmap;
+       unsigned long control;
+       unsigned long base;
        unsigned long tmp;
 
        iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
@@ -73,12 +74,14 @@ static void __init sbus_iommu_init(struct platform_device *op)
                prom_printf("Cannot map IOMMU registers\n");
                prom_halt();
        }
-       impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
-       vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
-       tmp = iommu->regs->control;
-       tmp &= ~(IOMMU_CTRL_RNGE);
-       tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
-       iommu->regs->control = tmp;
+
+       control = sbus_readl(&iommu->regs->control);
+       impl = (control & IOMMU_CTRL_IMPL) >> 28;
+       vers = (control & IOMMU_CTRL_VERS) >> 24;
+       control &= ~(IOMMU_CTRL_RNGE);
+       control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
+       sbus_writel(control, &iommu->regs->control);
+
        iommu_invalidate(iommu->regs);
        iommu->start = IOMMU_START;
        iommu->end = 0xffffffff;
@@ -100,7 +103,9 @@ static void __init sbus_iommu_init(struct platform_device *op)
        memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
        flush_cache_all();
        flush_tlb_all();
-       iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
+
+       base = __pa((unsigned long)iommu->page_table) >> 4;
+       sbus_writel(base, &iommu->regs->base);
        iommu_invalidate(iommu->regs);
 
        bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
index 5bed085..3b17b6f 100644 (file)
 #include <asm/leon.h>
 #include <asm/tlbflush.h>
 
-#include "srmmu.h"
+#include "mm_32.h"
 
 int leon_flush_during_switch = 1;
-int srmmu_swprobe_trace;
+static int srmmu_swprobe_trace;
 
 static inline unsigned long leon_get_ctable_ptr(void)
 {
diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h
new file mode 100644 (file)
index 0000000..a6c27ca
--- /dev/null
@@ -0,0 +1,24 @@
+/* fault_32.c - these stay non-static since they are called from assembler */
+asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
+                            unsigned long address);
+asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+                               unsigned long address);
+
+void window_overflow_fault(void);
+void window_underflow_fault(unsigned long sp);
+void window_ret_fault(struct pt_regs *regs);
+
+/* srmmu.c */
+extern char *srmmu_name;
+extern int viking_mxcc_present;
+extern int flush_page_for_dma_global;
+
+extern void (*poke_srmmu)(void);
+
+void __init srmmu_paging_init(void);
+
+/* iommu.c */
+void ld_mmu_iommu(void);
+
+/* io-unit.c */
+void ld_mmu_iounit(void);
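
The new mm_32.h collects prototypes that fault_32.c, init_32.c, iommu.c,
io-unit.c and srmmu.c previously repeated as local externs; every user
now includes the header, so a signature change becomes a compile error
instead of a silent mismatch. The convention, sketched with hypothetical
names:

    /* foo.h: the one shared prototype */
    void ld_mmu_foo(void);

    /* foo.c */
    #include "foo.h"
    void ld_mmu_foo(void) { /* ... */ }   /* checked against foo.h */

    /* bar.c */
    #include "foo.h"    /* instead of: extern void ld_mmu_foo(void); */
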
index cfbe53c..be65f03 100644 (file)
@@ -49,7 +49,7 @@
 #include <asm/mxcc.h>
 #include <asm/ross.h>
 
-#include "srmmu.h"
+#include "mm_32.h"
 
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
@@ -100,7 +100,6 @@ static unsigned long srmmu_nocache_end;
 #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
 
 void *srmmu_nocache_pool;
-void *srmmu_nocache_bitmap;
 static struct bit_map srmmu_nocache_map;
 
 static inline int srmmu_pmd_none(pmd_t pmd)
@@ -173,7 +172,7 @@ static void *__srmmu_get_nocache(int size, int align)
                printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
                       size, (int) srmmu_nocache_size,
                       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
-               return 0;
+               return NULL;
        }
 
        addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
@@ -269,6 +268,7 @@ static void __init srmmu_nocache_calcsize(void)
 
 static void __init srmmu_nocache_init(void)
 {
+       void *srmmu_nocache_bitmap;
        unsigned int bitmap_bits;
        pgd_t *pgd;
        pmd_t *pmd;
@@ -728,7 +728,7 @@ static inline unsigned long srmmu_probe(unsigned long vaddr)
                                     "=r" (retval) :
                                     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
        } else {
-               retval = leon_swprobe(vaddr, 0);
+               retval = leon_swprobe(vaddr, NULL);
        }
        return retval;
 }
@@ -865,8 +865,6 @@ static void __init map_kernel(void)
 
 void (*poke_srmmu)(void) = NULL;
 
-extern unsigned long bootmem_init(unsigned long *pages_avail);
-
 void __init srmmu_paging_init(void)
 {
        int i;
@@ -1771,9 +1769,6 @@ static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
 /* Load up routines and constants for sun4m and sun4d mmu */
 void __init load_mmu(void)
 {
-       extern void ld_mmu_iommu(void);
-       extern void ld_mmu_iounit(void);
-
        /* Functions */
        get_srmmu_type();
 
diff --git a/arch/sparc/mm/srmmu.h b/arch/sparc/mm/srmmu.h
deleted file mode 100644 (file)
index 5703274..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-/* srmmu.c */
-extern char *srmmu_name;
-
-extern void (*poke_srmmu)(void);
index fe19b81..a065766 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <asm/setup.h>
 #include <asm/tsb.h>
 #include <asm/tlb.h>
 #include <asm/oplib.h>
index f178b9d..53a696d 100644 (file)
@@ -81,11 +81,6 @@ void prom_feval(const char *fstring)
 }
 EXPORT_SYMBOL(prom_feval);
 
-#ifdef CONFIG_SMP
-extern void smp_capture(void);
-extern void smp_release(void);
-#endif
-
 /* Drop into the prom, with the chance to continue with the 'go'
  * prom command.
  */
index 4f3006b..7fcd492 100644 (file)
@@ -128,7 +128,6 @@ config TILEGX
        select SPARSE_IRQ
        select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
        select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_DYNAMIC_FTRACE
        select HAVE_FTRACE_MCOUNT_RECORD
index 4232363..dd4f9f1 100644 (file)
@@ -266,6 +266,8 @@ static inline void cpu_relax(void)
        barrier();
 }
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 /* Info on this processor (see fs/proc/cpuinfo.c) */
 struct seq_operations;
 extern const struct seq_operations cpuinfo_op;
index 70d7bb0..3c2b8d5 100644 (file)
@@ -77,15 +77,6 @@ STD_ENDPROC(__mcount)
 
        .align  64
 STD_ENTRY(ftrace_caller)
-       moveli  r11, hw2_last(function_trace_stop)
-       { shl16insli    r11, r11, hw1(function_trace_stop); move r12, lr }
-       { shl16insli    r11, r11, hw0(function_trace_stop); move lr, r10 }
-       ld      r11, r11
-       beqz    r11, 1f
-       jrp     r12
-
-1:
-       { move  r10, lr; move   lr, r12 }
        MCOUNT_SAVE_REGS
 
        /* arg1: self return address */
@@ -119,15 +110,6 @@ STD_ENDPROC(ftrace_caller)
 
        .align  64
 STD_ENTRY(__mcount)
-       moveli  r11, hw2_last(function_trace_stop)
-       { shl16insli    r11, r11, hw1(function_trace_stop); move r12, lr }
-       { shl16insli    r11, r11, hw0(function_trace_stop); move lr, r10 }
-       ld      r11, r11
-       beqz    r11, 1f
-       jrp     r12
-
-1:
-       { move  r10, lr; move   lr, r12 }
        {
         moveli r11, hw2_last(ftrace_trace_function)
         moveli r13, hw2_last(ftrace_stub)
index 9472079..f1b3eb1 100644 (file)
@@ -12,6 +12,7 @@
 #include <mem_user.h>
 #include <os.h>
 #include <skas.h>
+#include <kern_util.h>
 
 struct host_vm_change {
        struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
        struct host_vm_op *last;
        int ret = 0;
 
+       if ((addr >= STUB_START) && (addr < STUB_END))
+               return -EINVAL;
+
        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
        /* This is not an else because ret is modified above */
        if (ret) {
                printk(KERN_ERR "fix_range_common: failed, killing current "
-                      "process\n");
+                      "process: %d\n", task_tgid_vnr(current));
+               /* We are under mmap_sem, release it so that current can terminate */
+               up_write(&current->mm->mmap_sem);
                force_sig(SIGKILL, current);
+               do_signal();
        }
 }
 
index 974b874..5678c35 100644 (file)
@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
        int is_write = FAULT_WRITE(fi);
        unsigned long address = FAULT_ADDRESS(fi);
 
-       if (regs)
+       if (!is_user && regs)
                current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
 
        if (!is_user && (address >= start_vm) && (address < end_vm)) {
index d531879..908579f 100644 (file)
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
 
 void wait_stub_done(int pid)
 {
-       int n, status, err, bad_stop = 0;
+       int n, status, err;
 
        while (1) {
                CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
 
        if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
                return;
-       else
-               bad_stop = 1;
 
 bad_wait:
        err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
        printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
               "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
               status);
-       if (bad_stop)
-               kill(pid, SIGKILL);
-       else
-               fatal_sigsegv();
+       fatal_sigsegv();
 }
 
 extern unsigned long current_stub_stack(void);
index aafad6f..928237a 100644 (file)
@@ -51,9 +51,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
        bool
 
-config ARCH_HAS_CPUFREQ
-       bool
-
 config GENERIC_HWEIGHT
        def_bool y
 
@@ -87,7 +84,6 @@ config ARCH_PUV3
        select GENERIC_CLOCKEVENTS
        select HAVE_CLK
        select ARCH_REQUIRE_GPIOLIB
-       select ARCH_HAS_CPUFREQ
 
 # CONFIGs for ARCH_PUV3
 
@@ -198,9 +194,7 @@ menu "Power management options"
 
 source "kernel/power/Kconfig"
 
-if ARCH_HAS_CPUFREQ
 source "drivers/cpufreq/Kconfig"
-endif
 
 config ARCH_SUSPEND_POSSIBLE
        def_bool y if !ARCH_FPGA
index 39decb6..cb1d8fd 100644 (file)
@@ -39,10 +39,37 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
 #define ioremap_nocache(cookie, size)  __uc32_ioremap(cookie, size)
 #define iounmap(cookie)                        __uc32_iounmap(cookie)
 
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
 #define HAVE_ARCH_PIO_SIZE
 #define PIO_OFFSET             (unsigned int)(PCI_IOBASE)
 #define PIO_MASK               (unsigned int)(IO_SPACE_LIMIT)
 #define PIO_RESERVED           (PIO_OFFSET + PIO_MASK + 1)
 
+#ifdef CONFIG_STRICT_DEVMEM
+
+#include <linux/ioport.h>
+#include <linux/mm.h>
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain
+ * address is valid. The argument is a physical page number.
+ * We mimic x86 here by disallowing access to system RAM as well as
+ * device-exclusive MMIO regions. This effectively disables read()/write()
+ * on /dev/mem.
+ */
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+       if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+               return 0;
+       if (!page_is_ram(pfn))
+               return 1;
+       return 0;
+}
+
+#endif /* CONFIG_STRICT_DEVMEM */
+
 #endif /* __KERNEL__ */
 #endif /* __UNICORE_IO_H__ */
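
For context: with CONFIG_STRICT_DEVMEM the generic /dev/mem code walks
the requested range page by page and rejects it as soon as this hook
returns 0. A rough, simplified sketch of that caller (the real one lives
in drivers/char/mem.c):

    static inline int range_is_allowed(unsigned long pfn, unsigned long size)
    {
            unsigned long end = pfn + (size >> PAGE_SHIFT);

            for (; pfn < end; pfn++)
                    if (!devmem_is_allowed(pfn))
                            return 0;
            return 1;
    }
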
index 233c258..ed6f7d0 100644 (file)
@@ -87,16 +87,16 @@ extern pgprot_t pgprot_kernel;
 
 #define PAGE_NONE              pgprot_user
 #define PAGE_SHARED            __pgprot(pgprot_val(pgprot_user | PTE_READ \
-                                                               | PTE_WRITE)
+                                                               | PTE_WRITE))
 #define PAGE_SHARED_EXEC       __pgprot(pgprot_val(pgprot_user | PTE_READ \
                                                                | PTE_WRITE \
-                                                               | PTE_EXEC)
+                                                               | PTE_EXEC))
 #define PAGE_COPY              __pgprot(pgprot_val(pgprot_user | PTE_READ)
 #define PAGE_COPY_EXEC         __pgprot(pgprot_val(pgprot_user | PTE_READ \
-                                                               | PTE_EXEC)
-#define PAGE_READONLY          __pgprot(pgprot_val(pgprot_user | PTE_READ)
+                                                               | PTE_EXEC))
+#define PAGE_READONLY          __pgprot(pgprot_val(pgprot_user | PTE_READ))
 #define PAGE_READONLY_EXEC     __pgprot(pgprot_val(pgprot_user | PTE_READ \
-                                                               | PTE_EXEC)
+                                                               | PTE_EXEC))
 #define PAGE_KERNEL            pgprot_kernel
 #define PAGE_KERNEL_EXEC       __pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))
 
index 4eaa421..8d21b7a 100644 (file)
@@ -71,6 +71,7 @@ extern void release_thread(struct task_struct *);
 unsigned long get_wchan(struct task_struct *p);
 
 #define cpu_relax()                    barrier()
+#define cpu_relax_lowlatency()                cpu_relax()
 
 #define task_pt_regs(p) \
        ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
index 9df53d9..02bf5a4 100644 (file)
@@ -55,6 +55,7 @@ static inline int valid_user_regs(struct pt_regs *regs)
 
 #define instruction_pointer(regs)      ((regs)->UCreg_pc)
 #define user_stack_pointer(regs)       ((regs)->UCreg_sp)
+#define profile_pc(regs)               instruction_pointer(regs)
 
 #endif /* __ASSEMBLY__ */
 #endif
index 18d4563..b1ca775 100644 (file)
@@ -179,7 +179,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        }
 #ifdef CONFIG_CPU_FREQ
        if (clk == &clk_mclk_clk) {
-               u32 pll_rate, divstatus = PM_DIVSTATUS;
+               u32 pll_rate, divstatus = readl(PM_DIVSTATUS);
                int ret, i;
 
                /* lookup mclk_clk_table */
@@ -201,10 +201,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
                                / (((divstatus & 0x0000f000) >> 12) + 1);
 
                /* set pll sys cfg reg. */
-               PM_PLLSYSCFG = pll_rate;
+               writel(pll_rate, PM_PLLSYSCFG);
 
-               PM_PMCR = PM_PMCR_CFBSYS;
-               while ((PM_PLLDFCDONE & PM_PLLDFCDONE_SYSDFC)
+               writel(PM_PMCR_CFBSYS, PM_PMCR);
+               while ((readl(PM_PLLDFCDONE) & PM_PLLDFCDONE_SYSDFC)
                                != PM_PLLDFCDONE_SYSDFC)
                        udelay(100);
                        /* about 1ms */
index d285d71..0323528 100644 (file)
 
 #include "ksyms.h"
 
+EXPORT_SYMBOL(find_first_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
 EXPORT_SYMBOL(find_next_zero_bit);
 EXPORT_SYMBOL(find_next_bit);
 
-EXPORT_SYMBOL(__backtrace);
-
        /* platform dependent support */
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__const_udelay);
 
-       /* networking */
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_ipv6_magic);
-
-       /* io */
-#ifndef __raw_readsb
-EXPORT_SYMBOL(__raw_readsb);
-#endif
-#ifndef __raw_readsw
-EXPORT_SYMBOL(__raw_readsw);
-#endif
-#ifndef __raw_readsl
-EXPORT_SYMBOL(__raw_readsl);
-#endif
-#ifndef __raw_writesb
-EXPORT_SYMBOL(__raw_writesb);
-#endif
-#ifndef __raw_writesw
-EXPORT_SYMBOL(__raw_writesw);
-#endif
-#ifndef __raw_writesl
-EXPORT_SYMBOL(__raw_writesl);
-#endif
-
        /* string / mem functions */
 EXPORT_SYMBOL(strchr);
 EXPORT_SYMBOL(strrchr);
@@ -76,23 +50,12 @@ EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__divsi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
 EXPORT_SYMBOL(__ucmpdi2);
 EXPORT_SYMBOL(__udivsi3);
 EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__bswapsi2);
 
index 185cdc7..31472ad 100644 (file)
@@ -8,8 +8,6 @@ extern void __ashrdi3(void);
 extern void __divsi3(void);
 extern void __lshrdi3(void);
 extern void __modsi3(void);
-extern void __muldi3(void);
 extern void __ucmpdi2(void);
 extern void __udivsi3(void);
 extern void __umodsi3(void);
-extern void __bswapsi2(void);
index 16bd149..dc41f6d 100644 (file)
 
 void *module_alloc(unsigned long size)
 {
-       struct vm_struct *area;
-
-       size = PAGE_ALIGN(size);
-       area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-       if (!area)
-               return NULL;
-
-       return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+                               GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                               __builtin_return_address(0));
 }
 
 int
index 778ebba..b008e99 100644 (file)
@@ -60,6 +60,7 @@ void machine_halt(void)
  * Function pointers to optional machine specific functions
  */
 void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
 
 void machine_power_off(void)
 {
index 87adbf5..3fa317f 100644 (file)
@@ -53,6 +53,10 @@ struct stack {
 
 static struct stack stacks[NR_CPUS];
 
+#ifdef CONFIG_VGA_CONSOLE
+struct screen_info screen_info;
+#endif
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
 
index de7dc5f..24e8360 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/unaligned.h>
 
index f30071e..21c00fc 100644 (file)
@@ -19,5 +19,7 @@
 EXPORT_SYMBOL(cpu_dcache_clean_area);
 EXPORT_SYMBOL(cpu_set_pte);
 
+EXPORT_SYMBOL(__cpuc_coherent_kern_range);
+
 EXPORT_SYMBOL(__cpuc_dma_flush_range);
 EXPORT_SYMBOL(__cpuc_dma_clean_range);
index fcefdda..2840c27 100644 (file)
@@ -54,7 +54,6 @@ config X86
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_GRAPH_FP_TEST
-       select HAVE_FUNCTION_TRACE_MCOUNT_TEST
        select HAVE_SYSCALL_TRACEPOINTS
        select SYSCTL_EXCEPTION_TRACE
        select HAVE_KVM
@@ -131,6 +130,7 @@ config X86
        select HAVE_CC_STACKPROTECTOR
        select GENERIC_CPU_AUTOPROBE
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config INSTRUCTION_DECODER
        def_bool y
@@ -1672,7 +1672,6 @@ config RELOCATABLE
 config RANDOMIZE_BASE
        bool "Randomize the address of the kernel image"
        depends on RELOCATABLE
-       depends on !HIBERNATION
        default n
        ---help---
           Randomizes the physical and virtual address at which the
index 33f71b0..c65fd96 100644 (file)
@@ -15,12 +15,9 @@ endif
 # that way we can complain to the user if the CPU is insufficient.
 #
 # The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For
-# older versions of GCC, we need to play evil and unreliable tricks to
-# attempt to ensure that our asm(".code16gcc") is first in the asm
-# output.
-CODE16GCC_CFLAGS := -m32 -include $(srctree)/arch/x86/boot/code16gcc.h \
-                   $(call cc-option, -fno-toplevel-reorder,\
-                     $(call cc-option, -fno-unit-at-a-time))
+# older versions of GCC, include an *assembly* header to make sure that
+# gcc doesn't play any games behind our back.
+CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h
 M16_CFLAGS      := $(call cc-option, -m16, $(CODE16GCC_CFLAGS))
 
 REALMODE_CFLAGS        := $(M16_CFLAGS) -g -Os -D__KERNEL__ \
index d93e480..5ff4265 100644 (file)
@@ -1,15 +1,11 @@
-/*
- * code16gcc.h
- *
- * This file is -include'd when compiling 16-bit C code.
- * Note: this asm() needs to be emitted before gcc emits any code.
- * Depending on gcc version, this requires -fno-unit-at-a-time or
- * -fno-toplevel-reorder.
- *
- * Hopefully gcc will eventually have a real -m16 option so we can
- * drop this hack long term.
- */
+#
+# code16gcc.h
+#
+# This file is added to the assembler via -Wa when compiling 16-bit C code.
+# This is done this way instead of via asm() to make sure gcc does not reorder
+# things around us.
+#
+# gcc 4.9+ has a real -m16 option so we can drop this hack long term.
+#
 
-#ifndef __ASSEMBLY__
-asm(".code16gcc");
-#endif
+       .code16gcc
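
The practical effect: on toolchains without -m16, the 16-bit boot C
files are now built roughly as in the first command below, and gas
assembles the extra input file ahead of the compiler's generated
assembly, so .code16gcc is guaranteed to come first (invocations
illustrative, not copied from the build):

    gcc -m32 -Wa,arch/x86/boot/code16gcc.h -c -o regs.o regs.c
    # gcc >= 4.9 / clang >= 3.5 use the real 16-bit mode instead:
    gcc -m16 -c -o regs.o regs.c
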
index 4dbf967..fc6091a 100644 (file)
@@ -289,10 +289,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
        unsigned long choice = (unsigned long)output;
        unsigned long random;
 
+#ifdef CONFIG_HIBERNATION
+       if (!cmdline_find_option_bool("kaslr")) {
+               debug_putstr("KASLR disabled by default...\n");
+               goto out;
+       }
+#else
        if (cmdline_find_option_bool("nokaslr")) {
-               debug_putstr("KASLR disabled...\n");
+               debug_putstr("KASLR disabled by cmdline...\n");
                goto out;
        }
+#endif
 
        /* Record the various known unsafe memory ranges. */
        mem_avoid_init((unsigned long)input, input_size,
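
With this change KASLR and hibernation may be built into the same
kernel; only the runtime default flips with CONFIG_HIBERNATION.
Illustrative boot entries (paths hypothetical):

    # CONFIG_HIBERNATION=y: randomization stays off unless requested
    linux /boot/vmlinuz root=/dev/sda1 kaslr
    # CONFIG_HIBERNATION=n: randomization is on unless disabled
    linux /boot/vmlinuz root=/dev/sda1 nokaslr
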
index 84c2234..7a6d43a 100644 (file)
@@ -91,10 +91,9 @@ bs_die:
 
        .section ".bsdata", "a"
 bugger_off_msg:
-       .ascii  "Direct floppy boot is not supported. "
-       .ascii  "Use a boot loader program instead.\r\n"
+       .ascii  "Use a boot loader.\r\n"
        .ascii  "\n"
-       .ascii  "Remove disk and press any key to reboot ...\r\n"
+       .ascii  "Remove disk and press any key to reboot...\r\n"
        .byte   0
 
 #ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
 #else
        .word   0x8664                          # x86-64
 #endif
-       .word   3                               # nr_sections
+       .word   4                               # nr_sections
        .long   0                               # TimeDateStamp
        .long   0                               # PointerToSymbolTable
        .long   1                               # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
        .word   0                               # NumberOfLineNumbers
        .long   0x60500020                      # Characteristics (section flags)
 
+       #
+       # The offset & size fields are filled in by build.c.
+       #
+       .ascii  ".bss"
+       .byte   0
+       .byte   0
+       .byte   0
+       .byte   0
+       .long   0
+       .long   0x0
+       .long   0                               # Size of initialized data
+                                               # on disk
+       .long   0x0
+       .long   0                               # PointerToRelocations
+       .long   0                               # PointerToLineNumbers
+       .word   0                               # NumberOfRelocations
+       .word   0                               # NumberOfLineNumbers
+       .long   0xc8000080                      # Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
        # Kernel attributes; used by setup.  This is part 1 of the
index 1a2f212..a7661c4 100644 (file)
@@ -143,7 +143,7 @@ static void usage(void)
 
 #ifdef CONFIG_EFI_STUB
 
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
 {
        unsigned int pe_header;
        unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
                        put_unaligned_le32(size, section + 0x8);
 
                        /* section header vma field */
-                       put_unaligned_le32(offset, section + 0xc);
+                       put_unaligned_le32(vma, section + 0xc);
 
                        /* section header 'size of initialised data' field */
-                       put_unaligned_le32(size, section + 0x10);
+                       put_unaligned_le32(datasz, section + 0x10);
 
                        /* section header 'file offset' field */
                        put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
        }
 }
 
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+       update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
 static void update_pecoff_setup_and_reloc(unsigned int size)
 {
        u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
 
        pe_header = get_unaligned_le32(&buf[0x3c]);
 
-       /* Size of image */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
        /*
         * Size of code: Subtract the size of the first sector (512 bytes)
         * which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
        update_pecoff_section_header(".text", text_start, text_sz);
 }
 
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+       unsigned int pe_header;
+       unsigned int bss_sz = init_sz - file_sz;
+
+       pe_header = get_unaligned_le32(&buf[0x3c]);
+
+       /* Size of uninitialized data */
+       put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+       /* Size of image */
+       put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+       update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
 static int reserve_pecoff_reloc_section(int c)
 {
        /* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
 static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
 static inline void update_pecoff_text(unsigned int text_start,
                                      unsigned int file_sz) {}
+static inline void update_pecoff_bss(unsigned int file_sz,
+                                    unsigned int init_sz) {}
 static inline void efi_stub_defaults(void) {}
 static inline void efi_stub_entry_update(void) {}
 
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
 
 int main(int argc, char ** argv)
 {
-       unsigned int i, sz, setup_sectors;
+       unsigned int i, sz, setup_sectors, init_sz;
        int c;
        u32 sys_size;
        struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
        buf[0x1f1] = setup_sectors-1;
        put_unaligned_le32(sys_size, &buf[0x1f4]);
 
-       update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+       update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+       init_sz = get_unaligned_le32(&buf[0x260]);
+       update_pecoff_bss(i + (sys_size * 16), init_sz);
 
        efi_stub_entry_update();
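
Tying the build.c pieces together: init_sz is read from the init_size
field at offset 0x260 of the setup header, and everything between the
end of the on-disk image (file_sz = i + sys_size*16) and init_sz is
advertised as a zero-fill .bss section, so EFI loaders reserve the
kernel's full runtime footprint up front. A worked example with made-up
sizes:

    file_sz = i + sys_size*16   = 0x00580000   /* setup + payload on disk */
    init_sz = buf[0x260]        = 0x01000000   /* init_size, setup header */
    bss_sz  = init_sz - file_sz = 0x00a80000   /* raw size 0 in the file  */

update_pecoff_bss() stores bss_sz as SizeOfUninitializedData
(pe_header + 0x24) and init_sz as SizeOfImage (pe_header + 0x50).
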
 
index 61d6e28..d551165 100644 (file)
@@ -14,6 +14,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o
 obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o
 
 obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o
+obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o
 obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o
 obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o
 obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o
@@ -52,6 +53,7 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o
 serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o
 
 aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o
+des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o
 camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o
 blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o
 twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o
@@ -76,7 +78,7 @@ ifeq ($(avx2_supported),yes)
 endif
 
 aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o
-aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o
+aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o
 ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o
 sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o
 ifeq ($(avx2_supported),yes)
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
new file mode 100644 (file)
index 0000000..f091f12
--- /dev/null
@@ -0,0 +1,546 @@
+/*
+ *     Implement AES CTR mode by8 optimization with AVX instructions. (x86_64)
+ *
+ * This is AES128/192/256 CTR mode optimization implementation. It requires
+ * the support of Intel(R) AESNI and AVX instructions.
+ *
+ * This work was inspired by the AES CTR mode optimization published
+ * in Intel Optimized IPSEC Cryptographic library.
+ * Additional information on it can be found at:
+ *    http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972
+ *
+ * This file is provided under a dual BSD/GPLv2 license.  When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * Contact Information:
+ * James Guilford <james.guilford@intel.com>
+ * Sean Gulley <sean.m.gulley@intel.com>
+ * Chandramouli Narayanan <mouli@linux.intel.com>
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/inst.h>
+
+#define CONCAT(a,b)    a##b
+#define VMOVDQ         vmovdqu
+
+#define xdata0         %xmm0
+#define xdata1         %xmm1
+#define xdata2         %xmm2
+#define xdata3         %xmm3
+#define xdata4         %xmm4
+#define xdata5         %xmm5
+#define xdata6         %xmm6
+#define xdata7         %xmm7
+#define xcounter       %xmm8
+#define xbyteswap      %xmm9
+#define xkey0          %xmm10
+#define xkey3          %xmm11
+#define xkey6          %xmm12
+#define xkey9          %xmm13
+#define xkey4          %xmm11
+#define xkey8          %xmm12
+#define xkey12         %xmm13
+#define xkeyA          %xmm14
+#define xkeyB          %xmm15
+
+#define p_in           %rdi
+#define p_iv           %rsi
+#define p_keys         %rdx
+#define p_out          %rcx
+#define num_bytes      %r8
+
+#define tmp            %r10
+#define        DDQ(i)          CONCAT(ddq_add_,i)
+#define        XMM(i)          CONCAT(%xmm, i)
+#define        DDQ_DATA        0
+#define        XDATA           1
+#define KEY_128                1
+#define KEY_192                2
+#define KEY_256                3
+
+.section .rodata
+.align 16
+
+byteswap_const:
+       .octa 0x000102030405060708090A0B0C0D0E0F
+ddq_add_1:
+       .octa 0x00000000000000000000000000000001
+ddq_add_2:
+       .octa 0x00000000000000000000000000000002
+ddq_add_3:
+       .octa 0x00000000000000000000000000000003
+ddq_add_4:
+       .octa 0x00000000000000000000000000000004
+ddq_add_5:
+       .octa 0x00000000000000000000000000000005
+ddq_add_6:
+       .octa 0x00000000000000000000000000000006
+ddq_add_7:
+       .octa 0x00000000000000000000000000000007
+ddq_add_8:
+       .octa 0x00000000000000000000000000000008
+
+.text
+
+/* generate a unique variable for ddq_add_x */
+
+.macro setddq n
+       var_ddq_add = DDQ(\n)
+.endm
+
+/* generate a unique variable for xmm register */
+.macro setxdata n
+       var_xdata = XMM(\n)
+.endm
+
+/* combine the numeric 'id' with the symbol 'name' */
+
+.macro club name, id
+.altmacro
+       .if \name == DDQ_DATA
+               setddq %\id
+       .elseif \name == XDATA
+               setxdata %\id
+       .endif
+.noaltmacro
+.endm
+
+/*
+ * do_aes num_in_par load_keys key_len
+ * This increments p_in, but not p_out
+ */
+.macro do_aes b, k, key_len
+       .set by, \b
+       .set load_keys, \k
+       .set klen, \key_len
+
+       .if (load_keys)
+               vmovdqa 0*16(p_keys), xkey0
+       .endif
+
+       vpshufb xbyteswap, xcounter, xdata0
+
+       .set i, 1
+       .rept (by - 1)
+               club DDQ_DATA, i
+               club XDATA, i
+               vpaddd  var_ddq_add(%rip), xcounter, var_xdata
+               vpshufb xbyteswap, var_xdata, var_xdata
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 1*16(p_keys), xkeyA
+
+       vpxor   xkey0, xdata0, xdata0
+       club DDQ_DATA, by
+       vpaddd  var_ddq_add(%rip), xcounter, xcounter
+
+       .set i, 1
+       .rept (by - 1)
+               club XDATA, i
+               vpxor   xkey0, var_xdata, var_xdata
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 2*16(p_keys), xkeyB
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 1 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen == KEY_128)
+               .if (load_keys)
+                       vmovdqa 3*16(p_keys), xkeyA
+               .endif
+       .else
+               vmovdqa 3*16(p_keys), xkeyA
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyB, var_xdata, var_xdata             /* key 2 */
+               .set i, (i +1)
+       .endr
+
+       add     $(16*by), p_in
+
+       .if (klen == KEY_128)
+               vmovdqa 4*16(p_keys), xkey4
+       .else
+               .if (load_keys)
+                       vmovdqa 4*16(p_keys), xkey4
+               .endif
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 3 */
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 5*16(p_keys), xkeyA
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkey4, var_xdata, var_xdata             /* key 4 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen == KEY_128)
+               .if (load_keys)
+                       vmovdqa 6*16(p_keys), xkeyB
+               .endif
+       .else
+               vmovdqa 6*16(p_keys), xkeyB
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 5 */
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 7*16(p_keys), xkeyA
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyB, var_xdata, var_xdata             /* key 6 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen == KEY_128)
+               vmovdqa 8*16(p_keys), xkey8
+       .else
+               .if (load_keys)
+                       vmovdqa 8*16(p_keys), xkey8
+               .endif
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 7 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen == KEY_128)
+               .if (load_keys)
+                       vmovdqa 9*16(p_keys), xkeyA
+               .endif
+       .else
+               vmovdqa 9*16(p_keys), xkeyA
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkey8, var_xdata, var_xdata             /* key 8 */
+               .set i, (i +1)
+       .endr
+
+       vmovdqa 10*16(p_keys), xkeyB
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               vaesenc xkeyA, var_xdata, var_xdata             /* key 9 */
+               .set i, (i +1)
+       .endr
+
+       .if (klen != KEY_128)
+               vmovdqa 11*16(p_keys), xkeyA
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               /* key 10 */
+               .if (klen == KEY_128)
+                       vaesenclast     xkeyB, var_xdata, var_xdata
+               .else
+                       vaesenc xkeyB, var_xdata, var_xdata
+               .endif
+               .set i, (i +1)
+       .endr
+
+       .if (klen != KEY_128)
+               .if (load_keys)
+                       vmovdqa 12*16(p_keys), xkey12
+               .endif
+
+               .set i, 0
+               .rept by
+                       club XDATA, i
+                       vaesenc xkeyA, var_xdata, var_xdata     /* key 11 */
+                       .set i, (i +1)
+               .endr
+
+               .if (klen == KEY_256)
+                       vmovdqa 13*16(p_keys), xkeyA
+               .endif
+
+               .set i, 0
+               .rept by
+                       club XDATA, i
+                       .if (klen == KEY_256)
+                               /* key 12 */
+                               vaesenc xkey12, var_xdata, var_xdata
+                       .else
+                               vaesenclast xkey12, var_xdata, var_xdata
+                       .endif
+                       .set i, (i +1)
+               .endr
+
+               .if (klen == KEY_256)
+                       vmovdqa 14*16(p_keys), xkeyB
+
+                       .set i, 0
+                       .rept by
+                               club XDATA, i
+                               /* key 13 */
+                               vaesenc xkeyA, var_xdata, var_xdata
+                               .set i, (i +1)
+                       .endr
+
+                       .set i, 0
+                       .rept by
+                               club XDATA, i
+                               /* key 14 */
+                               vaesenclast     xkeyB, var_xdata, var_xdata
+                               .set i, (i +1)
+                       .endr
+               .endif
+       .endif
+
+       .set i, 0
+       .rept (by / 2)
+               .set j, (i+1)
+               VMOVDQ  (i*16 - 16*by)(p_in), xkeyA
+               VMOVDQ  (j*16 - 16*by)(p_in), xkeyB
+               club XDATA, i
+               vpxor   xkeyA, var_xdata, var_xdata
+               club XDATA, j
+               vpxor   xkeyB, var_xdata, var_xdata
+               .set i, (i+2)
+       .endr
+
+       .if (i < by)
+               VMOVDQ  (i*16 - 16*by)(p_in), xkeyA
+               club XDATA, i
+               vpxor   xkeyA, var_xdata, var_xdata
+       .endif
+
+       .set i, 0
+       .rept by
+               club XDATA, i
+               VMOVDQ  var_xdata, i*16(p_out)
+               .set i, (i+1)
+       .endr
+.endm
+
+.macro do_aes_load val, key_len
+       do_aes \val, 1, \key_len
+.endm
+
+.macro do_aes_noload val, key_len
+       do_aes \val, 0, \key_len
+.endm
+
+/* main body of aes ctr load */
+
+.macro do_aes_ctrmain key_len
+
+       cmp     $16, num_bytes
+       jb      .Ldo_return2\key_len
+
+       vmovdqa byteswap_const(%rip), xbyteswap
+       vmovdqu (p_iv), xcounter
+       vpshufb xbyteswap, xcounter, xcounter
+
+       mov     num_bytes, tmp
+       and     $(7*16), tmp
+       jz      .Lmult_of_8_blks\key_len
+
+       /* 1 to 7 blocks worth of bytes remain: 16 <= tmp <= 112 */
+       cmp     $(4*16), tmp
+       jg      .Lgt4\key_len
+       je      .Leq4\key_len
+
+.Llt4\key_len:
+       cmp     $(2*16), tmp
+       jg      .Leq3\key_len
+       je      .Leq2\key_len
+
+.Leq1\key_len:
+       do_aes_load     1, \key_len
+       add     $(1*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Leq2\key_len:
+       do_aes_load     2, \key_len
+       add     $(2*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+
+.Leq3\key_len:
+       do_aes_load     3, \key_len
+       add     $(3*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Leq4\key_len:
+       do_aes_load     4, \key_len
+       add     $(4*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Lgt4\key_len:
+       cmp     $(6*16), tmp
+       jg      .Leq7\key_len
+       je      .Leq6\key_len
+
+.Leq5\key_len:
+       do_aes_load     5, \key_len
+       add     $(5*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Leq6\key_len:
+       do_aes_load     6, \key_len
+       add     $(6*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Leq7\key_len:
+       do_aes_load     7, \key_len
+       add     $(7*16), p_out
+       and     $(~7*16), num_bytes
+       jz      .Ldo_return2\key_len
+       jmp     .Lmain_loop2\key_len
+
+.Lmult_of_8_blks\key_len:
+       .if (\key_len != KEY_128)
+               vmovdqa 0*16(p_keys), xkey0
+               vmovdqa 4*16(p_keys), xkey4
+               vmovdqa 8*16(p_keys), xkey8
+               vmovdqa 12*16(p_keys), xkey12
+       .else
+               vmovdqa 0*16(p_keys), xkey0
+               vmovdqa 3*16(p_keys), xkey4
+               vmovdqa 6*16(p_keys), xkey8
+               vmovdqa 9*16(p_keys), xkey12
+       .endif
+.align 16
+.Lmain_loop2\key_len:
+       /* num_bytes is a multiple of 8 blocks (128 bytes) and >0 */
+       do_aes_noload   8, \key_len
+       add     $(8*16), p_out
+       sub     $(8*16), num_bytes
+       jne     .Lmain_loop2\key_len
+
+.Ldo_return2\key_len:
+       /* return updated IV */
+       vpshufb xbyteswap, xcounter, xcounter
+       vmovdqu xcounter, (p_iv)
+       ret
+.endm
+
+/*
+ * routine to do AES128 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out,
+ *                     unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_128_avx_by8)
+       /* call the aes main loop */
+       do_aes_ctrmain KEY_128
+
+ENDPROC(aes_ctr_enc_128_avx_by8)
+
+/*
+ * routine to do AES192 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out,
+ *                     unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_192_avx_by8)
+       /* call the aes main loop */
+       do_aes_ctrmain KEY_192
+
+ENDPROC(aes_ctr_enc_192_avx_by8)
+
+/*
+ * routine to do AES256 CTR enc/decrypt "by8"
+ * XMM registers are clobbered.
+ * Saving/restoring must be done at a higher level
+ * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out,
+ *                     unsigned int num_bytes)
+ */
+ENTRY(aes_ctr_enc_256_avx_by8)
+       /* call the aes main loop */
+       do_aes_ctrmain KEY_256
+
+ENDPROC(aes_ctr_enc_256_avx_by8)
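
Semantically, each aes_ctr_enc_*_avx_by8() routine is plain CTR mode,
computed eight blocks per loop iteration: the big-endian counter loaded
from (p_iv) is encrypted, incremented once per block, XORed into the
input, and the updated counter is written back on return. A hedged
single-block C reference of what the fast path computes
(aes_encrypt_block() is a hypothetical stand-in for a one-block AES
primitive; crypto_inc() is the kernel helper the glue code below also
uses for the tail block):

    /* kernel-style types: u8 from <linux/types.h> */
    static void ctr_ref(const u8 *in, u8 *out, unsigned int nbytes,
                        const void *key, u8 ctr[16])
    {
            while (nbytes >= 16) {  /* partial blocks are handled elsewhere */
                    u8 ks[16];
                    int i;

                    aes_encrypt_block(key, ks, ctr);   /* hypothetical */
                    for (i = 0; i < 16; i++)
                            out[i] = in[i] ^ ks[i];    /* in ^ keystream */
                    crypto_inc(ctr, 16);               /* big-endian ++  */
                    in += 16;
                    out += 16;
                    nbytes -= 16;
            }
    }
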
index 948ad0e..888950f 100644 (file)
@@ -105,6 +105,9 @@ void crypto_fpu_exit(void);
 #define AVX_GEN4_OPTSIZE 4096
 
 #ifdef CONFIG_X86_64
+
+static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len, u8 *iv);
 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
                              const u8 *in, unsigned int len, u8 *iv);
 
@@ -155,6 +158,12 @@ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
 
 
 #ifdef CONFIG_AS_AVX
+asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv,
+               void *keys, u8 *out, unsigned int num_bytes);
+asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv,
+               void *keys, u8 *out, unsigned int num_bytes);
+asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv,
+               void *keys, u8 *out, unsigned int num_bytes);
 /*
  * asmlinkage void aesni_gcm_precomp_avx_gen2()
  * gcm_data *my_ctx_data, context data
@@ -472,6 +481,25 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx,
        crypto_inc(ctrblk, AES_BLOCK_SIZE);
 }
 
+#ifdef CONFIG_AS_AVX
+static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out,
+                             const u8 *in, unsigned int len, u8 *iv)
+{
+       /*
+        * Based on the key length, override with the by8 version
+        * of ctr mode encryption/decryption for improved performance.
+        * aes_set_key_common() ensures that the key length is one of
+        * {128,192,256}.
+        */
+       if (ctx->key_length == AES_KEYSIZE_128)
+               aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len);
+       else if (ctx->key_length == AES_KEYSIZE_192)
+               aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len);
+       else
+               aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len);
+}
+#endif
+
 static int ctr_crypt(struct blkcipher_desc *desc,
                     struct scatterlist *dst, struct scatterlist *src,
                     unsigned int nbytes)
@@ -486,8 +514,8 @@ static int ctr_crypt(struct blkcipher_desc *desc,
 
        kernel_fpu_begin();
        while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
-               aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-                             nbytes & AES_BLOCK_MASK, walk.iv);
+               aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
+                                 nbytes & AES_BLOCK_MASK, walk.iv);
                nbytes &= AES_BLOCK_SIZE - 1;
                err = blkcipher_walk_done(desc, &walk, nbytes);
        }
@@ -1493,6 +1521,14 @@ static int __init aesni_init(void)
                aesni_gcm_enc_tfm = aesni_gcm_enc;
                aesni_gcm_dec_tfm = aesni_gcm_dec;
        }
+       aesni_ctr_enc_tfm = aesni_ctr_enc;
+#ifdef CONFIG_AS_AVX
+       if (cpu_has_avx) {
+               /* optimize performance of ctr mode encryption transform */
+               aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm;
+               pr_info("AES CTR mode by8 optimization enabled\n");
+       }
+#endif
 #endif
 
        err = crypto_fpu_init();
index dbc4339..26d49eb 100644 (file)
@@ -72,6 +72,7 @@
 
 # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init);
 
+.text
 ENTRY(crc_pcl)
 #define    bufp                %rdi
 #define    bufp_dw     %edi
@@ -216,15 +217,11 @@ LABEL crc_ %i
        ## 4) Combine three results:
        ################################################################
 
-       lea     (K_table-16)(%rip), bufp        # first entry is for idx 1
+       lea     (K_table-8)(%rip), bufp         # first entry is for idx 1
        shlq    $3, %rax                        # rax *= 8
-       subq    %rax, tmp                       # tmp -= rax*8
-       shlq    $1, %rax
-       subq    %rax, tmp                       # tmp -= rax*16
-                                               # (total tmp -= rax*24)
-       addq    %rax, bufp
-
-       movdqa  (bufp), %xmm0                   # 2 consts: K1:K2
+       pmovzxdq (bufp,%rax), %xmm0             # 2 consts: K1:K2
+       leal    (%eax,%eax,2), %eax             # rax *= 3 (total *24)
+       subq    %rax, tmp                       # tmp -= rax*24
 
        movq    crc_init, %xmm1                 # CRC for block 1
        PCLMULQDQ 0x00,%xmm0,%xmm1              # Multiply by K2
@@ -238,9 +235,9 @@ LABEL crc_ %i
        mov     crc2, crc_init
        crc32   %rax, crc_init
 
-################################################################
-## 5) Check for end:
-################################################################
+       ################################################################
+       ## 5) Check for end:
+       ################################################################
 
 LABEL crc_ 0
        mov     tmp, len
@@ -331,136 +328,136 @@ ENDPROC(crc_pcl)
 
        ################################################################
        ## PCLMULQDQ tables
-       ## Table is 128 entries x 2 quad words each
+       ## Table is 128 entries x 2 words (8 bytes) each
        ################################################################
-.data
-.align 64
+.section	.rodata, "a", %progbits
+.align 8
 K_table:
-        .quad 0x14cd00bd6,0x105ec76f0
-        .quad 0x0ba4fc28e,0x14cd00bd6
-        .quad 0x1d82c63da,0x0f20c0dfe
-        .quad 0x09e4addf8,0x0ba4fc28e
-        .quad 0x039d3b296,0x1384aa63a
-        .quad 0x102f9b8a2,0x1d82c63da
-        .quad 0x14237f5e6,0x01c291d04
-        .quad 0x00d3b6092,0x09e4addf8
-        .quad 0x0c96cfdc0,0x0740eef02
-        .quad 0x18266e456,0x039d3b296
-        .quad 0x0daece73e,0x0083a6eec
-        .quad 0x0ab7aff2a,0x102f9b8a2
-        .quad 0x1248ea574,0x1c1733996
-        .quad 0x083348832,0x14237f5e6
-        .quad 0x12c743124,0x02ad91c30
-        .quad 0x0b9e02b86,0x00d3b6092
-        .quad 0x018b33a4e,0x06992cea2
-        .quad 0x1b331e26a,0x0c96cfdc0
-        .quad 0x17d35ba46,0x07e908048
-        .quad 0x1bf2e8b8a,0x18266e456
-        .quad 0x1a3e0968a,0x11ed1f9d8
-        .quad 0x0ce7f39f4,0x0daece73e
-        .quad 0x061d82e56,0x0f1d0f55e
-        .quad 0x0d270f1a2,0x0ab7aff2a
-        .quad 0x1c3f5f66c,0x0a87ab8a8
-        .quad 0x12ed0daac,0x1248ea574
-        .quad 0x065863b64,0x08462d800
-        .quad 0x11eef4f8e,0x083348832
-        .quad 0x1ee54f54c,0x071d111a8
-        .quad 0x0b3e32c28,0x12c743124
-        .quad 0x0064f7f26,0x0ffd852c6
-        .quad 0x0dd7e3b0c,0x0b9e02b86
-        .quad 0x0f285651c,0x0dcb17aa4
-        .quad 0x010746f3c,0x018b33a4e
-        .quad 0x1c24afea4,0x0f37c5aee
-        .quad 0x0271d9844,0x1b331e26a
-        .quad 0x08e766a0c,0x06051d5a2
-        .quad 0x093a5f730,0x17d35ba46
-        .quad 0x06cb08e5c,0x11d5ca20e
-        .quad 0x06b749fb2,0x1bf2e8b8a
-        .quad 0x1167f94f2,0x021f3d99c
-        .quad 0x0cec3662e,0x1a3e0968a
-        .quad 0x19329634a,0x08f158014
-        .quad 0x0e6fc4e6a,0x0ce7f39f4
-        .quad 0x08227bb8a,0x1a5e82106
-        .quad 0x0b0cd4768,0x061d82e56
-        .quad 0x13c2b89c4,0x188815ab2
-        .quad 0x0d7a4825c,0x0d270f1a2
-        .quad 0x10f5ff2ba,0x105405f3e
-        .quad 0x00167d312,0x1c3f5f66c
-        .quad 0x0f6076544,0x0e9adf796
-        .quad 0x026f6a60a,0x12ed0daac
-        .quad 0x1a2adb74e,0x096638b34
-        .quad 0x19d34af3a,0x065863b64
-        .quad 0x049c3cc9c,0x1e50585a0
-        .quad 0x068bce87a,0x11eef4f8e
-        .quad 0x1524fa6c6,0x19f1c69dc
-        .quad 0x16cba8aca,0x1ee54f54c
-        .quad 0x042d98888,0x12913343e
-        .quad 0x1329d9f7e,0x0b3e32c28
-        .quad 0x1b1c69528,0x088f25a3a
-        .quad 0x02178513a,0x0064f7f26
-        .quad 0x0e0ac139e,0x04e36f0b0
-        .quad 0x0170076fa,0x0dd7e3b0c
-        .quad 0x141a1a2e2,0x0bd6f81f8
-        .quad 0x16ad828b4,0x0f285651c
-        .quad 0x041d17b64,0x19425cbba
-        .quad 0x1fae1cc66,0x010746f3c
-        .quad 0x1a75b4b00,0x18db37e8a
-        .quad 0x0f872e54c,0x1c24afea4
-        .quad 0x01e41e9fc,0x04c144932
-        .quad 0x086d8e4d2,0x0271d9844
-        .quad 0x160f7af7a,0x052148f02
-        .quad 0x05bb8f1bc,0x08e766a0c
-        .quad 0x0a90fd27a,0x0a3c6f37a
-        .quad 0x0b3af077a,0x093a5f730
-        .quad 0x04984d782,0x1d22c238e
-        .quad 0x0ca6ef3ac,0x06cb08e5c
-        .quad 0x0234e0b26,0x063ded06a
-        .quad 0x1d88abd4a,0x06b749fb2
-        .quad 0x04597456a,0x04d56973c
-        .quad 0x0e9e28eb4,0x1167f94f2
-        .quad 0x07b3ff57a,0x19385bf2e
-        .quad 0x0c9c8b782,0x0cec3662e
-        .quad 0x13a9cba9e,0x0e417f38a
-        .quad 0x093e106a4,0x19329634a
-        .quad 0x167001a9c,0x14e727980
-        .quad 0x1ddffc5d4,0x0e6fc4e6a
-        .quad 0x00df04680,0x0d104b8fc
-        .quad 0x02342001e,0x08227bb8a
-        .quad 0x00a2a8d7e,0x05b397730
-        .quad 0x168763fa6,0x0b0cd4768
-        .quad 0x1ed5a407a,0x0e78eb416
-        .quad 0x0d2c3ed1a,0x13c2b89c4
-        .quad 0x0995a5724,0x1641378f0
-        .quad 0x19b1afbc4,0x0d7a4825c
-        .quad 0x109ffedc0,0x08d96551c
-        .quad 0x0f2271e60,0x10f5ff2ba
-        .quad 0x00b0bf8ca,0x00bf80dd2
-        .quad 0x123888b7a,0x00167d312
-        .quad 0x1e888f7dc,0x18dcddd1c
-        .quad 0x002ee03b2,0x0f6076544
-        .quad 0x183e8d8fe,0x06a45d2b2
-        .quad 0x133d7a042,0x026f6a60a
-        .quad 0x116b0f50c,0x1dd3e10e8
-        .quad 0x05fabe670,0x1a2adb74e
-        .quad 0x130004488,0x0de87806c
-        .quad 0x000bcf5f6,0x19d34af3a
-        .quad 0x18f0c7078,0x014338754
-        .quad 0x017f27698,0x049c3cc9c
-        .quad 0x058ca5f00,0x15e3e77ee
-        .quad 0x1af900c24,0x068bce87a
-        .quad 0x0b5cfca28,0x0dd07448e
-        .quad 0x0ded288f8,0x1524fa6c6
-        .quad 0x059f229bc,0x1d8048348
-        .quad 0x06d390dec,0x16cba8aca
-        .quad 0x037170390,0x0a3e3e02c
-        .quad 0x06353c1cc,0x042d98888
-        .quad 0x0c4584f5c,0x0d73c7bea
-        .quad 0x1f16a3418,0x1329d9f7e
-        .quad 0x0531377e2,0x185137662
-        .quad 0x1d8d9ca7c,0x1b1c69528
-        .quad 0x0b25b29f2,0x18a08b5bc
-        .quad 0x19fb2a8b0,0x02178513a
-        .quad 0x1a08fe6ac,0x1da758ae0
-        .quad 0x045cddf4e,0x0e0ac139e
-        .quad 0x1a91647f2,0x169cf9eb0
-        .quad 0x1a0f717c4,0x0170076fa
+       .long 0x493c7d27, 0x00000001
+       .long 0xba4fc28e, 0x493c7d27
+       .long 0xddc0152b, 0xf20c0dfe
+       .long 0x9e4addf8, 0xba4fc28e
+       .long 0x39d3b296, 0x3da6d0cb
+       .long 0x0715ce53, 0xddc0152b
+       .long 0x47db8317, 0x1c291d04
+       .long 0x0d3b6092, 0x9e4addf8
+       .long 0xc96cfdc0, 0x740eef02
+       .long 0x878a92a7, 0x39d3b296
+       .long 0xdaece73e, 0x083a6eec
+       .long 0xab7aff2a, 0x0715ce53
+       .long 0x2162d385, 0xc49f4f67
+       .long 0x83348832, 0x47db8317
+       .long 0x299847d5, 0x2ad91c30
+       .long 0xb9e02b86, 0x0d3b6092
+       .long 0x18b33a4e, 0x6992cea2
+       .long 0xb6dd949b, 0xc96cfdc0
+       .long 0x78d9ccb7, 0x7e908048
+       .long 0xbac2fd7b, 0x878a92a7
+       .long 0xa60ce07b, 0x1b3d8f29
+       .long 0xce7f39f4, 0xdaece73e
+       .long 0x61d82e56, 0xf1d0f55e
+       .long 0xd270f1a2, 0xab7aff2a
+       .long 0xc619809d, 0xa87ab8a8
+       .long 0x2b3cac5d, 0x2162d385
+       .long 0x65863b64, 0x8462d800
+       .long 0x1b03397f, 0x83348832
+       .long 0xebb883bd, 0x71d111a8
+       .long 0xb3e32c28, 0x299847d5
+       .long 0x064f7f26, 0xffd852c6
+       .long 0xdd7e3b0c, 0xb9e02b86
+       .long 0xf285651c, 0xdcb17aa4
+       .long 0x10746f3c, 0x18b33a4e
+       .long 0xc7a68855, 0xf37c5aee
+       .long 0x271d9844, 0xb6dd949b
+       .long 0x8e766a0c, 0x6051d5a2
+       .long 0x93a5f730, 0x78d9ccb7
+       .long 0x6cb08e5c, 0x18b0d4ff
+       .long 0x6b749fb2, 0xbac2fd7b
+       .long 0x1393e203, 0x21f3d99c
+       .long 0xcec3662e, 0xa60ce07b
+       .long 0x96c515bb, 0x8f158014
+       .long 0xe6fc4e6a, 0xce7f39f4
+       .long 0x8227bb8a, 0xa00457f7
+       .long 0xb0cd4768, 0x61d82e56
+       .long 0x39c7ff35, 0x8d6d2c43
+       .long 0xd7a4825c, 0xd270f1a2
+       .long 0x0ab3844b, 0x00ac29cf
+       .long 0x0167d312, 0xc619809d
+       .long 0xf6076544, 0xe9adf796
+       .long 0x26f6a60a, 0x2b3cac5d
+       .long 0xa741c1bf, 0x96638b34
+       .long 0x98d8d9cb, 0x65863b64
+       .long 0x49c3cc9c, 0xe0e9f351
+       .long 0x68bce87a, 0x1b03397f
+       .long 0x57a3d037, 0x9af01f2d
+       .long 0x6956fc3b, 0xebb883bd
+       .long 0x42d98888, 0x2cff42cf
+       .long 0x3771e98f, 0xb3e32c28
+       .long 0xb42ae3d9, 0x88f25a3a
+       .long 0x2178513a, 0x064f7f26
+       .long 0xe0ac139e, 0x4e36f0b0
+       .long 0x170076fa, 0xdd7e3b0c
+       .long 0x444dd413, 0xbd6f81f8
+       .long 0x6f345e45, 0xf285651c
+       .long 0x41d17b64, 0x91c9bd4b
+       .long 0xff0dba97, 0x10746f3c
+       .long 0xa2b73df1, 0x885f087b
+       .long 0xf872e54c, 0xc7a68855
+       .long 0x1e41e9fc, 0x4c144932
+       .long 0x86d8e4d2, 0x271d9844
+       .long 0x651bd98b, 0x52148f02
+       .long 0x5bb8f1bc, 0x8e766a0c
+       .long 0xa90fd27a, 0xa3c6f37a
+       .long 0xb3af077a, 0x93a5f730
+       .long 0x4984d782, 0xd7c0557f
+       .long 0xca6ef3ac, 0x6cb08e5c
+       .long 0x234e0b26, 0x63ded06a
+       .long 0xdd66cbbb, 0x6b749fb2
+       .long 0x4597456a, 0x4d56973c
+       .long 0xe9e28eb4, 0x1393e203
+       .long 0x7b3ff57a, 0x9669c9df
+       .long 0xc9c8b782, 0xcec3662e
+       .long 0x3f70cc6f, 0xe417f38a
+       .long 0x93e106a4, 0x96c515bb
+       .long 0x62ec6c6d, 0x4b9e0f71
+       .long 0xd813b325, 0xe6fc4e6a
+       .long 0x0df04680, 0xd104b8fc
+       .long 0x2342001e, 0x8227bb8a
+       .long 0x0a2a8d7e, 0x5b397730
+       .long 0x6d9a4957, 0xb0cd4768
+       .long 0xe8b6368b, 0xe78eb416
+       .long 0xd2c3ed1a, 0x39c7ff35
+       .long 0x995a5724, 0x61ff0e01
+       .long 0x9ef68d35, 0xd7a4825c
+       .long 0x0c139b31, 0x8d96551c
+       .long 0xf2271e60, 0x0ab3844b
+       .long 0x0b0bf8ca, 0x0bf80dd2
+       .long 0x2664fd8b, 0x0167d312
+       .long 0xed64812d, 0x8821abed
+       .long 0x02ee03b2, 0xf6076544
+       .long 0x8604ae0f, 0x6a45d2b2
+       .long 0x363bd6b3, 0x26f6a60a
+       .long 0x135c83fd, 0xd8d26619
+       .long 0x5fabe670, 0xa741c1bf
+       .long 0x35ec3279, 0xde87806c
+       .long 0x00bcf5f6, 0x98d8d9cb
+       .long 0x8ae00689, 0x14338754
+       .long 0x17f27698, 0x49c3cc9c
+       .long 0x58ca5f00, 0x5bd2011f
+       .long 0xaa7c7ad5, 0x68bce87a
+       .long 0xb5cfca28, 0xdd07448e
+       .long 0xded288f8, 0x57a3d037
+       .long 0x59f229bc, 0xdde8f5b9
+       .long 0x6d390dec, 0x6956fc3b
+       .long 0x37170390, 0xa3e3e02c
+       .long 0x6353c1cc, 0x42d98888
+       .long 0xc4584f5c, 0xd73c7bea
+       .long 0xf48642e9, 0x3771e98f
+       .long 0x531377e2, 0x80ff0093
+       .long 0xdd35bc8d, 0xb42ae3d9
+       .long 0xb25b29f2, 0x8fe4c34d
+       .long 0x9a5ede41, 0x2178513a
+       .long 0xa563905d, 0xdf99fc11
+       .long 0x45cddf4e, 0xe0ac139e
+       .long 0xacfa3103, 0x6c23e841
+       .long 0xa51b6135, 0x170076fa
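Besides reindenting the step-5 comment, this hunk halves K_table by storing each constant pair as two 32-bit .long values, zero-extended into the two 64-bit XMM lanes at load time by pmovzxdq, and it collapses the old shlq/subq/shlq/subq arithmetic into one lea that scales the index by 3 (rax*8 from the preceding shlq, times 3, yields the rax*24 subtracted from tmp). The table also moves out of writable .data into a read-only section, since it is never modified. A hedged intrinsics sketch of the new load, assuming a flat uint32_t table laid out as in the diff:

    #include <immintrin.h>
    #include <stdint.h>

    /* what "pmovzxdq (bufp,%rax), %xmm0" does above: fetch two packed
     * 32-bit constants and zero-extend them into 64-bit XMM lanes */
    static __m128i load_k_pair(const uint32_t *k_table, unsigned int idx)
    {
            __m128i lo = _mm_loadl_epi64((const __m128i *)&k_table[2 * idx]);

            return _mm_cvtepu32_epi64(lo);  /* SSE4.1 pmovzxdq */
    }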
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S
new file mode 100644 (file)
index 0000000..038f6ae
--- /dev/null
@@ -0,0 +1,805 @@
+/*
+ * des3_ede-asm_64.S  -  x86-64 assembly implementation of 3DES cipher
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+.file "des3_ede-asm_64.S"
+.text
+
+#define s1 .L_s1
+#define s2 ((s1) + (64*8))
+#define s3 ((s2) + (64*8))
+#define s4 ((s3) + (64*8))
+#define s5 ((s4) + (64*8))
+#define s6 ((s5) + (64*8))
+#define s7 ((s6) + (64*8))
+#define s8 ((s7) + (64*8))
+
+/* register macros */
+#define CTX %rdi
+
+#define RL0 %r8
+#define RL1 %r9
+#define RL2 %r10
+
+#define RL0d %r8d
+#define RL1d %r9d
+#define RL2d %r10d
+
+#define RR0 %r11
+#define RR1 %r12
+#define RR2 %r13
+
+#define RR0d %r11d
+#define RR1d %r12d
+#define RR2d %r13d
+
+#define RW0 %rax
+#define RW1 %rbx
+#define RW2 %rcx
+
+#define RW0d %eax
+#define RW1d %ebx
+#define RW2d %ecx
+
+#define RW0bl %al
+#define RW1bl %bl
+#define RW2bl %cl
+
+#define RW0bh %ah
+#define RW1bh %bh
+#define RW2bh %ch
+
+#define RT0 %r15
+#define RT1 %rbp
+#define RT2 %r14
+#define RT3 %rdx
+
+#define RT0d %r15d
+#define RT1d %ebp
+#define RT2d %r14d
+#define RT3d %edx
+
+/***********************************************************************
+ * 1-way 3DES
+ ***********************************************************************/
+#define do_permutation(a, b, offset, mask) \
+       movl a, RT0d; \
+       shrl $(offset), RT0d; \
+       xorl b, RT0d; \
+       andl $(mask), RT0d; \
+       xorl RT0d, b; \
+       shll $(offset), RT0d; \
+       xorl RT0d, a;
+
+#define expand_to_64bits(val, mask) \
+       movl val##d, RT0d; \
+       rorl $4, RT0d; \
+       shlq $32, RT0; \
+       orq RT0, val; \
+       andq mask, val;
+
+#define compress_to_64bits(val) \
+       movq val, RT0; \
+       shrq $32, RT0; \
+       roll $4, RT0d; \
+       orl RT0d, val##d;
+
+#define initial_permutation(left, right) \
+       do_permutation(left##d, right##d,  4, 0x0f0f0f0f); \
+       do_permutation(left##d, right##d, 16, 0x0000ffff); \
+       do_permutation(right##d, left##d,  2, 0x33333333); \
+       do_permutation(right##d, left##d,  8, 0x00ff00ff); \
+       movabs $0x3f3f3f3f3f3f3f3f, RT3; \
+       movl left##d, RW0d; \
+       roll $1, right##d; \
+       xorl right##d, RW0d; \
+       andl $0xaaaaaaaa, RW0d; \
+       xorl RW0d, left##d; \
+       xorl RW0d, right##d; \
+       roll $1, left##d; \
+       expand_to_64bits(right, RT3); \
+       expand_to_64bits(left, RT3);
+
+#define final_permutation(left, right) \
+       compress_to_64bits(right); \
+       compress_to_64bits(left); \
+       movl right##d, RW0d; \
+       rorl $1, left##d; \
+       xorl left##d, RW0d; \
+       andl $0xaaaaaaaa, RW0d; \
+       xorl RW0d, right##d; \
+       xorl RW0d, left##d; \
+       rorl $1, right##d; \
+       do_permutation(right##d, left##d,  8, 0x00ff00ff); \
+       do_permutation(right##d, left##d,  2, 0x33333333); \
+       do_permutation(left##d, right##d, 16, 0x0000ffff); \
+       do_permutation(left##d, right##d,  4, 0x0f0f0f0f);
+
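The do_permutation macro above is the classic delta-swap primitive: one XOR-mask-XOR sequence exchanges the bit groups selected by mask between two words at a fixed offset, which lets the DES initial and final permutations run as a handful of swaps instead of bit-by-bit moves. The same step in C, as a sketch:

    #include <stdint.h>

    /* exchange the bits of *a and *b selected by mask, with the group
     * in *a sitting 'offset' bits higher; mirrors one do_permutation() */
    static void delta_swap(uint32_t *a, uint32_t *b,
                           unsigned int offset, uint32_t mask)
    {
            uint32_t t = ((*a >> offset) ^ *b) & mask;

            *b ^= t;
            *a ^= t << offset;
    }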
+#define round1(n, from, to, load_next_key) \
+       xorq from, RW0; \
+       \
+       movzbl RW0bl, RT0d; \
+       movzbl RW0bh, RT1d; \
+       shrq $16, RW0; \
+       movzbl RW0bl, RT2d; \
+       movzbl RW0bh, RT3d; \
+       shrq $16, RW0; \
+       movq s8(, RT0, 8), RT0; \
+       xorq s6(, RT1, 8), to; \
+       movzbl RW0bl, RL1d; \
+       movzbl RW0bh, RT1d; \
+       shrl $16, RW0d; \
+       xorq s4(, RT2, 8), RT0; \
+       xorq s2(, RT3, 8), to; \
+       movzbl RW0bl, RT2d; \
+       movzbl RW0bh, RT3d; \
+       xorq s7(, RL1, 8), RT0; \
+       xorq s5(, RT1, 8), to; \
+       xorq s3(, RT2, 8), RT0; \
+       load_next_key(n, RW0); \
+       xorq RT0, to; \
+       xorq s1(, RT3, 8), to; \
+
+#define load_next_key(n, RWx) \
+       movq (((n) + 1) * 8)(CTX), RWx;
+
+#define dummy2(a, b) /*_*/
+
+#define read_block(io, left, right) \
+       movl    (io), left##d; \
+       movl   4(io), right##d; \
+       bswapl left##d; \
+       bswapl right##d;
+
+#define write_block(io, left, right) \
+       bswapl left##d; \
+       bswapl right##d; \
+       movl   left##d,   (io); \
+       movl   right##d, 4(io);
+
+ENTRY(des3_ede_x86_64_crypt_blk)
+       /* input:
+        *      %rdi: round keys, CTX
+        *      %rsi: dst
+        *      %rdx: src
+        */
+       pushq %rbp;
+       pushq %rbx;
+       pushq %r12;
+       pushq %r13;
+       pushq %r14;
+       pushq %r15;
+
+       read_block(%rdx, RL0, RR0);
+       initial_permutation(RL0, RR0);
+
+       movq (CTX), RW0;
+
+       round1(0, RR0, RL0, load_next_key);
+       round1(1, RL0, RR0, load_next_key);
+       round1(2, RR0, RL0, load_next_key);
+       round1(3, RL0, RR0, load_next_key);
+       round1(4, RR0, RL0, load_next_key);
+       round1(5, RL0, RR0, load_next_key);
+       round1(6, RR0, RL0, load_next_key);
+       round1(7, RL0, RR0, load_next_key);
+       round1(8, RR0, RL0, load_next_key);
+       round1(9, RL0, RR0, load_next_key);
+       round1(10, RR0, RL0, load_next_key);
+       round1(11, RL0, RR0, load_next_key);
+       round1(12, RR0, RL0, load_next_key);
+       round1(13, RL0, RR0, load_next_key);
+       round1(14, RR0, RL0, load_next_key);
+       round1(15, RL0, RR0, load_next_key);
+
+       round1(16+0, RL0, RR0, load_next_key);
+       round1(16+1, RR0, RL0, load_next_key);
+       round1(16+2, RL0, RR0, load_next_key);
+       round1(16+3, RR0, RL0, load_next_key);
+       round1(16+4, RL0, RR0, load_next_key);
+       round1(16+5, RR0, RL0, load_next_key);
+       round1(16+6, RL0, RR0, load_next_key);
+       round1(16+7, RR0, RL0, load_next_key);
+       round1(16+8, RL0, RR0, load_next_key);
+       round1(16+9, RR0, RL0, load_next_key);
+       round1(16+10, RL0, RR0, load_next_key);
+       round1(16+11, RR0, RL0, load_next_key);
+       round1(16+12, RL0, RR0, load_next_key);
+       round1(16+13, RR0, RL0, load_next_key);
+       round1(16+14, RL0, RR0, load_next_key);
+       round1(16+15, RR0, RL0, load_next_key);
+
+       round1(32+0, RR0, RL0, load_next_key);
+       round1(32+1, RL0, RR0, load_next_key);
+       round1(32+2, RR0, RL0, load_next_key);
+       round1(32+3, RL0, RR0, load_next_key);
+       round1(32+4, RR0, RL0, load_next_key);
+       round1(32+5, RL0, RR0, load_next_key);
+       round1(32+6, RR0, RL0, load_next_key);
+       round1(32+7, RL0, RR0, load_next_key);
+       round1(32+8, RR0, RL0, load_next_key);
+       round1(32+9, RL0, RR0, load_next_key);
+       round1(32+10, RR0, RL0, load_next_key);
+       round1(32+11, RL0, RR0, load_next_key);
+       round1(32+12, RR0, RL0, load_next_key);
+       round1(32+13, RL0, RR0, load_next_key);
+       round1(32+14, RR0, RL0, load_next_key);
+       round1(32+15, RL0, RR0, dummy2);
+
+       final_permutation(RR0, RL0);
+       write_block(%rsi, RR0, RL0);
+
+       popq %r15;
+       popq %r14;
+       popq %r13;
+       popq %r12;
+       popq %rbx;
+       popq %rbp;
+
+       ret;
+ENDPROC(des3_ede_x86_64_crypt_blk)
+
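Each round1 invocation above is one Feistel round: the expanded half-block is XORed with the 64-bit round key, and every byte of the result, which holds a 6-bit group thanks to the 0x3f3f3f3f3f3f3f3f mask applied in initial_permutation, indexes one of the eight pre-permuted 64-bit S-box tables (.L_s1 through .L_s8 at the end of the file). Roughly, in C, as a sketch against those assumed tables:

    #include <stdint.h>

    extern const uint64_t s1[64], s2[64], s3[64], s4[64],
                          s5[64], s6[64], s7[64], s8[64];

    /* one round, a sketch: each byte of w is already <= 0x3f */
    static uint64_t feistel_round(uint64_t half, uint64_t subkey,
                                  uint64_t out)
    {
            uint64_t w = half ^ subkey;

            out ^= s8[(w >>  0) & 0xff] ^ s6[(w >>  8) & 0xff];
            out ^= s4[(w >> 16) & 0xff] ^ s2[(w >> 24) & 0xff];
            out ^= s7[(w >> 32) & 0xff] ^ s5[(w >> 40) & 0xff];
            out ^= s3[(w >> 48) & 0xff] ^ s1[(w >> 56) & 0xff];
            return out;
    }

The assembly interleaves the lookups with the next round's key load to hide latency; the 3-way variant below goes further and interleaves three independent blocks.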
+/***********************************************************************
+ * 3-way 3DES
+ ***********************************************************************/
+
+#define initial_permutation3(left, right) \
+       do_permutation(left##0d, right##0d,  4, 0x0f0f0f0f); \
+       do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
+         do_permutation(left##1d, right##1d,  4, 0x0f0f0f0f); \
+         do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
+           do_permutation(left##2d, right##2d,  4, 0x0f0f0f0f); \
+           do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
+           \
+       do_permutation(right##0d, left##0d,  2, 0x33333333); \
+       do_permutation(right##0d, left##0d,  8, 0x00ff00ff); \
+         do_permutation(right##1d, left##1d,  2, 0x33333333); \
+         do_permutation(right##1d, left##1d,  8, 0x00ff00ff); \
+           do_permutation(right##2d, left##2d,  2, 0x33333333); \
+           do_permutation(right##2d, left##2d,  8, 0x00ff00ff); \
+           \
+       movabs $0x3f3f3f3f3f3f3f3f, RT3; \
+           \
+       movl left##0d, RW0d; \
+       roll $1, right##0d; \
+       xorl right##0d, RW0d; \
+       andl $0xaaaaaaaa, RW0d; \
+       xorl RW0d, left##0d; \
+       xorl RW0d, right##0d; \
+       roll $1, left##0d; \
+       expand_to_64bits(right##0, RT3); \
+       expand_to_64bits(left##0, RT3); \
+         movl left##1d, RW1d; \
+         roll $1, right##1d; \
+         xorl right##1d, RW1d; \
+         andl $0xaaaaaaaa, RW1d; \
+         xorl RW1d, left##1d; \
+         xorl RW1d, right##1d; \
+         roll $1, left##1d; \
+         expand_to_64bits(right##1, RT3); \
+         expand_to_64bits(left##1, RT3); \
+           movl left##2d, RW2d; \
+           roll $1, right##2d; \
+           xorl right##2d, RW2d; \
+           andl $0xaaaaaaaa, RW2d; \
+           xorl RW2d, left##2d; \
+           xorl RW2d, right##2d; \
+           roll $1, left##2d; \
+           expand_to_64bits(right##2, RT3); \
+           expand_to_64bits(left##2, RT3);
+
+#define final_permutation3(left, right) \
+       compress_to_64bits(right##0); \
+       compress_to_64bits(left##0); \
+       movl right##0d, RW0d; \
+       rorl $1, left##0d; \
+       xorl left##0d, RW0d; \
+       andl $0xaaaaaaaa, RW0d; \
+       xorl RW0d, right##0d; \
+       xorl RW0d, left##0d; \
+       rorl $1, right##0d; \
+         compress_to_64bits(right##1); \
+         compress_to_64bits(left##1); \
+         movl right##1d, RW1d; \
+         rorl $1, left##1d; \
+         xorl left##1d, RW1d; \
+         andl $0xaaaaaaaa, RW1d; \
+         xorl RW1d, right##1d; \
+         xorl RW1d, left##1d; \
+         rorl $1, right##1d; \
+           compress_to_64bits(right##2); \
+           compress_to_64bits(left##2); \
+           movl right##2d, RW2d; \
+           rorl $1, left##2d; \
+           xorl left##2d, RW2d; \
+           andl $0xaaaaaaaa, RW2d; \
+           xorl RW2d, right##2d; \
+           xorl RW2d, left##2d; \
+           rorl $1, right##2d; \
+           \
+       do_permutation(right##0d, left##0d,  8, 0x00ff00ff); \
+       do_permutation(right##0d, left##0d,  2, 0x33333333); \
+         do_permutation(right##1d, left##1d,  8, 0x00ff00ff); \
+         do_permutation(right##1d, left##1d,  2, 0x33333333); \
+           do_permutation(right##2d, left##2d,  8, 0x00ff00ff); \
+           do_permutation(right##2d, left##2d,  2, 0x33333333); \
+           \
+       do_permutation(left##0d, right##0d, 16, 0x0000ffff); \
+       do_permutation(left##0d, right##0d,  4, 0x0f0f0f0f); \
+         do_permutation(left##1d, right##1d, 16, 0x0000ffff); \
+         do_permutation(left##1d, right##1d,  4, 0x0f0f0f0f); \
+           do_permutation(left##2d, right##2d, 16, 0x0000ffff); \
+           do_permutation(left##2d, right##2d,  4, 0x0f0f0f0f);
+
+#define round3(n, from, to, load_next_key, do_movq) \
+       xorq from##0, RW0; \
+       movzbl RW0bl, RT3d; \
+       movzbl RW0bh, RT1d; \
+       shrq $16, RW0; \
+       xorq s8(, RT3, 8), to##0; \
+       xorq s6(, RT1, 8), to##0; \
+       movzbl RW0bl, RT3d; \
+       movzbl RW0bh, RT1d; \
+       shrq $16, RW0; \
+       xorq s4(, RT3, 8), to##0; \
+       xorq s2(, RT1, 8), to##0; \
+       movzbl RW0bl, RT3d; \
+       movzbl RW0bh, RT1d; \
+       shrl $16, RW0d; \
+       xorq s7(, RT3, 8), to##0; \
+       xorq s5(, RT1, 8), to##0; \
+       movzbl RW0bl, RT3d; \
+       movzbl RW0bh, RT1d; \
+       load_next_key(n, RW0); \
+       xorq s3(, RT3, 8), to##0; \
+       xorq s1(, RT1, 8), to##0; \
+               xorq from##1, RW1; \
+               movzbl RW1bl, RT3d; \
+               movzbl RW1bh, RT1d; \
+               shrq $16, RW1; \
+               xorq s8(, RT3, 8), to##1; \
+               xorq s6(, RT1, 8), to##1; \
+               movzbl RW1bl, RT3d; \
+               movzbl RW1bh, RT1d; \
+               shrq $16, RW1; \
+               xorq s4(, RT3, 8), to##1; \
+               xorq s2(, RT1, 8), to##1; \
+               movzbl RW1bl, RT3d; \
+               movzbl RW1bh, RT1d; \
+               shrl $16, RW1d; \
+               xorq s7(, RT3, 8), to##1; \
+               xorq s5(, RT1, 8), to##1; \
+               movzbl RW1bl, RT3d; \
+               movzbl RW1bh, RT1d; \
+               do_movq(RW0, RW1); \
+               xorq s3(, RT3, 8), to##1; \
+               xorq s1(, RT1, 8), to##1; \
+                       xorq from##2, RW2; \
+                       movzbl RW2bl, RT3d; \
+                       movzbl RW2bh, RT1d; \
+                       shrq $16, RW2; \
+                       xorq s8(, RT3, 8), to##2; \
+                       xorq s6(, RT1, 8), to##2; \
+                       movzbl RW2bl, RT3d; \
+                       movzbl RW2bh, RT1d; \
+                       shrq $16, RW2; \
+                       xorq s4(, RT3, 8), to##2; \
+                       xorq s2(, RT1, 8), to##2; \
+                       movzbl RW2bl, RT3d; \
+                       movzbl RW2bh, RT1d; \
+                       shrl $16, RW2d; \
+                       xorq s7(, RT3, 8), to##2; \
+                       xorq s5(, RT1, 8), to##2; \
+                       movzbl RW2bl, RT3d; \
+                       movzbl RW2bh, RT1d; \
+                       do_movq(RW0, RW2); \
+                       xorq s3(, RT3, 8), to##2; \
+                       xorq s1(, RT1, 8), to##2;
+
+#define __movq(src, dst) \
+       movq src, dst;
+
+ENTRY(des3_ede_x86_64_crypt_blk_3way)
+       /* input:
+        *      %rdi: ctx, round keys
+        *      %rsi: dst (3 blocks)
+        *      %rdx: src (3 blocks)
+        */
+
+       pushq %rbp;
+       pushq %rbx;
+       pushq %r12;
+       pushq %r13;
+       pushq %r14;
+       pushq %r15;
+
+       /* load input */
+       movl 0 * 4(%rdx), RL0d;
+       movl 1 * 4(%rdx), RR0d;
+       movl 2 * 4(%rdx), RL1d;
+       movl 3 * 4(%rdx), RR1d;
+       movl 4 * 4(%rdx), RL2d;
+       movl 5 * 4(%rdx), RR2d;
+
+       bswapl RL0d;
+       bswapl RR0d;
+       bswapl RL1d;
+       bswapl RR1d;
+       bswapl RL2d;
+       bswapl RR2d;
+
+       initial_permutation3(RL, RR);
+
+       movq 0(CTX), RW0;
+       movq RW0, RW1;
+       movq RW0, RW2;
+
+       round3(0, RR, RL, load_next_key, __movq);
+       round3(1, RL, RR, load_next_key, __movq);
+       round3(2, RR, RL, load_next_key, __movq);
+       round3(3, RL, RR, load_next_key, __movq);
+       round3(4, RR, RL, load_next_key, __movq);
+       round3(5, RL, RR, load_next_key, __movq);
+       round3(6, RR, RL, load_next_key, __movq);
+       round3(7, RL, RR, load_next_key, __movq);
+       round3(8, RR, RL, load_next_key, __movq);
+       round3(9, RL, RR, load_next_key, __movq);
+       round3(10, RR, RL, load_next_key, __movq);
+       round3(11, RL, RR, load_next_key, __movq);
+       round3(12, RR, RL, load_next_key, __movq);
+       round3(13, RL, RR, load_next_key, __movq);
+       round3(14, RR, RL, load_next_key, __movq);
+       round3(15, RL, RR, load_next_key, __movq);
+
+       round3(16+0, RL, RR, load_next_key, __movq);
+       round3(16+1, RR, RL, load_next_key, __movq);
+       round3(16+2, RL, RR, load_next_key, __movq);
+       round3(16+3, RR, RL, load_next_key, __movq);
+       round3(16+4, RL, RR, load_next_key, __movq);
+       round3(16+5, RR, RL, load_next_key, __movq);
+       round3(16+6, RL, RR, load_next_key, __movq);
+       round3(16+7, RR, RL, load_next_key, __movq);
+       round3(16+8, RL, RR, load_next_key, __movq);
+       round3(16+9, RR, RL, load_next_key, __movq);
+       round3(16+10, RL, RR, load_next_key, __movq);
+       round3(16+11, RR, RL, load_next_key, __movq);
+       round3(16+12, RL, RR, load_next_key, __movq);
+       round3(16+13, RR, RL, load_next_key, __movq);
+       round3(16+14, RL, RR, load_next_key, __movq);
+       round3(16+15, RR, RL, load_next_key, __movq);
+
+       round3(32+0, RR, RL, load_next_key, __movq);
+       round3(32+1, RL, RR, load_next_key, __movq);
+       round3(32+2, RR, RL, load_next_key, __movq);
+       round3(32+3, RL, RR, load_next_key, __movq);
+       round3(32+4, RR, RL, load_next_key, __movq);
+       round3(32+5, RL, RR, load_next_key, __movq);
+       round3(32+6, RR, RL, load_next_key, __movq);
+       round3(32+7, RL, RR, load_next_key, __movq);
+       round3(32+8, RR, RL, load_next_key, __movq);
+       round3(32+9, RL, RR, load_next_key, __movq);
+       round3(32+10, RR, RL, load_next_key, __movq);
+       round3(32+11, RL, RR, load_next_key, __movq);
+       round3(32+12, RR, RL, load_next_key, __movq);
+       round3(32+13, RL, RR, load_next_key, __movq);
+       round3(32+14, RR, RL, load_next_key, __movq);
+       round3(32+15, RL, RR, dummy2, dummy2);
+
+       final_permutation3(RR, RL);
+
+       bswapl RR0d;
+       bswapl RL0d;
+       bswapl RR1d;
+       bswapl RL1d;
+       bswapl RR2d;
+       bswapl RL2d;
+
+       movl RR0d, 0 * 4(%rsi);
+       movl RL0d, 1 * 4(%rsi);
+       movl RR1d, 2 * 4(%rsi);
+       movl RL1d, 3 * 4(%rsi);
+       movl RR2d, 4 * 4(%rsi);
+       movl RL2d, 5 * 4(%rsi);
+
+       popq %r15;
+       popq %r14;
+       popq %r13;
+       popq %r12;
+       popq %rbx;
+       popq %rbp;
+
+       ret;
+ENDPROC(des3_ede_x86_64_crypt_blk_3way)
+
+.data
+.align 16
+.L_s1:
+       .quad 0x0010100001010400, 0x0000000000000000
+       .quad 0x0000100000010000, 0x0010100001010404
+       .quad 0x0010100001010004, 0x0000100000010404
+       .quad 0x0000000000000004, 0x0000100000010000
+       .quad 0x0000000000000400, 0x0010100001010400
+       .quad 0x0010100001010404, 0x0000000000000400
+       .quad 0x0010000001000404, 0x0010100001010004
+       .quad 0x0010000001000000, 0x0000000000000004
+       .quad 0x0000000000000404, 0x0010000001000400
+       .quad 0x0010000001000400, 0x0000100000010400
+       .quad 0x0000100000010400, 0x0010100001010000
+       .quad 0x0010100001010000, 0x0010000001000404
+       .quad 0x0000100000010004, 0x0010000001000004
+       .quad 0x0010000001000004, 0x0000100000010004
+       .quad 0x0000000000000000, 0x0000000000000404
+       .quad 0x0000100000010404, 0x0010000001000000
+       .quad 0x0000100000010000, 0x0010100001010404
+       .quad 0x0000000000000004, 0x0010100001010000
+       .quad 0x0010100001010400, 0x0010000001000000
+       .quad 0x0010000001000000, 0x0000000000000400
+       .quad 0x0010100001010004, 0x0000100000010000
+       .quad 0x0000100000010400, 0x0010000001000004
+       .quad 0x0000000000000400, 0x0000000000000004
+       .quad 0x0010000001000404, 0x0000100000010404
+       .quad 0x0010100001010404, 0x0000100000010004
+       .quad 0x0010100001010000, 0x0010000001000404
+       .quad 0x0010000001000004, 0x0000000000000404
+       .quad 0x0000100000010404, 0x0010100001010400
+       .quad 0x0000000000000404, 0x0010000001000400
+       .quad 0x0010000001000400, 0x0000000000000000
+       .quad 0x0000100000010004, 0x0000100000010400
+       .quad 0x0000000000000000, 0x0010100001010004
+.L_s2:
+       .quad 0x0801080200100020, 0x0800080000000000
+       .quad 0x0000080000000000, 0x0001080200100020
+       .quad 0x0001000000100000, 0x0000000200000020
+       .quad 0x0801000200100020, 0x0800080200000020
+       .quad 0x0800000200000020, 0x0801080200100020
+       .quad 0x0801080000100000, 0x0800000000000000
+       .quad 0x0800080000000000, 0x0001000000100000
+       .quad 0x0000000200000020, 0x0801000200100020
+       .quad 0x0001080000100000, 0x0001000200100020
+       .quad 0x0800080200000020, 0x0000000000000000
+       .quad 0x0800000000000000, 0x0000080000000000
+       .quad 0x0001080200100020, 0x0801000000100000
+       .quad 0x0001000200100020, 0x0800000200000020
+       .quad 0x0000000000000000, 0x0001080000100000
+       .quad 0x0000080200000020, 0x0801080000100000
+       .quad 0x0801000000100000, 0x0000080200000020
+       .quad 0x0000000000000000, 0x0001080200100020
+       .quad 0x0801000200100020, 0x0001000000100000
+       .quad 0x0800080200000020, 0x0801000000100000
+       .quad 0x0801080000100000, 0x0000080000000000
+       .quad 0x0801000000100000, 0x0800080000000000
+       .quad 0x0000000200000020, 0x0801080200100020
+       .quad 0x0001080200100020, 0x0000000200000020
+       .quad 0x0000080000000000, 0x0800000000000000
+       .quad 0x0000080200000020, 0x0801080000100000
+       .quad 0x0001000000100000, 0x0800000200000020
+       .quad 0x0001000200100020, 0x0800080200000020
+       .quad 0x0800000200000020, 0x0001000200100020
+       .quad 0x0001080000100000, 0x0000000000000000
+       .quad 0x0800080000000000, 0x0000080200000020
+       .quad 0x0800000000000000, 0x0801000200100020
+       .quad 0x0801080200100020, 0x0001080000100000
+.L_s3:
+       .quad 0x0000002000000208, 0x0000202008020200
+       .quad 0x0000000000000000, 0x0000200008020008
+       .quad 0x0000002008000200, 0x0000000000000000
+       .quad 0x0000202000020208, 0x0000002008000200
+       .quad 0x0000200000020008, 0x0000000008000008
+       .quad 0x0000000008000008, 0x0000200000020000
+       .quad 0x0000202008020208, 0x0000200000020008
+       .quad 0x0000200008020000, 0x0000002000000208
+       .quad 0x0000000008000000, 0x0000000000000008
+       .quad 0x0000202008020200, 0x0000002000000200
+       .quad 0x0000202000020200, 0x0000200008020000
+       .quad 0x0000200008020008, 0x0000202000020208
+       .quad 0x0000002008000208, 0x0000202000020200
+       .quad 0x0000200000020000, 0x0000002008000208
+       .quad 0x0000000000000008, 0x0000202008020208
+       .quad 0x0000002000000200, 0x0000000008000000
+       .quad 0x0000202008020200, 0x0000000008000000
+       .quad 0x0000200000020008, 0x0000002000000208
+       .quad 0x0000200000020000, 0x0000202008020200
+       .quad 0x0000002008000200, 0x0000000000000000
+       .quad 0x0000002000000200, 0x0000200000020008
+       .quad 0x0000202008020208, 0x0000002008000200
+       .quad 0x0000000008000008, 0x0000002000000200
+       .quad 0x0000000000000000, 0x0000200008020008
+       .quad 0x0000002008000208, 0x0000200000020000
+       .quad 0x0000000008000000, 0x0000202008020208
+       .quad 0x0000000000000008, 0x0000202000020208
+       .quad 0x0000202000020200, 0x0000000008000008
+       .quad 0x0000200008020000, 0x0000002008000208
+       .quad 0x0000002000000208, 0x0000200008020000
+       .quad 0x0000202000020208, 0x0000000000000008
+       .quad 0x0000200008020008, 0x0000202000020200
+.L_s4:
+       .quad 0x1008020000002001, 0x1000020800002001
+       .quad 0x1000020800002001, 0x0000000800000000
+       .quad 0x0008020800002000, 0x1008000800000001
+       .quad 0x1008000000000001, 0x1000020000002001
+       .quad 0x0000000000000000, 0x0008020000002000
+       .quad 0x0008020000002000, 0x1008020800002001
+       .quad 0x1000000800000001, 0x0000000000000000
+       .quad 0x0008000800000000, 0x1008000000000001
+       .quad 0x1000000000000001, 0x0000020000002000
+       .quad 0x0008000000000000, 0x1008020000002001
+       .quad 0x0000000800000000, 0x0008000000000000
+       .quad 0x1000020000002001, 0x0000020800002000
+       .quad 0x1008000800000001, 0x1000000000000001
+       .quad 0x0000020800002000, 0x0008000800000000
+       .quad 0x0000020000002000, 0x0008020800002000
+       .quad 0x1008020800002001, 0x1000000800000001
+       .quad 0x0008000800000000, 0x1008000000000001
+       .quad 0x0008020000002000, 0x1008020800002001
+       .quad 0x1000000800000001, 0x0000000000000000
+       .quad 0x0000000000000000, 0x0008020000002000
+       .quad 0x0000020800002000, 0x0008000800000000
+       .quad 0x1008000800000001, 0x1000000000000001
+       .quad 0x1008020000002001, 0x1000020800002001
+       .quad 0x1000020800002001, 0x0000000800000000
+       .quad 0x1008020800002001, 0x1000000800000001
+       .quad 0x1000000000000001, 0x0000020000002000
+       .quad 0x1008000000000001, 0x1000020000002001
+       .quad 0x0008020800002000, 0x1008000800000001
+       .quad 0x1000020000002001, 0x0000020800002000
+       .quad 0x0008000000000000, 0x1008020000002001
+       .quad 0x0000000800000000, 0x0008000000000000
+       .quad 0x0000020000002000, 0x0008020800002000
+.L_s5:
+       .quad 0x0000001000000100, 0x0020001002080100
+       .quad 0x0020000002080000, 0x0420001002000100
+       .quad 0x0000000000080000, 0x0000001000000100
+       .quad 0x0400000000000000, 0x0020000002080000
+       .quad 0x0400001000080100, 0x0000000000080000
+       .quad 0x0020001002000100, 0x0400001000080100
+       .quad 0x0420001002000100, 0x0420000002080000
+       .quad 0x0000001000080100, 0x0400000000000000
+       .quad 0x0020000002000000, 0x0400000000080000
+       .quad 0x0400000000080000, 0x0000000000000000
+       .quad 0x0400001000000100, 0x0420001002080100
+       .quad 0x0420001002080100, 0x0020001002000100
+       .quad 0x0420000002080000, 0x0400001000000100
+       .quad 0x0000000000000000, 0x0420000002000000
+       .quad 0x0020001002080100, 0x0020000002000000
+       .quad 0x0420000002000000, 0x0000001000080100
+       .quad 0x0000000000080000, 0x0420001002000100
+       .quad 0x0000001000000100, 0x0020000002000000
+       .quad 0x0400000000000000, 0x0020000002080000
+       .quad 0x0420001002000100, 0x0400001000080100
+       .quad 0x0020001002000100, 0x0400000000000000
+       .quad 0x0420000002080000, 0x0020001002080100
+       .quad 0x0400001000080100, 0x0000001000000100
+       .quad 0x0020000002000000, 0x0420000002080000
+       .quad 0x0420001002080100, 0x0000001000080100
+       .quad 0x0420000002000000, 0x0420001002080100
+       .quad 0x0020000002080000, 0x0000000000000000
+       .quad 0x0400000000080000, 0x0420000002000000
+       .quad 0x0000001000080100, 0x0020001002000100
+       .quad 0x0400001000000100, 0x0000000000080000
+       .quad 0x0000000000000000, 0x0400000000080000
+       .quad 0x0020001002080100, 0x0400001000000100
+.L_s6:
+       .quad 0x0200000120000010, 0x0204000020000000
+       .quad 0x0000040000000000, 0x0204040120000010
+       .quad 0x0204000020000000, 0x0000000100000010
+       .quad 0x0204040120000010, 0x0004000000000000
+       .quad 0x0200040020000000, 0x0004040100000010
+       .quad 0x0004000000000000, 0x0200000120000010
+       .quad 0x0004000100000010, 0x0200040020000000
+       .quad 0x0200000020000000, 0x0000040100000010
+       .quad 0x0000000000000000, 0x0004000100000010
+       .quad 0x0200040120000010, 0x0000040000000000
+       .quad 0x0004040000000000, 0x0200040120000010
+       .quad 0x0000000100000010, 0x0204000120000010
+       .quad 0x0204000120000010, 0x0000000000000000
+       .quad 0x0004040100000010, 0x0204040020000000
+       .quad 0x0000040100000010, 0x0004040000000000
+       .quad 0x0204040020000000, 0x0200000020000000
+       .quad 0x0200040020000000, 0x0000000100000010
+       .quad 0x0204000120000010, 0x0004040000000000
+       .quad 0x0204040120000010, 0x0004000000000000
+       .quad 0x0000040100000010, 0x0200000120000010
+       .quad 0x0004000000000000, 0x0200040020000000
+       .quad 0x0200000020000000, 0x0000040100000010
+       .quad 0x0200000120000010, 0x0204040120000010
+       .quad 0x0004040000000000, 0x0204000020000000
+       .quad 0x0004040100000010, 0x0204040020000000
+       .quad 0x0000000000000000, 0x0204000120000010
+       .quad 0x0000000100000010, 0x0000040000000000
+       .quad 0x0204000020000000, 0x0004040100000010
+       .quad 0x0000040000000000, 0x0004000100000010
+       .quad 0x0200040120000010, 0x0000000000000000
+       .quad 0x0204040020000000, 0x0200000020000000
+       .quad 0x0004000100000010, 0x0200040120000010
+.L_s7:
+       .quad 0x0002000000200000, 0x2002000004200002
+       .quad 0x2000000004000802, 0x0000000000000000
+       .quad 0x0000000000000800, 0x2000000004000802
+       .quad 0x2002000000200802, 0x0002000004200800
+       .quad 0x2002000004200802, 0x0002000000200000
+       .quad 0x0000000000000000, 0x2000000004000002
+       .quad 0x2000000000000002, 0x0000000004000000
+       .quad 0x2002000004200002, 0x2000000000000802
+       .quad 0x0000000004000800, 0x2002000000200802
+       .quad 0x2002000000200002, 0x0000000004000800
+       .quad 0x2000000004000002, 0x0002000004200000
+       .quad 0x0002000004200800, 0x2002000000200002
+       .quad 0x0002000004200000, 0x0000000000000800
+       .quad 0x2000000000000802, 0x2002000004200802
+       .quad 0x0002000000200800, 0x2000000000000002
+       .quad 0x0000000004000000, 0x0002000000200800
+       .quad 0x0000000004000000, 0x0002000000200800
+       .quad 0x0002000000200000, 0x2000000004000802
+       .quad 0x2000000004000802, 0x2002000004200002
+       .quad 0x2002000004200002, 0x2000000000000002
+       .quad 0x2002000000200002, 0x0000000004000000
+       .quad 0x0000000004000800, 0x0002000000200000
+       .quad 0x0002000004200800, 0x2000000000000802
+       .quad 0x2002000000200802, 0x0002000004200800
+       .quad 0x2000000000000802, 0x2000000004000002
+       .quad 0x2002000004200802, 0x0002000004200000
+       .quad 0x0002000000200800, 0x0000000000000000
+       .quad 0x2000000000000002, 0x2002000004200802
+       .quad 0x0000000000000000, 0x2002000000200802
+       .quad 0x0002000004200000, 0x0000000000000800
+       .quad 0x2000000004000002, 0x0000000004000800
+       .quad 0x0000000000000800, 0x2002000000200002
+.L_s8:
+       .quad 0x0100010410001000, 0x0000010000001000
+       .quad 0x0000000000040000, 0x0100010410041000
+       .quad 0x0100000010000000, 0x0100010410001000
+       .quad 0x0000000400000000, 0x0100000010000000
+       .quad 0x0000000400040000, 0x0100000010040000
+       .quad 0x0100010410041000, 0x0000010000041000
+       .quad 0x0100010010041000, 0x0000010400041000
+       .quad 0x0000010000001000, 0x0000000400000000
+       .quad 0x0100000010040000, 0x0100000410000000
+       .quad 0x0100010010001000, 0x0000010400001000
+       .quad 0x0000010000041000, 0x0000000400040000
+       .quad 0x0100000410040000, 0x0100010010041000
+       .quad 0x0000010400001000, 0x0000000000000000
+       .quad 0x0000000000000000, 0x0100000410040000
+       .quad 0x0100000410000000, 0x0100010010001000
+       .quad 0x0000010400041000, 0x0000000000040000
+       .quad 0x0000010400041000, 0x0000000000040000
+       .quad 0x0100010010041000, 0x0000010000001000
+       .quad 0x0000000400000000, 0x0100000410040000
+       .quad 0x0000010000001000, 0x0000010400041000
+       .quad 0x0100010010001000, 0x0000000400000000
+       .quad 0x0100000410000000, 0x0100000010040000
+       .quad 0x0100000410040000, 0x0100000010000000
+       .quad 0x0000000000040000, 0x0100010410001000
+       .quad 0x0000000000000000, 0x0100010410041000
+       .quad 0x0000000400040000, 0x0100000410000000
+       .quad 0x0100000010040000, 0x0100010010001000
+       .quad 0x0100010410001000, 0x0000000000000000
+       .quad 0x0100010410041000, 0x0000010000041000
+       .quad 0x0000010000041000, 0x0000010400001000
+       .quad 0x0000010400001000, 0x0000000400040000
+       .quad 0x0100000010000000, 0x0100010010041000
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c
new file mode 100644 (file)
index 0000000..0e9c066
--- /dev/null
@@ -0,0 +1,509 @@
+/*
+ * Glue Code for assembler-optimized version of 3DES
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
+ *
+ * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
+ *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
+ * CTR part based on code (crypto/ctr.c) by:
+ *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/processor.h>
+#include <crypto/des.h>
+#include <linux/crypto.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+
+struct des3_ede_x86_ctx {
+       u32 enc_expkey[DES3_EDE_EXPKEY_WORDS];
+       u32 dec_expkey[DES3_EDE_EXPKEY_WORDS];
+};
+
+/* regular block cipher functions */
+asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst,
+                                         const u8 *src);
+
+/* 3-way parallel cipher functions */
+asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst,
+                                              const u8 *src);
+
+static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
+                                   const u8 *src)
+{
+       u32 *enc_ctx = ctx->enc_expkey;
+
+       des3_ede_x86_64_crypt_blk(enc_ctx, dst, src);
+}
+
+static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst,
+                                   const u8 *src)
+{
+       u32 *dec_ctx = ctx->dec_expkey;
+
+       des3_ede_x86_64_crypt_blk(dec_ctx, dst, src);
+}
+
+static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
+                                        const u8 *src)
+{
+       u32 *enc_ctx = ctx->enc_expkey;
+
+       des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src);
+}
+
+static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst,
+                                        const u8 *src)
+{
+       u32 *dec_ctx = ctx->dec_expkey;
+
+       des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src);
+}
+
+static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src);
+}
+
+static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
+{
+       des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src);
+}
+
+static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
+                    const u32 *expkey)
+{
+       unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+       unsigned int nbytes;
+       int err;
+
+       err = blkcipher_walk_virt(desc, walk);
+
+       while ((nbytes = walk->nbytes)) {
+               u8 *wsrc = walk->src.virt.addr;
+               u8 *wdst = walk->dst.virt.addr;
+
+               /* Process three block batch */
+               if (nbytes >= bsize * 3) {
+                       do {
+                               des3_ede_x86_64_crypt_blk_3way(expkey, wdst,
+                                                              wsrc);
+
+                               wsrc += bsize * 3;
+                               wdst += bsize * 3;
+                               nbytes -= bsize * 3;
+                       } while (nbytes >= bsize * 3);
+
+                       if (nbytes < bsize)
+                               goto done;
+               }
+
+               /* Handle leftovers */
+               do {
+                       des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc);
+
+                       wsrc += bsize;
+                       wdst += bsize;
+                       nbytes -= bsize;
+               } while (nbytes >= bsize);
+
+done:
+               err = blkcipher_walk_done(desc, walk, nbytes);
+       }
+
+       return err;
+}
+
+static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ecb_crypt(desc, &walk, ctx->enc_expkey);
+}
+
+static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       return ecb_crypt(desc, &walk, ctx->dec_expkey);
+}
+
+static unsigned int __cbc_encrypt(struct blkcipher_desc *desc,
+                                 struct blkcipher_walk *walk)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+       unsigned int nbytes = walk->nbytes;
+       u64 *src = (u64 *)walk->src.virt.addr;
+       u64 *dst = (u64 *)walk->dst.virt.addr;
+       u64 *iv = (u64 *)walk->iv;
+
+       do {
+               *dst = *src ^ *iv;
+               des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst);
+               iv = dst;
+
+               src += 1;
+               dst += 1;
+               nbytes -= bsize;
+       } while (nbytes >= bsize);
+
+       *(u64 *)walk->iv = *iv;
+       return nbytes;
+}
+
+static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+{
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while ((nbytes = walk.nbytes)) {
+               nbytes = __cbc_encrypt(desc, &walk);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static unsigned int __cbc_decrypt(struct blkcipher_desc *desc,
+                                 struct blkcipher_walk *walk)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+       unsigned int nbytes = walk->nbytes;
+       u64 *src = (u64 *)walk->src.virt.addr;
+       u64 *dst = (u64 *)walk->dst.virt.addr;
+       u64 ivs[3 - 1];
+       u64 last_iv;
+
+       /* Start of the last block. */
+       src += nbytes / bsize - 1;
+       dst += nbytes / bsize - 1;
+
+       last_iv = *src;
+
+       /* Process three block batch */
+       if (nbytes >= bsize * 3) {
+               do {
+                       nbytes -= bsize * 3 - bsize;
+                       src -= 3 - 1;
+                       dst -= 3 - 1;
+
+                       ivs[0] = src[0];
+                       ivs[1] = src[1];
+
+                       des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src);
+
+                       dst[1] ^= ivs[0];
+                       dst[2] ^= ivs[1];
+
+                       nbytes -= bsize;
+                       if (nbytes < bsize)
+                               goto done;
+
+                       *dst ^= *(src - 1);
+                       src -= 1;
+                       dst -= 1;
+               } while (nbytes >= bsize * 3);
+       }
+
+       /* Handle leftovers */
+       for (;;) {
+               des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src);
+
+               nbytes -= bsize;
+               if (nbytes < bsize)
+                       break;
+
+               *dst ^= *(src - 1);
+               src -= 1;
+               dst -= 1;
+       }
+
+done:
+       *dst ^= *(u64 *)walk->iv;
+       *(u64 *)walk->iv = last_iv;
+
+       return nbytes;
+}
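__cbc_decrypt walks from the last block backwards because CBC decryption, unlike encryption, has no chaining dependency: each plaintext is P[i] = D(C[i]) XOR C[i-1] (with C[-1] = IV), so three ciphertext blocks can be handed to the 3-way core at once, and the backwards traversal keeps the needed ciphertexts intact when src and dst alias. A scalar sketch of the same in-place scheme, with dec() standing in for the single-block decrypt:

    #include <stdint.h>

    /* in-place CBC decrypt, last block first; blk[i - 1] is still
     * ciphertext when blk[i] is rewritten, so aliasing is safe */
    static void cbc_decrypt_sketch(uint64_t *blk, unsigned int n,
                                   uint64_t iv,
                                   uint64_t (*dec)(uint64_t))
    {
            unsigned int i;

            for (i = n; i-- > 0; )
                    blk[i] = dec(blk[i]) ^ (i ? blk[i - 1] : iv);
    }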
+
+static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                      struct scatterlist *src, unsigned int nbytes)
+{
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while ((nbytes = walk.nbytes)) {
+               nbytes = __cbc_decrypt(desc, &walk);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       return err;
+}
+
+static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx,
+                           struct blkcipher_walk *walk)
+{
+       u8 *ctrblk = walk->iv;
+       u8 keystream[DES3_EDE_BLOCK_SIZE];
+       u8 *src = walk->src.virt.addr;
+       u8 *dst = walk->dst.virt.addr;
+       unsigned int nbytes = walk->nbytes;
+
+       des3_ede_enc_blk(ctx, keystream, ctrblk);
+       crypto_xor(keystream, src, nbytes);
+       memcpy(dst, keystream, nbytes);
+
+       crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE);
+}
+
+static unsigned int __ctr_crypt(struct blkcipher_desc *desc,
+                               struct blkcipher_walk *walk)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       unsigned int bsize = DES3_EDE_BLOCK_SIZE;
+       unsigned int nbytes = walk->nbytes;
+       __be64 *src = (__be64 *)walk->src.virt.addr;
+       __be64 *dst = (__be64 *)walk->dst.virt.addr;
+       u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv);
+       __be64 ctrblocks[3];
+
+       /* Process three block batch */
+       if (nbytes >= bsize * 3) {
+               do {
+                       /* create ctrblks for parallel encrypt */
+                       ctrblocks[0] = cpu_to_be64(ctrblk++);
+                       ctrblocks[1] = cpu_to_be64(ctrblk++);
+                       ctrblocks[2] = cpu_to_be64(ctrblk++);
+
+                       des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks,
+                                             (u8 *)ctrblocks);
+
+                       dst[0] = src[0] ^ ctrblocks[0];
+                       dst[1] = src[1] ^ ctrblocks[1];
+                       dst[2] = src[2] ^ ctrblocks[2];
+
+                       src += 3;
+                       dst += 3;
+               } while ((nbytes -= bsize * 3) >= bsize * 3);
+
+               if (nbytes < bsize)
+                       goto done;
+       }
+
+       /* Handle leftovers */
+       do {
+               ctrblocks[0] = cpu_to_be64(ctrblk++);
+
+               des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks);
+
+               dst[0] = src[0] ^ ctrblocks[0];
+
+               src += 1;
+               dst += 1;
+       } while ((nbytes -= bsize) >= bsize);
+
+done:
+       *(__be64 *)walk->iv = cpu_to_be64(ctrblk);
+       return nbytes;
+}
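Since DES3_EDE_BLOCK_SIZE is 8, __ctr_crypt can treat the whole IV as one 64-bit big-endian counter: each keystream block is E(counter++), and ciphertext is plaintext XOR keystream, which is why ctr_crypt serves as both .encrypt and .decrypt in the algorithm table below. A scalar sketch, assuming a little-endian host where __builtin_bswap64() plays the role of cpu_to_be64():

    #include <stdint.h>

    /* CTR keystream over full blocks; enc() is single-block encrypt */
    static void ctr_sketch(uint64_t *dst, const uint64_t *src,
                           unsigned int n, uint64_t *ctr,
                           uint64_t (*enc)(uint64_t))
    {
            unsigned int i;

            for (i = 0; i < n; i++)
                    dst[i] = src[i] ^ enc(__builtin_bswap64((*ctr)++));
    }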
+
+static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+                    struct scatterlist *src, unsigned int nbytes)
+{
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE);
+
+       while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) {
+               nbytes = __ctr_crypt(desc, &walk);
+               err = blkcipher_walk_done(desc, &walk, nbytes);
+       }
+
+       if (walk.nbytes) {
+               ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+
+       return err;
+}
+
+static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key,
+                              unsigned int keylen)
+{
+       struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm);
+       u32 i, j, tmp;
+       int err;
+
+       /* Generate encryption context using generic implementation. */
+       err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen);
+       if (err < 0)
+               return err;
+
+       /* Fix encryption context for this implementation and form decryption
+        * context. */
+       j = DES3_EDE_EXPKEY_WORDS - 2;
+       for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) {
+               tmp = ror32(ctx->enc_expkey[i + 1], 4);
+               ctx->enc_expkey[i + 1] = tmp;
+
+               ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0];
+               ctx->dec_expkey[j + 1] = tmp;
+       }
+
+       return 0;
+}
+
+static struct crypto_alg des3_ede_algs[4] = { {
+       .cra_name               = "des3_ede",
+       .cra_driver_name        = "des3_ede-asm",
+       .cra_priority           = 200,
+       .cra_flags              = CRYPTO_ALG_TYPE_CIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_x86_ctx),
+       .cra_alignmask          = 0,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .cipher = {
+                       .cia_min_keysize        = DES3_EDE_KEY_SIZE,
+                       .cia_max_keysize        = DES3_EDE_KEY_SIZE,
+                       .cia_setkey             = des3_ede_x86_setkey,
+                       .cia_encrypt            = des3_ede_x86_encrypt,
+                       .cia_decrypt            = des3_ede_x86_decrypt,
+               }
+       }
+}, {
+       .cra_name               = "ecb(des3_ede)",
+       .cra_driver_name        = "ecb-des3_ede-asm",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_x86_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES3_EDE_KEY_SIZE,
+                       .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .setkey         = des3_ede_x86_setkey,
+                       .encrypt        = ecb_encrypt,
+                       .decrypt        = ecb_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "cbc(des3_ede)",
+       .cra_driver_name        = "cbc-des3_ede-asm",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = DES3_EDE_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct des3_ede_x86_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES3_EDE_KEY_SIZE,
+                       .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .ivsize         = DES3_EDE_BLOCK_SIZE,
+                       .setkey         = des3_ede_x86_setkey,
+                       .encrypt        = cbc_encrypt,
+                       .decrypt        = cbc_decrypt,
+               },
+       },
+}, {
+       .cra_name               = "ctr(des3_ede)",
+       .cra_driver_name        = "ctr-des3_ede-asm",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct des3_ede_x86_ctx),
+       .cra_alignmask          = 0,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_u = {
+               .blkcipher = {
+                       .min_keysize    = DES3_EDE_KEY_SIZE,
+                       .max_keysize    = DES3_EDE_KEY_SIZE,
+                       .ivsize         = DES3_EDE_BLOCK_SIZE,
+                       .setkey         = des3_ede_x86_setkey,
+                       .encrypt        = ctr_crypt,
+                       .decrypt        = ctr_crypt,
+               },
+       },
+} };
+
+static bool is_blacklisted_cpu(void)
+{
+       if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+               return false;
+
+       if (boot_cpu_data.x86 == 0x0f) {
+               /*
+                * On Pentium 4, des3_ede-x86_64 is slower than the generic C
+                * implementation because it uses 64-bit rotates (which are
+                * really slow on P4). Therefore blacklist P4s.
+                */
+               return true;
+       }
+
+       return false;
+}
+
+static int force;
+module_param(force, int, 0);
+MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist");
+
+static int __init des3_ede_x86_init(void)
+{
+       if (!force && is_blacklisted_cpu()) {
+               pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n");
+               return -ENODEV;
+       }
+
+       return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+}
+
+static void __exit des3_ede_x86_fini(void)
+{
+       crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs));
+}
+
+module_init(des3_ede_x86_init);
+module_exit(des3_ede_x86_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized");
+MODULE_ALIAS("des3_ede");
+MODULE_ALIAS("des3_ede-asm");
+MODULE_ALIAS("des");
+MODULE_ALIAS("des-asm");
+MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>");
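
For reference, the decryption schedule built by des3_ede_x86_setkey() above is just the encryption schedule traversed backwards two words at a time, since DES round keys are applied in reverse order when decrypting (the additional ror32() fixup is specific to this asm implementation and omitted here). A minimal standalone sketch of that reversal, with hypothetical names and the 96-word size assumed from DES3_EDE_EXPKEY_WORDS:

#include <stdint.h>

#define EXPKEY_WORDS 96  /* assumed: 3 DES passes x 16 rounds x 2 words */

/* Sketch: mirror the pairwise reversal in des3_ede_x86_setkey(). */
static void reverse_key_schedule(const uint32_t enc[EXPKEY_WORDS],
                                 uint32_t dec[EXPKEY_WORDS])
{
	unsigned int i, j;

	for (i = 0, j = EXPKEY_WORDS - 2; i < EXPKEY_WORDS; i += 2, j -= 2) {
		dec[j + 0] = enc[i + 0];
		dec[j + 1] = enc[i + 1];
	}
}
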
index f30cd10..8626b03 100644 (file)
@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
 
        /* save number of bits */
        bits[1] = cpu_to_be64(sctx->count[0] << 3);
-       bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61;
+       bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
 
        /* Pad out to 112 mod 128 and append length */
        index = sctx->count[0] & 0x7f;
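
The one-line change above fixes the carry into the upper half of SHA-512's 128-bit message length: the OR must happen before the byte swap, not after. A small sketch of the intended arithmetic (plain C, outside the kernel):

#include <stdint.h>

/* count[1]:count[0] is a 128-bit byte counter, count[0] low.
 * The padded length field is the bit count, i.e. count << 3;
 * its high 64 bits take three carry bits from count[0]. */
static void length_in_bits(const uint64_t count[2], uint64_t bits[2])
{
	bits[1] = count[0] << 3;                      /* low 64 bits */
	bits[0] = (count[1] << 3) | (count[0] >> 61); /* high 64 bits */
	/* only now would each word be byte-swapped with cpu_to_be64() */
}
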
index 5c7198c..0f4460b 100644 (file)
@@ -99,7 +99,7 @@
 #if defined(CONFIG_X86_PPRO_FENCE)
 
 /*
- * For either of these options x86 doesn't have a strong TSO memory
+ * For this option x86 doesn't have a strong TSO memory
  * model and we should fall back to full barriers.
  */
 
index d47786a..99c105d 100644 (file)
@@ -4,6 +4,8 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
+#define __HAVE_ARCH_CMPXCHG 1
+
 /*
  * Non-existent functions to indicate usage errors at link time
  * (or compile-time if the compiler implements __compiletime_error()).
@@ -143,7 +145,6 @@ extern void __add_wrong_size(void)
 # include <asm/cmpxchg_64.h>
 #endif
 
-#ifdef __HAVE_ARCH_CMPXCHG
 #define cmpxchg(ptr, old, new)                                         \
        __cmpxchg(ptr, old, new, sizeof(*(ptr)))
 
@@ -152,7 +153,6 @@ extern void __add_wrong_size(void)
 
 #define cmpxchg_local(ptr, old, new)                                   \
        __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
-#endif
 
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
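
With the #ifdef gone, cmpxchg() and cmpxchg_local() are unconditionally available on x86. For readers unfamiliar with the primitive, a user-space sketch of the canonical compare-and-swap retry loop, with C11 atomics standing in for the kernel macros:

#include <stdatomic.h>

/* Canonical cmpxchg retry loop: bail out when the update would exceed
 * the clamp; atomic_compare_exchange_weak() refreshes 'old' with the
 * current value whenever it fails, so the loop simply retries. */
static void add_clamped(_Atomic long *v, long inc, long max)
{
	long old = atomic_load(v);

	do {
		if (old + inc > max)
			return;
	} while (!atomic_compare_exchange_weak(v, &old, old + inc));
}
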
index f8bf2ee..f7e1429 100644 (file)
@@ -34,8 +34,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
                     : "memory");
 }
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #ifdef CONFIG_X86_CMPXCHG64
 #define cmpxchg64(ptr, o, n)                                           \
        ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
index 614be87..1af9469 100644 (file)
@@ -6,8 +6,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
        *ptr = val;
 }
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define cmpxchg64(ptr, o, n)                                           \
 ({                                                                     \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
index 0525a8b..e1f7fec 100644 (file)
@@ -68,6 +68,8 @@ struct dyn_arch_ftrace {
 
 int ftrace_int3_handler(struct pt_regs *regs);
 
+#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR
+
 #endif /*  CONFIG_DYNAMIC_FTRACE */
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_FUNCTION_TRACER */
index cb6cfcd..a80cbb8 100644 (file)
@@ -43,7 +43,7 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 extern void init_ISA_irqs(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
index bba3cf8..0a8b519 100644 (file)
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void)
 
 #define PARAVIRT_ADJUST_EXCEPTION_FRAME        /*  */
 
-#define INTERRUPT_RETURN       iretq
+#define INTERRUPT_RETURN       jmp native_iret
 #define USERGS_SYSRET64                                \
        swapgs;                                 \
        sysretq;
index a04fe4e..eb18117 100644 (file)
@@ -37,6 +37,7 @@ struct x86_instruction_info {
        u8  modrm_reg;          /* index of register used               */
        u8  modrm_rm;           /* rm part of modrm                     */
        u64 src_val;            /* value of source operand              */
+       u64 dst_val;            /* value of destination operand         */
        u8  src_bytes;          /* size of source operand               */
        u8  dst_bytes;          /* size of destination operand          */
        u8  ad_bytes;           /* size of src/dst address              */
@@ -194,6 +195,7 @@ struct x86_emulate_ops {
        int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
        int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
        int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
+       int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc);
        int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
        void (*halt)(struct x86_emulate_ctxt *ctxt);
        void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
@@ -231,7 +233,7 @@ struct operand {
        union {
                unsigned long val;
                u64 val64;
-               char valptr[sizeof(unsigned long) + 2];
+               char valptr[sizeof(sse128_t)];
                sse128_t vec_val;
                u64 mm_val;
                void *data;
@@ -240,8 +242,8 @@ struct operand {
 
 struct fetch_cache {
        u8 data[15];
-       unsigned long start;
-       unsigned long end;
+       u8 *ptr;
+       u8 *end;
 };
 
 struct read_cache {
@@ -286,30 +288,36 @@ struct x86_emulate_ctxt {
        u8 opcode_len;
        u8 b;
        u8 intercept;
-       u8 lock_prefix;
-       u8 rep_prefix;
        u8 op_bytes;
        u8 ad_bytes;
-       u8 rex_prefix;
        struct operand src;
        struct operand src2;
        struct operand dst;
-       bool has_seg_override;
-       u8 seg_override;
-       u64 d;
        int (*execute)(struct x86_emulate_ctxt *ctxt);
        int (*check_perm)(struct x86_emulate_ctxt *ctxt);
+       /*
+        * The following six fields are cleared together;
+        * the rest are initialized unconditionally in x86_decode_insn
+        * or elsewhere.
+        */
+       bool rip_relative;
+       u8 rex_prefix;
+       u8 lock_prefix;
+       u8 rep_prefix;
+       /* bitmaps of registers in _regs[] that can be read */
+       u32 regs_valid;
+       /* bitmaps of registers in _regs[] that have been written */
+       u32 regs_dirty;
        /* modrm */
        u8 modrm;
        u8 modrm_mod;
        u8 modrm_reg;
        u8 modrm_rm;
        u8 modrm_seg;
-       bool rip_relative;
+       u8 seg_override;
+       u64 d;
        unsigned long _eip;
        struct operand memop;
-       u32 regs_valid;  /* bitmaps of registers in _regs[] that can be read */
-       u32 regs_dirty;  /* bitmaps of registers in _regs[] that have been written */
        /* Fields above regs are cleared together. */
        unsigned long _regs[NR_VCPU_REGS];
        struct operand *memopp;
@@ -407,6 +415,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
 #define EMULATION_INTERCEPTED 2
+void init_decode_cache(struct x86_emulate_ctxt *ctxt);
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
                         u16 tss_selector, int idt_index, int reason,
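
The reordering above groups every per-instruction field into one contiguous run so the decode cache can be reset with a single memset(); a hedged sketch of that layout trick (hypothetical struct, not the KVM one):

#include <stddef.h>
#include <string.h>

/* Hypothetical layout mirroring the reordering above: everything that
 * must be reset per instruction sits in one contiguous run. */
struct decode_ctxt {
	int mode;               /* survives across instructions */
	/* --- fields below are cleared together --- */
	int rip_relative;
	unsigned char rex_prefix;
	unsigned char lock_prefix;
	unsigned int regs_valid;
	/* --- end of cleared run --- */
	unsigned long regs[16]; /* initialized elsewhere */
};

static void reset_decode_cache(struct decode_ctxt *c)
{
	memset(&c->rip_relative, 0,
	       offsetof(struct decode_ctxt, regs) -
	       offsetof(struct decode_ctxt, rip_relative));
}
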
index 4931415..5724601 100644 (file)
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 8
+#define KVM_NR_VAR_MTRR 10
 
 #define ASYNC_PF_PER_VCPU 64
 
@@ -152,14 +152,16 @@ enum {
 
 #define DR6_BD         (1 << 13)
 #define DR6_BS         (1 << 14)
-#define DR6_FIXED_1    0xffff0ff0
-#define DR6_VOLATILE   0x0000e00f
+#define DR6_RTM                (1 << 16)
+#define DR6_FIXED_1    0xfffe0ff0
+#define DR6_INIT       0xffff0ff0
+#define DR6_VOLATILE   0x0001e00f
 
 #define DR7_BP_EN_MASK 0x000000ff
 #define DR7_GE         (1 << 9)
 #define DR7_GD         (1 << 13)
 #define DR7_FIXED_1    0x00000400
-#define DR7_VOLATILE   0xffff23ff
+#define DR7_VOLATILE   0xffff2bff
 
 /* apic attention bits */
 #define KVM_APIC_CHECK_VAPIC   0
@@ -448,7 +450,7 @@ struct kvm_vcpu_arch {
        u64 tsc_offset_adjustment;
        u64 this_tsc_nsec;
        u64 this_tsc_write;
-       u32 this_tsc_generation;
+       u64 this_tsc_generation;
        bool tsc_catchup;
        bool tsc_always_catchup;
        s8 virtual_tsc_shift;
@@ -461,7 +463,7 @@ struct kvm_vcpu_arch {
        bool nmi_injected;    /* Trying to inject an NMI this entry */
 
        struct mtrr_state_type mtrr_state;
-       u32 pat;
+       u64 pat;
 
        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
@@ -591,7 +593,7 @@ struct kvm_arch {
        u64 cur_tsc_nsec;
        u64 cur_tsc_write;
        u64 cur_tsc_offset;
-       u32 cur_tsc_generation;
+       u64 cur_tsc_generation;
        int nr_vcpus_matched_tsc;
 
        spinlock_t pvclock_gtod_sync_lock;
@@ -717,7 +719,7 @@ struct kvm_x86_ops {
        int (*handle_exit)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
        void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
-       u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
+       u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
        void (*patch_hypercall)(struct kvm_vcpu *vcpu,
                                unsigned char *hypercall_addr);
        void (*set_irq)(struct kvm_vcpu *vcpu);
@@ -1070,6 +1072,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
 bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
 int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
 int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc);
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
 void kvm_deliver_pmi(struct kvm_vcpu *vcpu);
index a55c7ef..0f555cc 100644 (file)
@@ -13,7 +13,7 @@
 #define RTC_ALWAYS_BCD 1       /* RTC operates in binary mode */
 #endif
 
-#if defined(CONFIG_X86_32) && defined(__HAVE_ARCH_CMPXCHG)
+#if defined(CONFIG_X86_32)
 /*
  * This lock provides nmi access to the CMOS/RTC registers.  It has some
  * special properties.  It is owned by a CPU and stores the index register
index 0208c3c..85e6cda 100644 (file)
@@ -100,23 +100,11 @@ do {                                                              \
 static inline int __mutex_fastpath_trylock(atomic_t *count,
                                           int (*fail_fn)(atomic_t *))
 {
-       /*
-        * We have two variants here. The cmpxchg based one is the best one
-        * because it never induce a false contention state.  It is included
-        * here because architectures using the inc/dec algorithms over the
-        * xchg ones are much more likely to support cmpxchg natively.
-        *
-        * If not we fall back to the spinlock based variant - that is
-        * just as efficient (and simpler) as a 'destructive' probing of
-        * the mutex state would be.
-        */
-#ifdef __HAVE_ARCH_CMPXCHG
+       /* cmpxchg because it never induces a false contention state. */
        if (likely(atomic_cmpxchg(count, 1, 0) == 1))
                return 1;
+
        return 0;
-#else
-       return fail_fn(count);
-#endif
 }
 
 #endif /* _ASM_X86_MUTEX_32_H */
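
As the shortened comment says, cmpxchg is preferred because a failed attempt leaves the counter untouched, whereas a decrement-style probe would perturb it and make the lock look contended to other CPUs. A user-space sketch of the cmpxchg fastpath, with C11 atomics and the same convention as above (1 = unlocked, 0 = locked):

#include <stdatomic.h>

static int mutex_fastpath_trylock(_Atomic int *count)
{
	int expected = 1;

	/* Succeeds only if the mutex was free (1); on failure the
	 * counter is left exactly as it was - no false contention. */
	return atomic_compare_exchange_strong(count, &expected, 0);
}
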
index 851bcdc..fd47218 100644 (file)
  * Compared to the generic __my_cpu_offset version, the following
  * saves one instruction and avoids clobbering a temp register.
  */
-#define raw_cpu_ptr(ptr)                               \
+#define arch_raw_cpu_ptr(ptr)                          \
 ({                                                     \
        unsigned long tcp_ptr__;                        \
-       __verify_pcpu_ptr(ptr);                         \
        asm volatile("add " __percpu_arg(1) ", %0"      \
                     : "=r" (tcp_ptr__)                 \
                     : "m" (this_cpu_off), "0" (ptr));  \
index a4ea023..32cc237 100644 (file)
@@ -696,6 +696,8 @@ static inline void cpu_relax(void)
        rep_nop();
 }
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 /* Stop speculative execution and prefetching of modified code. */
 static inline void sync_core(void)
 {
index 14fd6fd..6205f0c 100644 (file)
@@ -231,6 +231,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 
 #define ARCH_HAS_USER_SINGLE_STEP_INFO
 
+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set.  The
+ * ptracer can change arbitrary register values, which is usually okay
+ * because the usual ptrace stops run off the signal delivery path which
+ * forces IRET; however, ptrace_event() stops happen in arbitrary places
+ * in the kernel and don't force the IRET path.
+ *
+ * So force IRET path after a ptrace stop.
+ */
+#define arch_ptrace_stop_needed(code, info)                            \
+({                                                                     \
+       set_thread_flag(TIF_NOTIFY_RESUME);                             \
+       false;                                                          \
+})
+
 struct user_desc;
 extern int do_get_thread_area(struct task_struct *p, int idx,
                              struct user_desc __user *info);
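
The macro above is evaluated for its side effect: it flags the task so the eventual return to user space takes the slow IRET path, then reports false so no extra arch stop work runs. A hedged user-space sketch of how the generic side consults such a hook (the exact call site in kernel/signal.c is an assumption here):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel pieces. */
static bool arch_ptrace_stop_needed(int code, void *info)
{
	puts("TIF_NOTIFY_RESUME set: task will return via IRET");
	return false;         /* x86 never needs the extra stop work */
}

static void arch_ptrace_stop(int code, void *info) { }

int main(void)
{
	int exit_code = 0;

	/* Paraphrase of the generic ptrace_stop() pattern (assumed). */
	if (arch_ptrace_stop_needed(exit_code, NULL))
		arch_ptrace_stop(exit_code, NULL);
	return 0;
}
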
index 70f46f0..ae0e241 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <asm-generic/qrwlock_types.h>
 
-#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE)
+#ifndef CONFIG_X86_PPRO_FENCE
 #define queue_write_unlock queue_write_unlock
 static inline void queue_write_unlock(struct qrwlock *lock)
 {
index 44282fb..c4b9dc2 100644 (file)
 #define vga_readb(x) (*(x))
 #define vga_writeb(x, y) (*(y) = (x))
 
-#ifdef CONFIG_FB_EFI
-#define __ARCH_HAS_VGA_DEFAULT_DEVICE
-extern struct pci_dev *vga_default_device(void);
-extern void vga_set_default_device(struct pci_dev *pdev);
-#endif
-
 #endif /* _ASM_X86_VGA_H */
index 7004d21..bcbfade 100644 (file)
@@ -51,6 +51,9 @@
 #define CPU_BASED_MONITOR_EXITING               0x20000000
 #define CPU_BASED_PAUSE_EXITING                 0x40000000
 #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS   0x80000000
+
+#define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR    0x0401e172
+
 /*
  * Definitions of Secondary Processor-Based VM-Execution Controls.
  */
@@ -76,7 +79,7 @@
 
 #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR    0x00000016
 
-#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000002
+#define VM_EXIT_SAVE_DEBUG_CONTROLS             0x00000004
 #define VM_EXIT_HOST_ADDR_SPACE_SIZE            0x00000200
 #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL      0x00001000
 #define VM_EXIT_ACK_INTR_ON_EXIT                0x00008000
@@ -89,7 +92,7 @@
 
 #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR      0x00036dff
 
-#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000002
+#define VM_ENTRY_LOAD_DEBUG_CONTROLS            0x00000004
 #define VM_ENTRY_IA32E_MODE                     0x00000200
 #define VM_ENTRY_SMM                            0x00000400
 #define VM_ENTRY_DEACT_DUAL_MONITOR             0x00000800
index 09409c4..3dec769 100644 (file)
@@ -22,6 +22,7 @@ header-y += ipcbuf.h
 header-y += ist.h
 header-y += kvm.h
 header-y += kvm_para.h
+header-y += kvm_perf.h
 header-y += ldt.h
 header-y += mce.h
 header-y += mman.h
index d3a8778..d7dcef5 100644 (file)
 #define GP_VECTOR 13
 #define PF_VECTOR 14
 #define MF_VECTOR 16
+#define AC_VECTOR 17
 #define MC_VECTOR 18
+#define XM_VECTOR 19
+#define VE_VECTOR 20
 
 /* Select x86 specific features in <linux/kvm.h> */
 #define __KVM_HAVE_PIT
diff --git a/arch/x86/include/uapi/asm/kvm_perf.h b/arch/x86/include/uapi/asm/kvm_perf.h
new file mode 100644 (file)
index 0000000..3bb964f
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _ASM_X86_KVM_PERF_H
+#define _ASM_X86_KVM_PERF_H
+
+#include <asm/svm.h>
+#include <asm/vmx.h>
+#include <asm/kvm.h>
+
+#define DECODE_STR_LEN 20
+
+#define VCPU_ID "vcpu_id"
+
+#define KVM_ENTRY_TRACE "kvm:kvm_entry"
+#define KVM_EXIT_TRACE "kvm:kvm_exit"
+#define KVM_EXIT_REASON "exit_reason"
+
+#endif /* _ASM_X86_KVM_PERF_H */
index 5cd1569..eac9e92 100644 (file)
 
 /* VMX_BASIC bits and bitmasks */
 #define VMX_BASIC_VMCS_SIZE_SHIFT      32
+#define VMX_BASIC_TRUE_CTLS            (1ULL << 55)
 #define VMX_BASIC_64           0x0001000000000000LLU
 #define VMX_BASIC_MEM_TYPE_SHIFT       50
 #define VMX_BASIC_MEM_TYPE_MASK        0x003c000000000000LLU
index 86281ff..a531f65 100644 (file)
@@ -74,10 +74,6 @@ int acpi_fix_pin2_polarity __initdata;
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #endif
 
-#ifndef __HAVE_ARCH_CMPXCHG
-#warning ACPI uses CMPXCHG, i486 and later hardware
-#endif
-
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
index c3fcb5d..6a1e71b 100644 (file)
@@ -33,31 +33,41 @@ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 /* "in progress" flag of arch_trigger_all_cpu_backtrace */
 static unsigned long backtrace_flag;
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        int i;
+       int cpu = get_cpu();
 
-       if (test_and_set_bit(0, &backtrace_flag))
+       if (test_and_set_bit(0, &backtrace_flag)) {
                /*
                 * If there is already a trigger_all_cpu_backtrace() in progress
                 * (backtrace_flag == 1), don't output duplicate CPU dump info.
                 */
+               put_cpu();
                return;
+       }
 
        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+       if (!include_self)
+               cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
-       printk(KERN_INFO "sending NMI to all CPUs:\n");
-       apic->send_IPI_all(NMI_VECTOR);
+       if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+               pr_info("sending NMI to %s CPUs:\n",
+                       (include_self ? "all" : "other"));
+               apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
+       }
 
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
+               touch_softlockup_watchdog();
        }
 
        clear_bit(0, &backtrace_flag);
        smp_mb__after_atomic();
+       put_cpu();
 }
 
 static int
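
The new bool selects whether the calling CPU dumps its own stack too; the generic wrappers are expected to funnel both variants through this single arch hook, roughly as below (wrapper names per include/linux/nmi.h are an assumption):

#include <stdbool.h>

void arch_trigger_all_cpu_backtrace(bool include_self);

static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(true);   /* include this CPU */
	return true;
}

static inline bool trigger_allbutself_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace(false);  /* skip this CPU */
	return true;
}
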
index f3a1f04..5848744 100644 (file)
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
        u32 eax;
        u8 ret = 0;
        int idled = 0;
-       int polling;
        int err = 0;
 
        if (!need_resched()) {
index 0fd9557..9483ee5 100644 (file)
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
         */
        detect_extended_topology(c);
 
+       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+               /*
+                * let's use the legacy cpuid vectors 0x1 and 0x4 for topology
+                * detection.
+                */
+               c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+               detect_ht(c);
+#endif
+       }
+
        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-               /*
-                * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-                * detection.
-                */
-               c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-               detect_ht(c);
-#endif
-       }
-
        /* Work around errata */
        srat_detect_node(c);
 
index a952e9c..9c8f739 100644 (file)
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
        }
 
+#ifdef CONFIG_X86_HT
+       /*
+        * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
+        * turn means that the only possibility is SMT (as indicated in
+        * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+        * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+        * c->phys_proc_id.
+        */
+       if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+               per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
        return l2;
index bb92f38..9a79c8d 100644 (file)
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
        for_each_online_cpu(i) {
                err = mce_device_create(i);
                if (err) {
+                       /*
+                        * Register notifier anyway (and do not unreg it) so
+                        * that we don't leave undeleted timers, see notifier
+                        * callback above.
+                        */
+                       __register_hotcpu_notifier(&mce_cpu_notifier);
                        cpu_notifier_register_done();
                        goto err_device_create;
                }
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
 err_register:
        unregister_syscore_ops(&mce_syscore_ops);
 
-       cpu_notifier_register_begin();
-       __unregister_hotcpu_notifier(&mce_cpu_notifier);
-       cpu_notifier_register_done();
-
 err_device_create:
        /*
         * We didn't keep track of which devices were created above, but
index 2bdfbff..2879ecd 100644 (file)
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;
+               /* Check if the extra MSRs can be safely accessed */
+               if (!er->extra_msr_access)
+                       return -ENXIO;
 
                reg->idx = er->idx;
                reg->config = event->attr.config1;
index 3b2f9bd..8ade931 100644 (file)
@@ -295,14 +295,16 @@ struct extra_reg {
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
+       bool                    extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {     \
-       .event = (e),           \
-       .msr = (ms),            \
-       .config_mask = (m),     \
-       .valid_mask = (vm),     \
-       .idx = EXTRA_REG_##i,   \
+       .event = (e),                   \
+       .msr = (ms),                    \
+       .config_mask = (m),             \
+       .valid_mask = (vm),             \
+       .idx = EXTRA_REG_##i,           \
+       .extra_msr_access = true,       \
        }
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)     \
index 3bbdf4c..30790d7 100644 (file)
@@ -294,31 +294,41 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
                        cpu_to_node(cpu));
 }
 
-static void amd_uncore_cpu_up_prepare(unsigned int cpu)
+static int amd_uncore_cpu_up_prepare(unsigned int cpu)
 {
-       struct amd_uncore *uncore;
+       struct amd_uncore *uncore_nb = NULL, *uncore_l2;
 
        if (amd_uncore_nb) {
-               uncore = amd_uncore_alloc(cpu);
-               uncore->cpu = cpu;
-               uncore->num_counters = NUM_COUNTERS_NB;
-               uncore->rdpmc_base = RDPMC_BASE_NB;
-               uncore->msr_base = MSR_F15H_NB_PERF_CTL;
-               uncore->active_mask = &amd_nb_active_mask;
-               uncore->pmu = &amd_nb_pmu;
-               *per_cpu_ptr(amd_uncore_nb, cpu) = uncore;
+               uncore_nb = amd_uncore_alloc(cpu);
+               if (!uncore_nb)
+                       goto fail;
+               uncore_nb->cpu = cpu;
+               uncore_nb->num_counters = NUM_COUNTERS_NB;
+               uncore_nb->rdpmc_base = RDPMC_BASE_NB;
+               uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL;
+               uncore_nb->active_mask = &amd_nb_active_mask;
+               uncore_nb->pmu = &amd_nb_pmu;
+               *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb;
        }
 
        if (amd_uncore_l2) {
-               uncore = amd_uncore_alloc(cpu);
-               uncore->cpu = cpu;
-               uncore->num_counters = NUM_COUNTERS_L2;
-               uncore->rdpmc_base = RDPMC_BASE_L2;
-               uncore->msr_base = MSR_F16H_L2I_PERF_CTL;
-               uncore->active_mask = &amd_l2_active_mask;
-               uncore->pmu = &amd_l2_pmu;
-               *per_cpu_ptr(amd_uncore_l2, cpu) = uncore;
+               uncore_l2 = amd_uncore_alloc(cpu);
+               if (!uncore_l2)
+                       goto fail;
+               uncore_l2->cpu = cpu;
+               uncore_l2->num_counters = NUM_COUNTERS_L2;
+               uncore_l2->rdpmc_base = RDPMC_BASE_L2;
+               uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL;
+               uncore_l2->active_mask = &amd_l2_active_mask;
+               uncore_l2->pmu = &amd_l2_pmu;
+               *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2;
        }
+
+       return 0;
+
+fail:
+       kfree(uncore_nb);
+       return -ENOMEM;
 }
 
 static struct amd_uncore *
@@ -441,7 +451,7 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
 
        if (!--uncore->refcnt)
                kfree(uncore);
-       *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
+       *per_cpu_ptr(uncores, cpu) = NULL;
 }
 
 static void amd_uncore_cpu_dead(unsigned int cpu)
@@ -461,7 +471,8 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
 
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_UP_PREPARE:
-               amd_uncore_cpu_up_prepare(cpu);
+               if (amd_uncore_cpu_up_prepare(cpu))
+                       return notifier_from_errno(-ENOMEM);
                break;
 
        case CPU_STARTING:
@@ -501,20 +512,33 @@ static void __init init_cpu_already_online(void *dummy)
        amd_uncore_cpu_online(cpu);
 }
 
+static void cleanup_cpu_online(void *dummy)
+{
+       unsigned int cpu = smp_processor_id();
+
+       amd_uncore_cpu_dead(cpu);
+}
+
 static int __init amd_uncore_init(void)
 {
-       unsigned int cpu;
+       unsigned int cpu, cpu2;
        int ret = -ENODEV;
 
        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
-               return -ENODEV;
+               goto fail_nodev;
 
        if (!cpu_has_topoext)
-               return -ENODEV;
+               goto fail_nodev;
 
        if (cpu_has_perfctr_nb) {
                amd_uncore_nb = alloc_percpu(struct amd_uncore *);
-               perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
+               if (!amd_uncore_nb) {
+                       ret = -ENOMEM;
+                       goto fail_nb;
+               }
+               ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1);
+               if (ret)
+                       goto fail_nb;
 
                printk(KERN_INFO "perf: AMD NB counters detected\n");
                ret = 0;
@@ -522,20 +546,28 @@ static int __init amd_uncore_init(void)
 
        if (cpu_has_perfctr_l2) {
                amd_uncore_l2 = alloc_percpu(struct amd_uncore *);
-               perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+               if (!amd_uncore_l2) {
+                       ret = -ENOMEM;
+                       goto fail_l2;
+               }
+               ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1);
+               if (ret)
+                       goto fail_l2;
 
                printk(KERN_INFO "perf: AMD L2I counters detected\n");
                ret = 0;
        }
 
        if (ret)
-               return -ENODEV;
+               goto fail_nodev;
 
        cpu_notifier_register_begin();
 
        /* init cpus already online before registering for hotplug notifier */
        for_each_online_cpu(cpu) {
-               amd_uncore_cpu_up_prepare(cpu);
+               ret = amd_uncore_cpu_up_prepare(cpu);
+               if (ret)
+                       goto fail_online;
                smp_call_function_single(cpu, init_cpu_already_online, NULL, 1);
        }
 
@@ -543,5 +575,30 @@ static int __init amd_uncore_init(void)
        cpu_notifier_register_done();
 
        return 0;
+
+fail_online:
+       for_each_online_cpu(cpu2) {
+               if (cpu2 == cpu)
+                       break;
+               smp_call_function_single(cpu2, cleanup_cpu_online, NULL, 1);
+       }
+       cpu_notifier_register_done();
+
+       /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */
+       amd_uncore_nb = amd_uncore_l2 = NULL;
+       if (cpu_has_perfctr_l2)
+               perf_pmu_unregister(&amd_l2_pmu);
+fail_l2:
+       if (cpu_has_perfctr_nb)
+               perf_pmu_unregister(&amd_nb_pmu);
+       if (amd_uncore_l2)
+               free_percpu(amd_uncore_l2);
+fail_nb:
+       if (amd_uncore_nb)
+               free_percpu(amd_uncore_nb);
+
+fail_nodev:
+       return ret;
 }
 device_initcall(amd_uncore_init);
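
The reworked init path above follows the usual kernel unwind idiom: each acquisition gets a matching failure label, and an error jumps to the label that releases everything acquired so far, in reverse order. Stripped to its skeleton, with hypothetical helpers simulating a failure in the second step:

#include <errno.h>

static int a_held;

static int acquire_a(void) { a_held = 1; return 0; }
static int acquire_b(void) { return -ENOMEM; }      /* simulated failure */
static void release_a(void) { a_held = 0; }

static int init_two(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto fail_a;
	ret = acquire_b();
	if (ret)
		goto fail_b;
	return 0;

fail_b:
	release_a();            /* undo in reverse order of acquisition */
fail_a:
	return ret;
}
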
index adb02aa..2502d0d 100644 (file)
@@ -1381,6 +1381,15 @@ again:
 
        intel_pmu_lbr_read();
 
+       /*
+        * The CondChgd bit (bit 63) doesn't indicate any overflow
+        * status. Ignore it and clear the bit.
+        */
+       if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+               if (!status)
+                       goto done;
+       }
+
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
@@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void)
        }
 }
 
+/*
+ * Under certain circumstances, accessing certain MSRs may cause a #GP.
+ * This function tests whether the input MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+       u64 val_old, val_new, val_tmp;
+
+       /*
+        * Read the current value, change it and read it back to see if it
+        * matches; this is needed to detect certain hardware emulators
+        * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+        */
+       if (rdmsrl_safe(msr, &val_old))
+               return false;
+
+       /*
+        * Only change the bits which can be updated by wrmsrl.
+        */
+       val_tmp = val_old ^ mask;
+       if (wrmsrl_safe(msr, val_tmp) ||
+           rdmsrl_safe(msr, &val_new))
+               return false;
+
+       if (val_new != val_tmp)
+               return false;
+
+       /*
+        * At this point the MSR is known to be safely accessible.
+        * Restore the old value and return.
+        */
+       wrmsrl(msr, val_old);
+
+       return true;
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
        x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void)
        union cpuid10_ebx ebx;
        struct event_constraint *c;
        unsigned int unused;
-       int version;
+       struct extra_reg *er;
+       int version, i;
 
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
@@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void)
        case 62: /* IvyBridge EP */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
+               /* dTLB-load-misses on IVB is different from SNB */
+               hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));
 
@@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void)
                }
        }
 
+       /*
+        * Accessing LBR MSRs may cause a #GP under certain circumstances,
+        * e.g. KVM doesn't support the LBR MSRs. Check all LBR MSRs here
+        * and disable LBR access if any of them cannot be accessed.
+        */
+       if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+               x86_pmu.lbr_nr = 0;
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+                     check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+                       x86_pmu.lbr_nr = 0;
+       }
+
+       /*
+        * Accessing an extra MSR may cause a #GP under certain circumstances,
+        * e.g. KVM doesn't support offcore events. Check all extra_regs here.
+        */
+       if (x86_pmu.extra_regs) {
+               for (er = x86_pmu.extra_regs; er->msr; er++) {
+                       er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+                       /* Disable LBR select mapping */
+                       if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+                               x86_pmu.lbr_sel_map = NULL;
+               }
+       }
+
        /* Support full width counters using alternative MSR range */
        if (x86_pmu.intel_cap.full_width_write) {
                x86_pmu.max_period = x86_pmu.cntval_mask;
index 980970c..696ade3 100644 (file)
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
        if (!x86_pmu.bts)
                return 0;
 
-       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
-       if (unlikely(!buffer))
+       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+       if (unlikely(!buffer)) {
+               WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
                return -ENOMEM;
+       }
 
        max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
        thresh = max / 16;
index 65bbbea..cfc6f9d 100644 (file)
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
@@ -2946,10 +2947,7 @@ again:
                 * extra registers. If we failed to take an extra
                 * register, try the alternative.
                 */
-               if (idx % 2)
-                       idx--;
-               else
-                       idx++;
+               idx ^= 1;
                if (idx != reg1->idx % 6) {
                        if (idx == 2)
                                config1 >>= 8;
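
The idx ^= 1 simplification relies on XOR with 1 toggling the lowest bit, pairing each even index with the following odd one, which is exactly what the replaced increment/decrement branch computed. A two-line demonstration:

#include <stdio.h>

int main(void)
{
	for (int idx = 0; idx < 6; idx++)
		printf("%d <-> %d\n", idx, idx ^ 1);  /* 0<->1, 2<->3, 4<->5 */
	return 0;
}
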
index f0da82b..47c410d 100644 (file)
@@ -423,8 +423,9 @@ sysenter_past_esp:
        jnz sysenter_audit
 sysenter_do_call:
        cmpl $(NR_syscalls), %eax
-       jae syscall_badsys
+       jae sysenter_badsys
        call *sys_call_table(,%eax,4)
+sysenter_after_call:
        movl %eax,PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
@@ -501,6 +502,7 @@ ENTRY(system_call)
        jae syscall_badsys
 syscall_call:
        call *sys_call_table(,%eax,4)
+syscall_after_call:
        movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
        LOCKDEP_SYS_EXIT
@@ -674,8 +676,13 @@ syscall_fault:
 END(syscall_fault)
 
 syscall_badsys:
-       movl $-ENOSYS,PT_EAX(%esp)
-       jmp resume_userspace
+       movl $-ENOSYS,%eax
+       jmp syscall_after_call
+END(syscall_badsys)
+
+sysenter_badsys:
+       movl $-ENOSYS,%eax
+       jmp sysenter_after_call
 END(syscall_badsys)
        CFI_ENDPROC
 
@@ -1052,9 +1059,6 @@ ENTRY(mcount)
 END(mcount)
 
 ENTRY(ftrace_caller)
-       cmpl $0, function_trace_stop
-       jne  ftrace_stub
-
        pushl %eax
        pushl %ecx
        pushl %edx
@@ -1086,8 +1090,6 @@ END(ftrace_caller)
 
 ENTRY(ftrace_regs_caller)
        pushf   /* push flags before compare (in cs location) */
-       cmpl $0, function_trace_stop
-       jne ftrace_restore_flags
 
        /*
         * i386 does not save SS and ESP when coming from kernel.
@@ -1146,7 +1148,6 @@ GLOBAL(ftrace_regs_call)
        popf                    /* Pop flags at end (no addl to corrupt flags) */
        jmp ftrace_ret
 
-ftrace_restore_flags:
        popf
        jmp  ftrace_stub
 #else /* ! CONFIG_DYNAMIC_FTRACE */
@@ -1155,9 +1156,6 @@ ENTRY(mcount)
        cmpl $__PAGE_OFFSET, %esp
        jb ftrace_stub          /* Paging not enabled yet? */
 
-       cmpl $0, function_trace_stop
-       jne  ftrace_stub
-
        cmpl $ftrace_stub, ftrace_trace_function
        jnz trace
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
index b25ca96..2fac134 100644 (file)
@@ -207,7 +207,6 @@ ENDPROC(native_usergs_sysret64)
  */
        .macro XCPT_FRAME start=1 offset=0
        INTR_FRAME \start, RIP+\offset-ORIG_RAX
-       /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/
        .endm
 
 /*
@@ -287,21 +286,21 @@ ENDPROC(native_usergs_sysret64)
 ENTRY(save_paranoid)
        XCPT_FRAME 1 RDI+8
        cld
-       movq_cfi rdi, RDI+8
-       movq_cfi rsi, RSI+8
+       movq %rdi, RDI+8(%rsp)
+       movq %rsi, RSI+8(%rsp)
        movq_cfi rdx, RDX+8
        movq_cfi rcx, RCX+8
        movq_cfi rax, RAX+8
-       movq_cfi r8, R8+8
-       movq_cfi r9, R9+8
-       movq_cfi r10, R10+8
-       movq_cfi r11, R11+8
+       movq %r8, R8+8(%rsp)
+       movq %r9, R9+8(%rsp)
+       movq %r10, R10+8(%rsp)
+       movq %r11, R11+8(%rsp)
        movq_cfi rbx, RBX+8
-       movq_cfi rbp, RBP+8
-       movq_cfi r12, R12+8
-       movq_cfi r13, R13+8
-       movq_cfi r14, R14+8
-       movq_cfi r15, R15+8
+       movq %rbp, RBP+8(%rsp)
+       movq %r12, R12+8(%rsp)
+       movq %r13, R13+8(%rsp)
+       movq %r14, R14+8(%rsp)
+       movq %r15, R15+8(%rsp)
        movl $1,%ebx
        movl $MSR_GS_BASE,%ecx
        rdmsr
@@ -830,27 +829,24 @@ restore_args:
        RESTORE_ARGS 1,8,1
 
 irq_return:
+       INTERRUPT_RETURN
+
+ENTRY(native_iret)
        /*
         * Are we returning to a stack segment from the LDT?  Note: in
         * 64-bit mode SS:RSP on the exception stack is always valid.
         */
 #ifdef CONFIG_X86_ESPFIX64
        testb $4,(SS-RIP)(%rsp)
-       jnz irq_return_ldt
+       jnz native_irq_return_ldt
 #endif
 
-irq_return_iret:
-       INTERRUPT_RETURN
-       _ASM_EXTABLE(irq_return_iret, bad_iret)
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
+native_irq_return_iret:
        iretq
-       _ASM_EXTABLE(native_iret, bad_iret)
-#endif
+       _ASM_EXTABLE(native_irq_return_iret, bad_iret)
 
 #ifdef CONFIG_X86_ESPFIX64
-irq_return_ldt:
+native_irq_return_ldt:
        pushq_cfi %rax
        pushq_cfi %rdi
        SWAPGS
@@ -872,7 +868,7 @@ irq_return_ldt:
        SWAPGS
        movq %rax,%rsp
        popq_cfi %rax
-       jmp irq_return_iret
+       jmp native_irq_return_iret
 #endif
 
        .section .fixup,"ax"
@@ -956,13 +952,8 @@ __do_double_fault:
        cmpl $__KERNEL_CS,CS(%rdi)
        jne do_double_fault
        movq RIP(%rdi),%rax
-       cmpq $irq_return_iret,%rax
-#ifdef CONFIG_PARAVIRT
-       je 1f
-       cmpq $native_iret,%rax
-#endif
+       cmpq $native_irq_return_iret,%rax
        jne do_double_fault             /* This shouldn't happen... */
-1:
        movq PER_CPU_VAR(kernel_stack),%rax
        subq $(6*8-KERNEL_STACK_OFFSET),%rax    /* Reset to original stack */
        movq %rax,RSP(%rdi)
@@ -1395,21 +1386,21 @@ ENTRY(error_entry)
        CFI_ADJUST_CFA_OFFSET 15*8
        /* oldrax contains error code */
        cld
-       movq_cfi rdi, RDI+8
-       movq_cfi rsi, RSI+8
-       movq_cfi rdx, RDX+8
-       movq_cfi rcx, RCX+8
-       movq_cfi rax, RAX+8
-       movq_cfi  r8,  R8+8
-       movq_cfi  r9,  R9+8
-       movq_cfi r10, R10+8
-       movq_cfi r11, R11+8
+       movq %rdi, RDI+8(%rsp)
+       movq %rsi, RSI+8(%rsp)
+       movq %rdx, RDX+8(%rsp)
+       movq %rcx, RCX+8(%rsp)
+       movq %rax, RAX+8(%rsp)
+       movq  %r8,  R8+8(%rsp)
+       movq  %r9,  R9+8(%rsp)
+       movq %r10, R10+8(%rsp)
+       movq %r11, R11+8(%rsp)
        movq_cfi rbx, RBX+8
-       movq_cfi rbp, RBP+8
-       movq_cfi r12, R12+8
-       movq_cfi r13, R13+8
-       movq_cfi r14, R14+8
-       movq_cfi r15, R15+8
+       movq %rbp, RBP+8(%rsp)
+       movq %r12, R12+8(%rsp)
+       movq %r13, R13+8(%rsp)
+       movq %r14, R14+8(%rsp)
+       movq %r15, R15+8(%rsp)
        xorl %ebx,%ebx
        testl $3,CS+8(%rsp)
        je error_kernelspace
@@ -1427,8 +1418,9 @@ error_sti:
  * compat mode. Check for these here too.
  */
 error_kernelspace:
+       CFI_REL_OFFSET rcx, RCX+8
        incl %ebx
-       leaq irq_return_iret(%rip),%rcx
+       leaq native_irq_return_iret(%rip),%rcx
        cmpq %rcx,RIP+8(%rsp)
        je error_swapgs
        movl %ecx,%eax  /* zero extend */
index 6afbb16..94d857f 100644 (file)
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
        if (!pud_present(pud)) {
                pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
                pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
-               paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+               paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
                for (n = 0; n < ESPFIX_PUD_CLONES; n++)
                        set_pud(&pud_p[n], pud);
        }
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
        if (!pmd_present(pmd)) {
                pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
                pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
-               paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+               paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
                for (n = 0; n < ESPFIX_PMD_CLONES; n++)
                        set_pmd(&pmd_p[n], pmd);
        }
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
        pte_p = pte_offset_kernel(&pmd, addr);
        stack_page = (void *)__get_free_page(GFP_KERNEL);
        pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
-       paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
        for (n = 0; n < ESPFIX_PTE_CLONES; n++)
                set_pte(&pte_p[n*PTE_STRIDE], pte);
 
index cbc4a91..3386dc9 100644 (file)
@@ -703,6 +703,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
        unsigned long return_hooker = (unsigned long)
                                &return_to_handler;
 
+       if (unlikely(ftrace_graph_is_dead()))
+               return;
+
        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;
 
index 7596df6..67e6d19 100644 (file)
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
+       if (user_mode_vm(regs))
+               return 0;
+
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
        /*
         * We don't want to be preempted for the entire
index c050a01..c73aecf 100644 (file)
@@ -46,10 +46,6 @@ END(function_hook)
 .endm
 
 ENTRY(ftrace_caller)
-       /* Check if tracing was disabled (quick check) */
-       cmpl $0, function_trace_stop
-       jne  ftrace_stub
-
        ftrace_caller_setup
        /* regs go into 4th parameter (but make it NULL) */
        movq $0, %rcx
@@ -73,10 +69,6 @@ ENTRY(ftrace_regs_caller)
        /* Save the current flags before compare (in SS location)*/
        pushfq
 
-       /* Check if tracing was disabled (quick check) */
-       cmpl $0, function_trace_stop
-       jne  ftrace_restore_flags
-
        /* skip=8 to skip flags saved in SS */
        ftrace_caller_setup 8
 
@@ -131,7 +123,7 @@ GLOBAL(ftrace_regs_call)
        popfq
 
        jmp ftrace_return
-ftrace_restore_flags:
+
        popfq
        jmp  ftrace_stub
 
@@ -141,9 +133,6 @@ END(ftrace_regs_caller)
 #else /* ! CONFIG_DYNAMIC_FTRACE */
 
 ENTRY(function_hook)
-       cmpl $0, function_trace_stop
-       jne  ftrace_stub
-
        cmpq $ftrace_stub, ftrace_trace_function
        jnz trace
 
index 3f08f34..a1da673 100644 (file)
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
 DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
 DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
 DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
-DEF_NATIVE(pv_cpu_ops, iret, "iretq");
 DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
 DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
 DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_irq_ops, save_fl);
                PATCH_SITE(pv_irq_ops, irq_enable);
                PATCH_SITE(pv_irq_ops, irq_disable);
-               PATCH_SITE(pv_cpu_ops, iret);
                PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
                PATCH_SITE(pv_cpu_ops, usergs_sysret32);
                PATCH_SITE(pv_cpu_ops, usergs_sysret64);
index 2a26819..80eab01 100644 (file)
@@ -37,10 +37,12 @@ static void remove_e820_regions(struct resource *avail)
 
 void arch_remove_reservations(struct resource *avail)
 {
-       /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */
+       /*
+        * Trim out BIOS area (high 2MB) and E820 regions. We do not remove
+        * the low 1MB unconditionally, as this area is needed for some ISA
+        * cards requiring a memory range, e.g. the i82365 PCMCIA controller.
+        */
        if (avail->flags & IORESOURCE_MEM) {
-               if (avail->start < BIOS_END)
-                       avail->start = BIOS_END;
                resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END);
 
                remove_e820_regions(avail);
index a0da58d..2851d63 100644 (file)
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
                /* Set up to return from userspace.  */
                restorer = current->mm->context.vdso +
-                       selected_vdso32->sym___kernel_sigreturn;
+                       selected_vdso32->sym___kernel_rt_sigreturn;
                if (ksig->ka.sa.sa_flags & SA_RESTORER)
                        restorer = ksig->ka.sa.sa_restorer;
                put_user_ex(restorer, &frame->pretcode);
index c6eb418..0d0e922 100644 (file)
@@ -343,6 +343,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
        if (poke_int3_handler(regs))
                return;
 
+       prev_state = exception_enter();
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                                SIGTRAP) == NOTIFY_STOP)
@@ -351,9 +352,8 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 
 #ifdef CONFIG_KPROBES
        if (kprobe_int3_handler(regs))
-               return;
+               goto exit;
 #endif
-       prev_state = exception_enter();
 
        if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                        SIGTRAP) == NOTIFY_STOP)
@@ -433,6 +433,8 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        unsigned long dr6;
        int si_code;
 
+       prev_state = exception_enter();
+
        get_debugreg(dr6, 6);
 
        /* Filter out all the reserved bits which are preset to 1 */
@@ -465,7 +467,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        if (kprobe_debug_handler(regs))
                goto exit;
 #endif
-       prev_state = exception_enter();
 
        if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
                                                        SIGTRAP) == NOTIFY_STOP)
index 57e5ce1..56b0c33 100644 (file)
@@ -234,9 +234,6 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
        return ns;
 }
 
-/* XXX surely we already have this someplace in the kernel?! */
-#define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d))
-
 static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 {
        unsigned long long tsc_now, ns_now;
@@ -259,7 +256,9 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
         * time function is continuous; see the comment near struct
         * cyc2ns_data.
         */
-       data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz);
+       data->cyc2ns_mul =
+               DIV_ROUND_CLOSEST(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR,
+                                 cpu_khz);
        data->cyc2ns_shift = CYC2NS_SCALE_FACTOR;
        data->cyc2ns_offset = ns_now -
                mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR);
@@ -920,9 +919,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        mark_tsc_unstable("cpufreq changes");
-       }
 
-       set_cyc2ns_scale(tsc_khz, freq->cpu);
+               set_cyc2ns_scale(tsc_khz, freq->cpu);
+       }
 
        return 0;
 }
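
The open-coded DIV_ROUND macro removed above duplicated the kernel's existing DIV_ROUND_CLOSEST: add half the divisor before dividing, so the result rounds to nearest instead of truncating. A quick user-space check with a hypothetical 1.3 GHz cpu_khz and an assumed CYC2NS_SCALE_FACTOR of 10:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d) (((n) + ((d) / 2)) / (d))  /* non-negative operands */

int main(void)
{
	unsigned long long n = 1000000ULL << 10;  /* NSEC_PER_MSEC << 10 */
	unsigned long long khz = 1300000;         /* hypothetical 1.3 GHz */

	printf("truncated: %llu\n", n / khz);                   /* 787 */
	printf("rounded:   %llu\n", DIV_ROUND_CLOSEST(n, khz)); /* 788 */
	return 0;
}
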
index f908731..a538059 100644 (file)
@@ -95,4 +95,12 @@ static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu)
        best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
        return best && (best->edx & bit(X86_FEATURE_GBPAGES));
 }
+
+static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_RTM));
+}
 #endif
index e4e833d..56657b0 100644 (file)
 #define NoWrite     ((u64)1 << 45)  /* No writeback */
 #define SrcWrite    ((u64)1 << 46)  /* Write back src operand */
 #define NoMod      ((u64)1 << 47)  /* Mod field is ignored */
+#define Intercept   ((u64)1 << 48)  /* Has valid intercept field */
+#define CheckPerm   ((u64)1 << 49)  /* Has valid check_perm field */
+#define NoBigReal   ((u64)1 << 50)  /* No big real mode */
+#define PrivUD      ((u64)1 << 51)  /* #UD instead of #GP on CPL > 0 */
 
 #define DstXacc     (DstAccLo | SrcAccHi | SrcWrite)
 
@@ -426,6 +430,7 @@ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
                .modrm_reg  = ctxt->modrm_reg,
                .modrm_rm   = ctxt->modrm_rm,
                .src_val    = ctxt->src.val64,
+               .dst_val    = ctxt->dst.val64,
                .src_bytes  = ctxt->src.bytes,
                .dst_bytes  = ctxt->dst.bytes,
                .ad_bytes   = ctxt->ad_bytes,
@@ -511,12 +516,6 @@ static u32 desc_limit_scaled(struct desc_struct *desc)
        return desc->g ? (limit << 12) | 0xfff : limit;
 }
 
-static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg)
-{
-       ctxt->has_seg_override = true;
-       ctxt->seg_override = seg;
-}
-
 static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 {
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
@@ -525,14 +524,6 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
        return ctxt->ops->get_cached_segment_base(ctxt, seg);
 }
 
-static unsigned seg_override(struct x86_emulate_ctxt *ctxt)
-{
-       if (!ctxt->has_seg_override)
-               return 0;
-
-       return ctxt->seg_override;
-}
-
 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
                             u32 error, bool valid)
 {
@@ -651,7 +642,12 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
                if (!fetch && (desc.type & 8) && !(desc.type & 2))
                        goto bad;
                lim = desc_limit_scaled(&desc);
-               if ((desc.type & 8) || !(desc.type & 4)) {
+               if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
+                   (ctxt->d & NoBigReal)) {
+                       /* la is between zero and 0xffff */
+                       if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+                               goto bad;
+               } else if ((desc.type & 8) || !(desc.type & 4)) {
                        /* expand-up segment */
                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
                                goto bad;
@@ -716,68 +712,71 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 }
 
 /*
- * Fetch the next byte of the instruction being emulated which is pointed to
- * by ctxt->_eip, then increment ctxt->_eip.
- *
- * Also prefetch the remaining bytes of the instruction without crossing page
+ * Prefetch the remaining bytes of the instruction without crossing a page
  * boundary if they are not in fetch_cache yet.
  */
-static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
+static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
-       struct fetch_cache *fc = &ctxt->fetch;
        int rc;
-       int size, cur_size;
-
-       if (ctxt->_eip == fc->end) {
-               unsigned long linear;
-               struct segmented_address addr = { .seg = VCPU_SREG_CS,
-                                                 .ea  = ctxt->_eip };
-               cur_size = fc->end - fc->start;
-               size = min(15UL - cur_size,
-                          PAGE_SIZE - offset_in_page(ctxt->_eip));
-               rc = __linearize(ctxt, addr, size, false, true, &linear);
-               if (unlikely(rc != X86EMUL_CONTINUE))
-                       return rc;
-               rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size,
-                                     size, &ctxt->exception);
-               if (unlikely(rc != X86EMUL_CONTINUE))
-                       return rc;
-               fc->end += size;
-       }
-       *dest = fc->data[ctxt->_eip - fc->start];
-       ctxt->_eip++;
-       return X86EMUL_CONTINUE;
-}
+       unsigned size;
+       unsigned long linear;
+       int cur_size = ctxt->fetch.end - ctxt->fetch.data;
+       struct segmented_address addr = { .seg = VCPU_SREG_CS,
+                                          .ea = ctxt->eip + cur_size };
+
+       size = 15UL ^ cur_size;
+       rc = __linearize(ctxt, addr, size, false, true, &linear);
+       if (unlikely(rc != X86EMUL_CONTINUE))
+               return rc;
 
-static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
-                        void *dest, unsigned size)
-{
-       int rc;
+       size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
-       /* x86 instructions are limited to 15 bytes. */
-       if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
+       /*
+        * An instruction can straddle at most two pages, and
+        * one of them has already been loaded at the beginning
+        * of x86_decode_insn.  So if there still are not enough
+        * bytes, we must have hit the 15-byte limit.
+        */
+       if (unlikely(size < op_size))
                return X86EMUL_UNHANDLEABLE;
-       while (size--) {
-               rc = do_insn_fetch_byte(ctxt, dest++);
-               if (rc != X86EMUL_CONTINUE)
-                       return rc;
-       }
+       rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
+                             size, &ctxt->exception);
+       if (unlikely(rc != X86EMUL_CONTINUE))
+               return rc;
+       ctxt->fetch.end += size;
        return X86EMUL_CONTINUE;
 }
 
+static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
+                                              unsigned size)
+{
+       if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
+               return __do_insn_fetch_bytes(ctxt, size);
+       else
+               return X86EMUL_CONTINUE;
+}
+
 /* Fetch next part of the instruction being emulated. */
 #define insn_fetch(_type, _ctxt)                                       \
-({     unsigned long _x;                                               \
-       rc = do_insn_fetch(_ctxt, &_x, sizeof(_type));                  \
+({     _type _x;                                                       \
+                                                                       \
+       rc = do_insn_fetch_bytes(_ctxt, sizeof(_type));                 \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
-       (_type)_x;                                                      \
+       ctxt->_eip += sizeof(_type);                                    \
+       _x = *(_type __aligned(1) *) ctxt->fetch.ptr;                   \
+       ctxt->fetch.ptr += sizeof(_type);                               \
+       _x;                                                             \
 })
 
 #define insn_fetch_arr(_arr, _size, _ctxt)                             \
-({     rc = do_insn_fetch(_ctxt, _arr, (_size));                       \
+({                                                                     \
+       rc = do_insn_fetch_bytes(_ctxt, _size);                         \
        if (rc != X86EMUL_CONTINUE)                                     \
                goto done;                                              \
+       ctxt->_eip += (_size);                                          \
+       memcpy(_arr, ctxt->fetch.ptr, _size);                           \
+       ctxt->fetch.ptr += (_size);                                     \
 })
 
 /*
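
The rewrite above replaces the old per-byte do_insn_fetch_byte() with a cached window: the inlined do_insn_fetch_bytes() is just a pointer comparison on the fast path, and only falls back to the out-of-line __do_insn_fetch_bytes() when the cache runs dry. (The `size = 15UL ^ cur_size` is a small trick: cur_size is always in 0..15, and for those values 15 ^ cur_size equals 15 - cur_size.) A standalone sketch of the caching pattern, with all names invented here:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

enum { CACHE_MAX = 15 };                /* x86 insns are at most 15 bytes */

struct fetch_cache {
        unsigned char data[CACHE_MAX];
        unsigned char *ptr;             /* next byte to hand out */
        unsigned char *end;             /* one past the last cached byte */
};

typedef size_t (*read_fn)(void *dst, size_t len, void *cookie);

/* slow path: pull more bytes from the backing source */
static int refill(struct fetch_cache *fc, read_fn read, void *cookie,
                  size_t need)
{
        size_t cur = (size_t)(fc->end - fc->data);
        size_t got = read(fc->end, CACHE_MAX - cur, cookie);

        fc->end += got;
        return (size_t)(fc->end - fc->ptr) >= need ? 0 : -1;
}

/* fast path: a pointer comparison, then a small copy */
static int fetch(struct fetch_cache *fc, read_fn read, void *cookie,
                 void *dst, size_t len)
{
        if ((size_t)(fc->end - fc->ptr) < len &&
            refill(fc, read, cookie, len))
                return -1;
        memcpy(dst, fc->ptr, len);
        fc->ptr += len;
        return 0;
}

static size_t mem_read(void *dst, size_t len, void *cookie)
{
        (void)cookie;
        memset(dst, 0x90, len);         /* toy backing store: NOPs */
        return len;
}

int main(void)
{
        struct fetch_cache fc = { .ptr = fc.data, .end = fc.data };
        unsigned char op;

        if (fetch(&fc, mem_read, NULL, &op, 1) == 0)
                printf("opcode %#x\n", op);
        return 0;
}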
@@ -1063,19 +1062,17 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                        struct operand *op)
 {
        u8 sib;
-       int index_reg = 0, base_reg = 0, scale;
+       int index_reg, base_reg, scale;
        int rc = X86EMUL_CONTINUE;
        ulong modrm_ea = 0;
 
-       if (ctxt->rex_prefix) {
-               ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1;  /* REX.R */
-               index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */
-               ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
-       }
+       ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */
+       index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */
+       base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */
 
-       ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
+       ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6;
        ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
-       ctxt->modrm_rm |= (ctxt->modrm & 0x07);
+       ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07);
        ctxt->modrm_seg = VCPU_SREG_DS;
 
        if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) {
@@ -1093,7 +1090,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                if (ctxt->d & Mmx) {
                        op->type = OP_MM;
                        op->bytes = 8;
-                       op->addr.xmm = ctxt->modrm_rm & 7;
+                       op->addr.mm = ctxt->modrm_rm & 7;
                        return rc;
                }
                fetch_register_operand(op);
@@ -1190,6 +1187,9 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                }
        }
        op->addr.mem.ea = modrm_ea;
+       if (ctxt->ad_bytes != 8)
+               ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
+
 done:
        return rc;
 }
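
The decode_modrm() change above drops the REX branch by relying on ctxt->rex_prefix being zero when no prefix was seen: REX.R, REX.X and REX.B are bits 2, 1 and 0 of the prefix byte, and the shifts move each straight into bit 3 of the reg/index/base numbers. A self-contained sketch of the same extraction (values made up for the demo):

#include <stdio.h>

int main(void)
{
        unsigned char rex   = 0x44;     /* 0100WRXB with R set */
        unsigned char modrm = 0xd1;     /* mod=11 reg=010 rm=001 */

        unsigned mod = (modrm & 0xc0) >> 6;
        unsigned reg = ((rex << 1) & 8) | ((modrm & 0x38) >> 3); /* REX.R -> bit 3 */
        unsigned rm  = ((rex << 3) & 8) | (modrm & 0x07);        /* REX.B -> bit 3 */

        printf("mod=%u reg=%u rm=%u\n", mod, reg, rm);  /* mod=3 reg=10 rm=1 */
        return 0;
}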
@@ -1220,12 +1220,14 @@ static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
        long sv = 0, mask;
 
        if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) {
-               mask = ~(ctxt->dst.bytes * 8 - 1);
+               mask = ~((long)ctxt->dst.bytes * 8 - 1);
 
                if (ctxt->src.bytes == 2)
                        sv = (s16)ctxt->src.val & (s16)mask;
                else if (ctxt->src.bytes == 4)
                        sv = (s32)ctxt->src.val & (s32)mask;
+               else
+                       sv = (s64)ctxt->src.val & (s64)mask;
 
                ctxt->dst.addr.mem.ea += (sv >> 3);
        }
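
fetch_bit_operand() handles the memory forms of BT/BTS/BTR/BTC, where the bit offset in the source register is signed and may point outside the addressed operand: the offset is masked to whole operands and its byte part is folded into the effective address. A worked standalone example (all values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int     bytes = 8;                    /* 64-bit operand */
        int64_t src   = -9;                   /* signed bit offset from the register */
        long    mask  = ~((long)bytes * 8 - 1);
        int64_t sv    = src & mask;           /* whole-operand part of the offset */
        long    ea    = 0x1000;               /* original effective address */

        ea += sv >> 3;                        /* fold the byte part into the address */
        printf("ea=%#lx bit=%lld\n", ea, (long long)(src & ~mask));
        /* prints ea=0xff8 bit=55: bit -9 is bit 55 of the qword at ea-8 */
        return 0;
}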
@@ -1315,8 +1317,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                in_page = (ctxt->eflags & EFLG_DF) ?
                        offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
                        PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
-               n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
-                       count);
+               n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count);
                if (n == 0)
                        n = 1;
                rc->pos = rc->end = 0;
@@ -1358,17 +1359,19 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
                                     u16 selector, struct desc_ptr *dt)
 {
        const struct x86_emulate_ops *ops = ctxt->ops;
+       u32 base3 = 0;
 
        if (selector & 1 << 2) {
                struct desc_struct desc;
                u16 sel;
 
                memset (dt, 0, sizeof *dt);
-               if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
+               if (!ops->get_segment(ctxt, &sel, &desc, &base3,
+                                     VCPU_SREG_LDTR))
                        return;
 
                dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
-               dt->address = get_desc_base(&desc);
+               dt->address = get_desc_base(&desc) | ((u64)base3 << 32);
        } else
                ops->get_gdt(ctxt, dt);
 }
@@ -1422,6 +1425,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        ulong desc_addr;
        int ret;
        u16 dummy;
+       u32 base3 = 0;
 
        memset(&seg_desc, 0, sizeof seg_desc);
 
@@ -1538,9 +1542,14 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                ret = write_segment_descriptor(ctxt, selector, &seg_desc);
                if (ret != X86EMUL_CONTINUE)
                        return ret;
+       } else if (ctxt->mode == X86EMUL_MODE_PROT64) {
+               ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3,
+                               sizeof(base3), &ctxt->exception);
+               if (ret != X86EMUL_CONTINUE)
+                       return ret;
        }
 load:
-       ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
+       ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
        return X86EMUL_CONTINUE;
 exception:
        emulate_exception(ctxt, err_vec, err_code, true);
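
The base3 changes above account for long mode, where LDT and TSS descriptors are 16 bytes and carry base bits 63:32 in the second quadword; the extra dword is read with read_std() and OR-ed into the base. A sketch of rebuilding such a base from a descriptor image (field layout per the architecture, names and values invented):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t lo = 0;                         /* low quadword of the descriptor */
        uint32_t base3 = 0xfffff800;             /* second quadword: base 63:32 */
        uint64_t base_low = 0x89abcdef;          /* base 31:0, for the demo */

        /* pack base_low into the legacy base fields of the low quadword */
        lo |= (base_low & 0xffffff) << 16;       /* base 23:0  -> bits 39:16 */
        lo |= (base_low >> 24) << 56;            /* base 31:24 -> bits 63:56 */

        /* get_desc_base()-style unpack, then OR in the long-mode high half */
        uint64_t base = ((lo >> 16) & 0xffffff) | (((lo >> 56) & 0xff) << 24);
        base |= (uint64_t)base3 << 32;

        printf("base=%#llx\n", (unsigned long long)base);
        /* prints base=0xfffff80089abcdef */
        return 0;
}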
@@ -1575,34 +1584,28 @@ static void write_register_operand(struct operand *op)
 
 static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op)
 {
-       int rc;
-
        switch (op->type) {
        case OP_REG:
                write_register_operand(op);
                break;
        case OP_MEM:
                if (ctxt->lock_prefix)
-                       rc = segmented_cmpxchg(ctxt,
+                       return segmented_cmpxchg(ctxt,
+                                                op->addr.mem,
+                                                &op->orig_val,
+                                                &op->val,
+                                                op->bytes);
+               else
+                       return segmented_write(ctxt,
                                               op->addr.mem,
-                                              &op->orig_val,
                                               &op->val,
                                               op->bytes);
-               else
-                       rc = segmented_write(ctxt,
-                                            op->addr.mem,
-                                            &op->val,
-                                            op->bytes);
-               if (rc != X86EMUL_CONTINUE)
-                       return rc;
                break;
        case OP_MEM_STR:
-               rc = segmented_write(ctxt,
-                               op->addr.mem,
-                               op->data,
-                               op->bytes * op->count);
-               if (rc != X86EMUL_CONTINUE)
-                       return rc;
+               return segmented_write(ctxt,
+                                      op->addr.mem,
+                                      op->data,
+                                      op->bytes * op->count);
                break;
        case OP_XMM:
                write_sse_reg(ctxt, &op->vec_val, op->addr.xmm);
@@ -1671,7 +1674,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
                return rc;
 
        change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
-               | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
+               | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID;
 
        switch(ctxt->mode) {
        case X86EMUL_MODE_PROT64:
@@ -1754,6 +1757,9 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
+       if (ctxt->modrm_reg == VCPU_SREG_SS)
+               ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
+
        rc = load_segment_descriptor(ctxt, (u16)selector, seg);
        return rc;
 }
@@ -1991,6 +1997,9 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
 {
        u64 old = ctxt->dst.orig_val64;
 
+       if (ctxt->dst.bytes == 16)
+               return X86EMUL_UNHANDLEABLE;
+
        if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
            ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
                *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
@@ -2017,6 +2026,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 {
        int rc;
        unsigned long cs;
+       int cpl = ctxt->ops->cpl(ctxt);
 
        rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
@@ -2026,6 +2036,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
        rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
        if (rc != X86EMUL_CONTINUE)
                return rc;
+       /* Outer-privilege level return is not implemented */
+       if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
+               return X86EMUL_UNHANDLEABLE;
        rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
        return rc;
 }
@@ -2044,8 +2057,10 @@ static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt)
 static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
 {
        /* Save real source value, then compare EAX against destination. */
+       ctxt->dst.orig_val = ctxt->dst.val;
+       ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX);
        ctxt->src.orig_val = ctxt->src.val;
-       ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
+       ctxt->src.val = ctxt->dst.orig_val;
        fastop(ctxt, em_cmp);
 
        if (ctxt->eflags & EFLG_ZF) {
@@ -2055,6 +2070,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
                /* Failure: write the value we saw to EAX. */
                ctxt->dst.type = OP_REG;
                ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
+               ctxt->dst.val = ctxt->dst.orig_val;
        }
        return X86EMUL_CONTINUE;
 }
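
The em_cmpxchg() fix models the architectural behavior that the destination is written back even when the compare fails, with its old value (the SDM notes the processor never does a locked read without a locked write), while RAX receives what was in memory. A small C model of the semantics as the fixed code treats them:

#include <stdint.h>
#include <stdio.h>

struct cpu { uint64_t rax; int zf; };

static void cmpxchg(struct cpu *c, uint64_t *dst, uint64_t src)
{
        uint64_t old = *dst;

        if (c->rax == old) {
                *dst = src;             /* success: dst <- src, ZF set */
                c->zf = 1;
        } else {
                c->rax = old;           /* failure: RAX sees the memory value */
                *dst = old;             /* dst is still written, with its old value */
                c->zf = 0;
        }
}

int main(void)
{
        struct cpu c = { .rax = 1, .zf = 0 };
        uint64_t mem = 2;

        cmpxchg(&c, &mem, 99);
        printf("zf=%d rax=%llu mem=%llu\n", c.zf,
               (unsigned long long)c.rax, (unsigned long long)mem);
        /* prints zf=0 rax=2 mem=2 */
        return 0;
}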
@@ -2194,7 +2210,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
        *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
        if (efer & EFER_LMA) {
 #ifdef CONFIG_X86_64
-               *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;
+               *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags;
 
                ops->get_msr(ctxt,
                             ctxt->mode == X86EMUL_MODE_PROT64 ?
@@ -2202,14 +2218,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
                ctxt->_eip = msr_data;
 
                ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
-               ctxt->eflags &= ~(msr_data | EFLG_RF);
+               ctxt->eflags &= ~msr_data;
 #endif
        } else {
                /* legacy mode */
                ops->get_msr(ctxt, MSR_STAR, &msr_data);
                ctxt->_eip = (u32)msr_data;
 
-               ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+               ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
        }
 
        return X86EMUL_CONTINUE;
@@ -2258,7 +2274,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
                break;
        }
 
-       ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
+       ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
        cs_sel = (u16)msr_data;
        cs_sel &= ~SELECTOR_RPL_MASK;
        ss_sel = cs_sel + 8;
@@ -2964,7 +2980,7 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
 
 static int em_mov(struct x86_emulate_ctxt *ctxt)
 {
-       memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
+       memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr));
        return X86EMUL_CONTINUE;
 }
 
@@ -3221,7 +3237,8 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt)
 
 static int em_smsw(struct x86_emulate_ctxt *ctxt)
 {
-       ctxt->dst.bytes = 2;
+       if (ctxt->dst.type == OP_MEM)
+               ctxt->dst.bytes = 2;
        ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0);
        return X86EMUL_CONTINUE;
 }
@@ -3496,7 +3513,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
        u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
 
        if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
-           (rcx > 3))
+           ctxt->ops->check_pmc(ctxt, rcx))
                return emulate_gp(ctxt, 0);
 
        return X86EMUL_CONTINUE;
@@ -3521,9 +3538,9 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 }
 
 #define D(_y) { .flags = (_y) }
-#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
-#define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \
-                     .check_perm = (_p) }
+#define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i }
+#define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \
+                     .intercept = x86_intercept_##_i, .check_perm = (_p) }
 #define N    D(NotImpl)
 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
@@ -3532,10 +3549,10 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
 #define II(_f, _e, _i) \
-       { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
+       { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
 #define IIP(_f, _e, _i, _p) \
-       { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \
-         .check_perm = (_p) }
+       { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \
+         .intercept = x86_intercept_##_i, .check_perm = (_p) }
 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
 
 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
@@ -3634,8 +3651,8 @@ static const struct opcode group6[] = {
 };
 
 static const struct group_dual group7 = { {
-       II(Mov | DstMem | Priv,                 em_sgdt, sgdt),
-       II(Mov | DstMem | Priv,                 em_sidt, sidt),
+       II(Mov | DstMem,                        em_sgdt, sgdt),
+       II(Mov | DstMem,                        em_sidt, sidt),
        II(SrcMem | Priv,                       em_lgdt, lgdt),
        II(SrcMem | Priv,                       em_lidt, lidt),
        II(SrcNone | DstMem | Mov,              em_smsw, smsw), N,
@@ -3899,7 +3916,7 @@ static const struct opcode twobyte_table[256] = {
        N, N,
        N, N, N, N, N, N, N, N,
        /* 0x40 - 0x4F */
-       X16(D(DstReg | SrcMem | ModRM | Mov)),
+       X16(D(DstReg | SrcMem | ModRM)),
        /* 0x50 - 0x5F */
        N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
        /* 0x60 - 0x6F */
@@ -4061,12 +4078,12 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
        mem_common:
                *op = ctxt->memop;
                ctxt->memopp = op;
-               if ((ctxt->d & BitOp) && op == &ctxt->dst)
+               if (ctxt->d & BitOp)
                        fetch_bit_operand(ctxt);
                op->orig_val = op->val;
                break;
        case OpMem64:
-               ctxt->memop.bytes = 8;
+               ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8;
                goto mem_common;
        case OpAcc:
                op->type = OP_REG;
@@ -4150,7 +4167,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                op->addr.mem.ea =
                        register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
-               op->addr.mem.seg = seg_override(ctxt);
+               op->addr.mem.seg = ctxt->seg_override;
                op->val = 0;
                op->count = 1;
                break;
@@ -4161,7 +4178,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                        register_address(ctxt,
                                reg_read(ctxt, VCPU_REGS_RBX) +
                                (reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
-               op->addr.mem.seg = seg_override(ctxt);
+               op->addr.mem.seg = ctxt->seg_override;
                op->val = 0;
                break;
        case OpImmFAddr:
@@ -4208,16 +4225,22 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
        int mode = ctxt->mode;
        int def_op_bytes, def_ad_bytes, goffset, simd_prefix;
        bool op_prefix = false;
+       bool has_seg_override = false;
        struct opcode opcode;
 
        ctxt->memop.type = OP_NONE;
        ctxt->memopp = NULL;
        ctxt->_eip = ctxt->eip;
-       ctxt->fetch.start = ctxt->_eip;
-       ctxt->fetch.end = ctxt->fetch.start + insn_len;
+       ctxt->fetch.ptr = ctxt->fetch.data;
+       ctxt->fetch.end = ctxt->fetch.data + insn_len;
        ctxt->opcode_len = 1;
        if (insn_len > 0)
                memcpy(ctxt->fetch.data, insn, insn_len);
+       else {
+               rc = __do_insn_fetch_bytes(ctxt, 1);
+               if (rc != X86EMUL_CONTINUE)
+                       return rc;
+       }
 
        switch (mode) {
        case X86EMUL_MODE_REAL:
@@ -4261,11 +4284,13 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
                case 0x2e:      /* CS override */
                case 0x36:      /* SS override */
                case 0x3e:      /* DS override */
-                       set_seg_override(ctxt, (ctxt->b >> 3) & 3);
+                       has_seg_override = true;
+                       ctxt->seg_override = (ctxt->b >> 3) & 3;
                        break;
                case 0x64:      /* FS override */
                case 0x65:      /* GS override */
-                       set_seg_override(ctxt, ctxt->b & 7);
+                       has_seg_override = true;
+                       ctxt->seg_override = ctxt->b & 7;
                        break;
                case 0x40 ... 0x4f: /* REX */
                        if (mode != X86EMUL_MODE_PROT64)
@@ -4314,6 +4339,13 @@ done_prefixes:
        if (ctxt->d & ModRM)
                ctxt->modrm = insn_fetch(u8, ctxt);
 
+       /* vex-prefix instructions are not implemented */
+       if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
+           (mode == X86EMUL_MODE_PROT64 ||
+           (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
+               ctxt->d = NotImpl;
+       }
+
        while (ctxt->d & GroupMask) {
                switch (ctxt->d & GroupMask) {
                case Group:
@@ -4356,49 +4388,59 @@ done_prefixes:
                ctxt->d |= opcode.flags;
        }
 
-       ctxt->execute = opcode.u.execute;
-       ctxt->check_perm = opcode.check_perm;
-       ctxt->intercept = opcode.intercept;
-
        /* Unrecognised? */
-       if (ctxt->d == 0 || (ctxt->d & NotImpl))
+       if (ctxt->d == 0)
                return EMULATION_FAILED;
 
-       if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
-               return EMULATION_FAILED;
+       ctxt->execute = opcode.u.execute;
 
-       if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
-               ctxt->op_bytes = 8;
+       if (unlikely(ctxt->d &
+                    (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+               /*
+                * These are copied unconditionally here, and checked unconditionally
+                * in x86_emulate_insn.
+                */
+               ctxt->check_perm = opcode.check_perm;
+               ctxt->intercept = opcode.intercept;
+
+               if (ctxt->d & NotImpl)
+                       return EMULATION_FAILED;
+
+               if (!(ctxt->d & EmulateOnUD) && ctxt->ud)
+                       return EMULATION_FAILED;
 
-       if (ctxt->d & Op3264) {
-               if (mode == X86EMUL_MODE_PROT64)
+               if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
                        ctxt->op_bytes = 8;
-               else
-                       ctxt->op_bytes = 4;
-       }
 
-       if (ctxt->d & Sse)
-               ctxt->op_bytes = 16;
-       else if (ctxt->d & Mmx)
-               ctxt->op_bytes = 8;
+               if (ctxt->d & Op3264) {
+                       if (mode == X86EMUL_MODE_PROT64)
+                               ctxt->op_bytes = 8;
+                       else
+                               ctxt->op_bytes = 4;
+               }
+
+               if (ctxt->d & Sse)
+                       ctxt->op_bytes = 16;
+               else if (ctxt->d & Mmx)
+                       ctxt->op_bytes = 8;
+       }
 
        /* ModRM and SIB bytes. */
        if (ctxt->d & ModRM) {
                rc = decode_modrm(ctxt, &ctxt->memop);
-               if (!ctxt->has_seg_override)
-                       set_seg_override(ctxt, ctxt->modrm_seg);
+               if (!has_seg_override) {
+                       has_seg_override = true;
+                       ctxt->seg_override = ctxt->modrm_seg;
+               }
        } else if (ctxt->d & MemAbs)
                rc = decode_abs(ctxt, &ctxt->memop);
        if (rc != X86EMUL_CONTINUE)
                goto done;
 
-       if (!ctxt->has_seg_override)
-               set_seg_override(ctxt, VCPU_SREG_DS);
-
-       ctxt->memop.addr.mem.seg = seg_override(ctxt);
+       if (!has_seg_override)
+               ctxt->seg_override = VCPU_SREG_DS;
 
-       if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8)
-               ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea;
+       ctxt->memop.addr.mem.seg = ctxt->seg_override;
 
        /*
         * Decode and fetch the source operand: register, memory
@@ -4420,7 +4462,7 @@ done_prefixes:
        rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
 
 done:
-       if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative)
+       if (ctxt->rip_relative)
                ctxt->memopp->addr.mem.ea += ctxt->_eip;
 
        return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
@@ -4495,6 +4537,16 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *))
        return X86EMUL_CONTINUE;
 }
 
+void init_decode_cache(struct x86_emulate_ctxt *ctxt)
+{
+       memset(&ctxt->rip_relative, 0,
+              (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
+
+       ctxt->io_read.pos = 0;
+       ctxt->io_read.end = 0;
+       ctxt->mem_read.end = 0;
+}
+
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
        const struct x86_emulate_ops *ops = ctxt->ops;
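
init_decode_cache() above zeroes the whole run of per-instruction decode fields with one memset between two member addresses, which depends on their declaration order in the struct. A portable sketch of the same pattern using offsetof (field names invented):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct decode_state {
        int keep_me;              /* survives the reset */
        /* everything from rip_relative up to also_kept is cleared */
        int rip_relative;
        int seg_override;
        unsigned long ea;
        int last_cleared;
        int also_kept;            /* first field that survives again */
};

static void reset(struct decode_state *s)
{
        memset(&s->rip_relative, 0,
               offsetof(struct decode_state, also_kept) -
               offsetof(struct decode_state, rip_relative));
}

int main(void)
{
        struct decode_state s = { 1, 2, 3, 4, 5, 6 };

        reset(&s);
        printf("%d %d %d\n", s.keep_me, s.rip_relative, s.also_kept); /* 1 0 6 */
        return 0;
}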
@@ -4503,12 +4555,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
        ctxt->mem_read.pos = 0;
 
-       if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
-                       (ctxt->d & Undefined)) {
-               rc = emulate_ud(ctxt);
-               goto done;
-       }
-
        /* LOCK prefix is allowed only with some instructions */
        if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) {
                rc = emulate_ud(ctxt);
@@ -4520,69 +4566,82 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
                goto done;
        }
 
-       if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
-           || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
-               rc = emulate_ud(ctxt);
-               goto done;
-       }
-
-       if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
-               rc = emulate_nm(ctxt);
-               goto done;
-       }
+       if (unlikely(ctxt->d &
+                    (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
+               if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
+                               (ctxt->d & Undefined)) {
+                       rc = emulate_ud(ctxt);
+                       goto done;
+               }
 
-       if (ctxt->d & Mmx) {
-               rc = flush_pending_x87_faults(ctxt);
-               if (rc != X86EMUL_CONTINUE)
+               if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
+                   || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
+                       rc = emulate_ud(ctxt);
                        goto done;
-               /*
-                * Now that we know the fpu is exception safe, we can fetch
-                * operands from it.
-                */
-               fetch_possible_mmx_operand(ctxt, &ctxt->src);
-               fetch_possible_mmx_operand(ctxt, &ctxt->src2);
-               if (!(ctxt->d & Mov))
-                       fetch_possible_mmx_operand(ctxt, &ctxt->dst);
-       }
+               }
 
-       if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
-               rc = emulator_check_intercept(ctxt, ctxt->intercept,
-                                             X86_ICPT_PRE_EXCEPT);
-               if (rc != X86EMUL_CONTINUE)
+               if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+                       rc = emulate_nm(ctxt);
                        goto done;
-       }
+               }
 
-       /* Privileged instruction can be executed only in CPL=0 */
-       if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
-               rc = emulate_gp(ctxt, 0);
-               goto done;
-       }
+               if (ctxt->d & Mmx) {
+                       rc = flush_pending_x87_faults(ctxt);
+                       if (rc != X86EMUL_CONTINUE)
+                               goto done;
+                       /*
+                        * Now that we know the fpu is exception safe, we can fetch
+                        * operands from it.
+                        */
+                       fetch_possible_mmx_operand(ctxt, &ctxt->src);
+                       fetch_possible_mmx_operand(ctxt, &ctxt->src2);
+                       if (!(ctxt->d & Mov))
+                               fetch_possible_mmx_operand(ctxt, &ctxt->dst);
+               }
 
-       /* Instruction can only be executed in protected mode */
-       if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
-               rc = emulate_ud(ctxt);
-               goto done;
-       }
+               if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
+                       rc = emulator_check_intercept(ctxt, ctxt->intercept,
+                                                     X86_ICPT_PRE_EXCEPT);
+                       if (rc != X86EMUL_CONTINUE)
+                               goto done;
+               }
 
-       /* Do instruction specific permission checks */
-       if (ctxt->check_perm) {
-               rc = ctxt->check_perm(ctxt);
-               if (rc != X86EMUL_CONTINUE)
+               /* Privileged instruction can be executed only in CPL=0 */
+               if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
+                       if (ctxt->d & PrivUD)
+                               rc = emulate_ud(ctxt);
+                       else
+                               rc = emulate_gp(ctxt, 0);
                        goto done;
-       }
+               }
 
-       if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
-               rc = emulator_check_intercept(ctxt, ctxt->intercept,
-                                             X86_ICPT_POST_EXCEPT);
-               if (rc != X86EMUL_CONTINUE)
+               /* Instruction can only be executed in protected mode */
+               if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
+                       rc = emulate_ud(ctxt);
                        goto done;
-       }
+               }
 
-       if (ctxt->rep_prefix && (ctxt->d & String)) {
-               /* All REP prefixes have the same first termination condition */
-               if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
-                       ctxt->eip = ctxt->_eip;
-                       goto done;
+               /* Do instruction specific permission checks */
+               if (ctxt->d & CheckPerm) {
+                       rc = ctxt->check_perm(ctxt);
+                       if (rc != X86EMUL_CONTINUE)
+                               goto done;
+               }
+
+               if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
+                       rc = emulator_check_intercept(ctxt, ctxt->intercept,
+                                                     X86_ICPT_POST_EXCEPT);
+                       if (rc != X86EMUL_CONTINUE)
+                               goto done;
+               }
+
+               if (ctxt->rep_prefix && (ctxt->d & String)) {
+                       /* All REP prefixes have the same first termination condition */
+                       if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
+                               ctxt->eip = ctxt->_eip;
+                               ctxt->eflags &= ~EFLG_RF;
+                               goto done;
+                       }
                }
        }
 
@@ -4616,13 +4675,18 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 special_insn:
 
-       if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
+       if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) {
                rc = emulator_check_intercept(ctxt, ctxt->intercept,
                                              X86_ICPT_POST_MEMACCESS);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
 
+       if (ctxt->rep_prefix && (ctxt->d & String))
+               ctxt->eflags |= EFLG_RF;
+       else
+               ctxt->eflags &= ~EFLG_RF;
+
        if (ctxt->execute) {
                if (ctxt->d & Fastop) {
                        void (*fop)(struct fastop *) = (void *)ctxt->execute;
@@ -4657,8 +4721,9 @@ special_insn:
                break;
        case 0x90 ... 0x97: /* nop / xchg reg, rax */
                if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
-                       break;
-               rc = em_xchg(ctxt);
+                       ctxt->dst.type = OP_NONE;
+               else
+                       rc = em_xchg(ctxt);
                break;
        case 0x98: /* cbw/cwde/cdqe */
                switch (ctxt->op_bytes) {
@@ -4709,17 +4774,17 @@ special_insn:
                goto done;
 
 writeback:
-       if (!(ctxt->d & NoWrite)) {
-               rc = writeback(ctxt, &ctxt->dst);
-               if (rc != X86EMUL_CONTINUE)
-                       goto done;
-       }
        if (ctxt->d & SrcWrite) {
                BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR);
                rc = writeback(ctxt, &ctxt->src);
                if (rc != X86EMUL_CONTINUE)
                        goto done;
        }
+       if (!(ctxt->d & NoWrite)) {
+               rc = writeback(ctxt, &ctxt->dst);
+               if (rc != X86EMUL_CONTINUE)
+                       goto done;
+       }
 
        /*
         * restore dst type in case the decoding will be reused
@@ -4761,6 +4826,7 @@ writeback:
                        }
                        goto done; /* skip rip writeback */
                }
+               ctxt->eflags &= ~EFLG_RF;
        }
 
        ctxt->eip = ctxt->_eip;
@@ -4793,8 +4859,10 @@ twobyte_insn:
                ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val);
                break;
        case 0x40 ... 0x4f:     /* cmov */
-               ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val;
-               if (!test_cc(ctxt->b, ctxt->eflags))
+               if (test_cc(ctxt->b, ctxt->eflags))
+                       ctxt->dst.val = ctxt->src.val;
+               else if (ctxt->mode != X86EMUL_MODE_PROT64 ||
+                        ctxt->op_bytes != 4)
                        ctxt->dst.type = OP_NONE; /* no writeback */
                break;
        case 0x80 ... 0x8f: /* jnz rel, etc*/
@@ -4818,8 +4886,8 @@ twobyte_insn:
                break;
        case 0xc3:              /* movnti */
                ctxt->dst.bytes = ctxt->op_bytes;
-               ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val :
-                                                       (u64) ctxt->src.val;
+               ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
+                                                       (u32) ctxt->src.val;
                break;
        default:
                goto cannot_emulate;
index 0069118..3855103 100644 (file)
@@ -1451,7 +1451,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.apic_arb_prio = 0;
        vcpu->arch.apic_attention = 0;
 
-       apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
+       apic_debug("%s: vcpu=%p, id=%d, base_msr="
                   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
                   vcpu, kvm_apic_id(apic),
                   vcpu->arch.apic_base, apic->base_address);
@@ -1895,7 +1895,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu)
                /* evaluate pending_events before reading the vector */
                smp_rmb();
                sipi_vector = apic->sipi_vector;
-               pr_debug("vcpu %d received sipi with vector # %x\n",
+               apic_debug("vcpu %d received sipi with vector # %x\n",
                         vcpu->vcpu_id, sipi_vector);
                kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector);
                vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
index 9d2e0ff..5aaf356 100644 (file)
@@ -22,7 +22,7 @@
        __entry->unsync = sp->unsync;
 
 #define KVM_MMU_PAGE_PRINTK() ({                                       \
-       const char *ret = p->buffer + p->len;                           \
+       const u32 saved_len = p->len;                                   \
        static const char *access_str[] = {                             \
                "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux"  \
        };                                                              \
@@ -41,7 +41,7 @@
                         role.nxe ? "" : "!",                           \
                         __entry->root_count,                           \
                         __entry->unsync ? "unsync" : "sync", 0);       \
-       ret;                                                            \
+       p->buffer + saved_len;                                          \
                })
 
 #define kvm_mmu_trace_pferr_flags       \
index cbecaa9..3dd6acc 100644 (file)
@@ -428,6 +428,15 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        return 1;
 }
 
+int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
+{
+       struct kvm_pmu *pmu = &vcpu->arch.pmu;
+       bool fixed = pmc & (1u << 30);
+       pmc &= ~(3u << 30);
+       return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
+               (fixed && pmc >= pmu->nr_arch_fixed_counters);
+}
+
 int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
 {
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
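
kvm_pmu_check_pmc() validates the RDPMC index: bit 30 of ECX selects the fixed-counter bank and the low bits index into it, so the check compares against the guest's per-bank counter counts instead of the old hard-coded rcx > 3. A sketch with the polarity flipped to "is valid" and made-up counter counts:

#include <stdio.h>

static int pmc_ok(unsigned pmc, unsigned nr_gp, unsigned nr_fixed)
{
        int fixed = pmc & (1u << 30);

        pmc &= ~(3u << 30);             /* strip the type bits */
        return fixed ? pmc < nr_fixed : pmc < nr_gp;
}

int main(void)
{
        unsigned nr_gp = 4, nr_fixed = 3;       /* made-up counts */

        printf("%d\n", pmc_ok(2, nr_gp, nr_fixed));              /* gp #2: ok */
        printf("%d\n", pmc_ok((1u << 30) | 2, nr_gp, nr_fixed)); /* fixed #2: ok */
        printf("%d\n", pmc_ok((1u << 30) | 3, nr_gp, nr_fixed)); /* fixed #3: no */
        return 0;
}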
index ec8366c..ddf7427 100644 (file)
@@ -486,14 +486,14 @@ static int is_external_interrupt(u32 info)
        return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
 }
 
-static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ret = 0;
 
        if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
-               ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
-       return ret & mask;
+               ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
+       return ret;
 }
 
 static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
@@ -1415,7 +1415,16 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
        var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
        var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
        var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
-       var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
+
+       /*
+        * AMD CPUs circa 2014 track the G bit for all segments except CS.
+        * However, the SVM spec states that the G bit is not observed by the
+        * CPU, and some VMware virtual CPUs drop the G bit for all segments.
+        * So let's synthesize a legal G bit for all segments; this helps
+        * when running KVM nested. It also helps cross-vendor migration, because
+        * Intel's vmentry has a check on the 'G' bit.
+        */
+       var->g = s->limit > 0xfffff;
 
        /*
         * AMD's VMCB does not have an explicit unusable field, so emulate it
@@ -1424,14 +1433,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
        var->unusable = !var->present || (var->type == 0);
 
        switch (seg) {
-       case VCPU_SREG_CS:
-               /*
-                * SVM always stores 0 for the 'G' bit in the CS selector in
-                * the VMCB on a VMEXIT. This hurts cross-vendor migration:
-                * Intel's VMENTRY has a check on the 'G' bit.
-                */
-               var->g = s->limit > 0xfffff;
-               break;
        case VCPU_SREG_TR:
                /*
                 * Work around a bug where the busy flag in the tr selector
@@ -1462,6 +1463,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 */
                if (var->unusable)
                        var->db = 0;
+               var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
 }
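
The G-bit synthesis above is legal because the descriptor stores a 20-bit limit that is scaled by 4 KiB when G=1: any scaled limit above 0xfffff can only have come from a granular segment. A tiny demonstration of that round trip:

#include <stdio.h>

static unsigned long scale(unsigned long limit20, int g)
{
        return g ? (limit20 << 12) | 0xfff : limit20;
}

int main(void)
{
        unsigned long flat = scale(0xfffff, 1);  /* granular: 4 GiB - 1 */
        unsigned long tiny = scale(0x00fff, 0);  /* byte-granular: 4 KiB - 1 */

        printf("flat: g=%d\n", flat > 0xfffff);  /* 1 */
        printf("tiny: g=%d\n", tiny > 0xfffff);  /* 0 */
        return 0;
}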
@@ -2115,22 +2117,27 @@ static void nested_svm_unmap(struct page *page)
 
 static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
 {
-       unsigned port;
-       u8 val, bit;
+       unsigned port, size, iopm_len;
+       u16 val, mask;
+       u8 start_bit;
        u64 gpa;
 
        if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
                return NESTED_EXIT_HOST;
 
        port = svm->vmcb->control.exit_info_1 >> 16;
+       size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
+               SVM_IOIO_SIZE_SHIFT;
        gpa  = svm->nested.vmcb_iopm + (port / 8);
-       bit  = port % 8;
-       val  = 0;
+       start_bit = port % 8;
+       iopm_len = (start_bit + size > 8) ? 2 : 1;
+       mask = (0xf >> (4 - size)) << start_bit;
+       val = 0;
 
-       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1))
-               val &= (1 << bit);
+       if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len))
+               return NESTED_EXIT_DONE;
 
-       return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
+       return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
 }
 
 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
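
The IOPM fix above handles multi-byte port accesses: the permissions map has one bit per port, so a size-byte access to port P must test bits P..P+size-1, which can straddle a byte boundary; hence the 16-bit read and the (0xf >> (4 - size)) << start_bit mask. A standalone sketch of the check:

#include <stdint.h>
#include <stdio.h>

static int io_intercepted(const uint8_t *iopm, unsigned port, unsigned size)
{
        unsigned start_bit = port % 8;
        unsigned len = (start_bit + size > 8) ? 2 : 1;   /* bytes to read */
        uint16_t mask = (0xf >> (4 - size)) << start_bit;
        uint16_t val = iopm[port / 8];

        if (len == 2)
                val |= (uint16_t)iopm[port / 8 + 1] << 8;
        return (val & mask) != 0;
}

int main(void)
{
        static uint8_t iopm[8192];

        iopm[0x3f9 / 8] |= 1 << (0x3f9 % 8);    /* intercept port 0x3f9 only */

        /* a 2-byte access to 0x3f8 also touches 0x3f9 -> intercepted */
        printf("%d %d\n",
               io_intercepted(iopm, 0x3f8, 1),  /* 0 */
               io_intercepted(iopm, 0x3f8, 2)); /* 1 */
        return 0;
}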
@@ -4204,7 +4211,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
                if (info->intercept == x86_intercept_cr_write)
                        icpt_info.exit_code += info->modrm_reg;
 
-               if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0)
+               if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
+                   info->intercept == x86_intercept_clts)
                        break;
 
                intercept = svm->nested.intercept;
@@ -4249,14 +4257,14 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu,
                u64 exit_info;
                u32 bytes;
 
-               exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16;
-
                if (info->intercept == x86_intercept_in ||
                    info->intercept == x86_intercept_ins) {
-                       exit_info |= SVM_IOIO_TYPE_MASK;
-                       bytes = info->src_bytes;
-               } else {
+                       exit_info = ((info->src_val & 0xffff) << 16) |
+                               SVM_IOIO_TYPE_MASK;
                        bytes = info->dst_bytes;
+               } else {
+                       exit_info = (info->dst_val & 0xffff) << 16;
+                       bytes = info->src_bytes;
                }
 
                if (info->intercept == x86_intercept_outs ||
index 33574c9..e850a7d 100644 (file)
@@ -721,10 +721,10 @@ TRACE_EVENT(kvm_emulate_insn,
                ),
 
        TP_fast_assign(
-               __entry->rip = vcpu->arch.emulate_ctxt.fetch.start;
                __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS);
-               __entry->len = vcpu->arch.emulate_ctxt._eip
-                              - vcpu->arch.emulate_ctxt.fetch.start;
+               __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr
+                              - vcpu->arch.emulate_ctxt.fetch.data;
+               __entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len;
                memcpy(__entry->insn,
                       vcpu->arch.emulate_ctxt.fetch.data,
                       15);
index 801332e..e618f34 100644 (file)
@@ -383,6 +383,9 @@ struct nested_vmx {
 
        struct hrtimer preemption_timer;
        bool preemption_timer_expired;
+
+       /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
+       u64 vmcs01_debugctl;
 };
 
 #define POSTED_INTR_ON  0
@@ -740,7 +743,6 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var);
 static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
-static bool vmx_mpx_supported(void);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -820,7 +822,6 @@ static const u32 vmx_msr_index[] = {
 #endif
        MSR_EFER, MSR_TSC_AUX, MSR_STAR,
 };
-#define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
 static inline bool is_page_fault(u32 intr_info)
 {
@@ -1940,7 +1941,7 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        vmcs_writel(GUEST_RFLAGS, rflags);
 }
 
-static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
+static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
        int ret = 0;
@@ -1950,7 +1951,7 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
        if (interruptibility & GUEST_INTR_STATE_MOV_SS)
                ret |= KVM_X86_SHADOW_INT_MOV_SS;
 
-       return ret & mask;
+       return ret;
 }
 
 static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
@@ -2239,10 +2240,13 @@ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu)
  * or other means.
  */
 static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high;
+static u32 nested_vmx_true_procbased_ctls_low;
 static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high;
 static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high;
 static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high;
+static u32 nested_vmx_true_exit_ctls_low;
 static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high;
+static u32 nested_vmx_true_entry_ctls_low;
 static u32 nested_vmx_misc_low, nested_vmx_misc_high;
 static u32 nested_vmx_ept_caps;
 static __init void nested_vmx_setup_ctls_msrs(void)
@@ -2265,21 +2269,13 @@ static __init void nested_vmx_setup_ctls_msrs(void)
        /* pin-based controls */
        rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
              nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high);
-       /*
-        * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is
-        * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR.
-        */
        nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
        nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK |
                PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS;
        nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
                PIN_BASED_VMX_PREEMPTION_TIMER;
 
-       /*
-        * Exit controls
-        * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and
-        * 17 must be 1.
-        */
+       /* exit controls */
        rdmsr(MSR_IA32_VMX_EXIT_CTLS,
                nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high);
        nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
@@ -2296,10 +2292,13 @@ static __init void nested_vmx_setup_ctls_msrs(void)
        if (vmx_mpx_supported())
                nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
 
+       /* We support free control of debug control saving. */
+       nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low &
+               ~VM_EXIT_SAVE_DEBUG_CONTROLS;
+
        /* entry controls */
        rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
                nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high);
-       /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */
        nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
        nested_vmx_entry_ctls_high &=
 #ifdef CONFIG_X86_64
@@ -2311,10 +2310,14 @@ static __init void nested_vmx_setup_ctls_msrs(void)
        if (vmx_mpx_supported())
                nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
 
+       /* We support free control of debug control loading. */
+       nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low &
+               ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
+
        /* cpu-based controls */
        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
                nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high);
-       nested_vmx_procbased_ctls_low = 0;
+       nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
        nested_vmx_procbased_ctls_high &=
                CPU_BASED_VIRTUAL_INTR_PENDING |
                CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING |
@@ -2335,7 +2338,12 @@ static __init void nested_vmx_setup_ctls_msrs(void)
         * can use it to avoid exits to L1 - even when L0 runs L2
         * without MSR bitmaps.
         */
-       nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS;
+       nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
+               CPU_BASED_USE_MSR_BITMAPS;
+
+       /* We support free control of CR3 access interception. */
+       nested_vmx_true_procbased_ctls_low = nested_vmx_procbased_ctls_low &
+               ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
 
        /* secondary cpu-based controls */
        rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
@@ -2394,7 +2402,7 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                 * guest, and the VMCS structure we give it - not about the
                 * VMX support of the underlying hardware.
                 */
-               *pdata = VMCS12_REVISION |
+               *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS |
                           ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
                           (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
                break;
@@ -2404,16 +2412,25 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                                        nested_vmx_pinbased_ctls_high);
                break;
        case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
+               *pdata = vmx_control_msr(nested_vmx_true_procbased_ctls_low,
+                                       nested_vmx_procbased_ctls_high);
+               break;
        case MSR_IA32_VMX_PROCBASED_CTLS:
                *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low,
                                        nested_vmx_procbased_ctls_high);
                break;
        case MSR_IA32_VMX_TRUE_EXIT_CTLS:
+               *pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low,
+                                       nested_vmx_exit_ctls_high);
+               break;
        case MSR_IA32_VMX_EXIT_CTLS:
                *pdata = vmx_control_msr(nested_vmx_exit_ctls_low,
                                        nested_vmx_exit_ctls_high);
                break;
        case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
+               *pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low,
+                                       nested_vmx_entry_ctls_high);
+               break;
        case MSR_IA32_VMX_ENTRY_CTLS:
                *pdata = vmx_control_msr(nested_vmx_entry_ctls_low,
                                        nested_vmx_entry_ctls_high);
@@ -2442,7 +2459,7 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                *pdata = -1ULL;
                break;
        case MSR_IA32_VMX_VMCS_ENUM:
-               *pdata = 0x1f;
+               *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */
                break;
        case MSR_IA32_VMX_PROCBASED_CTLS2:
                *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low,
@@ -3653,7 +3670,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
        vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var));
 
 out:
-       vmx->emulation_required |= emulation_required(vcpu);
+       vmx->emulation_required = emulation_required(vcpu);
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -4422,7 +4439,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
                vmx->vcpu.arch.pat = host_pat;
        }
 
-       for (i = 0; i < NR_VMX_MSR; ++i) {
+       for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) {
                u32 index = vmx_msr_index[i];
                u32 data_low, data_high;
                int j = vmx->nmsrs;
@@ -4873,7 +4890,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
                if (!(vcpu->guest_debug &
                      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
                        vcpu->arch.dr6 &= ~15;
-                       vcpu->arch.dr6 |= dr6;
+                       vcpu->arch.dr6 |= dr6 | DR6_RTM;
                        if (!(dr6 & ~DR6_RESERVED)) /* icebp */
                                skip_emulated_instruction(vcpu);
 
@@ -5039,7 +5056,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
        reg = (exit_qualification >> 8) & 15;
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
-               val = kvm_register_read(vcpu, reg);
+               val = kvm_register_readl(vcpu, reg);
                trace_kvm_cr_write(cr, val);
                switch (cr) {
                case 0:
@@ -5056,7 +5073,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
                        return 1;
                case 8: {
                                u8 cr8_prev = kvm_get_cr8(vcpu);
-                               u8 cr8 = kvm_register_read(vcpu, reg);
+                               u8 cr8 = (u8)val;
                                err = kvm_set_cr8(vcpu, cr8);
                                kvm_complete_insn_gp(vcpu, err);
                                if (irqchip_in_kernel(vcpu->kvm))
@@ -5132,7 +5149,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                        return 0;
                } else {
                        vcpu->arch.dr7 &= ~DR7_GD;
-                       vcpu->arch.dr6 |= DR6_BD;
+                       vcpu->arch.dr6 |= DR6_BD | DR6_RTM;
                        vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
                        kvm_queue_exception(vcpu, DB_VECTOR);
                        return 1;
@@ -5165,7 +5182,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
                        return 1;
                kvm_register_write(vcpu, reg, val);
        } else
-               if (kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg)))
+               if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg)))
                        return 1;
 
        skip_emulated_instruction(vcpu);
@@ -5621,7 +5638,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
        cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING;
 
-       while (!guest_state_valid(vcpu) && count-- != 0) {
+       while (vmx->emulation_required && count-- != 0) {
                if (intr_window_requested && vmx_interrupt_allowed(vcpu))
                        return handle_interrupt_window(&vmx->vcpu);
 
@@ -5655,7 +5672,6 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
                        schedule();
        }
 
-       vmx->emulation_required = emulation_required(vcpu);
 out:
        return ret;
 }
@@ -5754,22 +5770,27 @@ static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr)
 
 /*
  * Free all VMCSs saved for this vcpu, except the one pointed by
- * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one
- * currently used, if running L2), and vmcs01 when running L2.
+ * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs
+ * must be &vmx->vmcs01.
  */
 static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx)
 {
        struct vmcs02_list *item, *n;
+
+       WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01);
        list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) {
-               if (vmx->loaded_vmcs != &item->vmcs02)
-                       free_loaded_vmcs(&item->vmcs02);
+               /*
+                * Something will leak if the above WARN triggers.  Better than
+                * a use-after-free.
+                */
+               if (vmx->loaded_vmcs == &item->vmcs02)
+                       continue;
+
+               free_loaded_vmcs(&item->vmcs02);
                list_del(&item->list);
                kfree(item);
+               vmx->nested.vmcs02_num--;
        }
-       vmx->nested.vmcs02_num = 0;
-
-       if (vmx->loaded_vmcs != &vmx->vmcs01)
-               free_loaded_vmcs(&vmx->vmcs01);
 }
 
 /*
@@ -5918,7 +5939,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                 * which replaces physical address width with 32
                 *
                 */
-               if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failInvalid(vcpu);
                        skip_emulated_instruction(vcpu);
                        return 1;
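
PAGE_ALIGNED(), used here and in the two hunks below, is just a page-sized specialization of the open-coded IS_ALIGNED() check it replaces; roughly (after include/linux/kernel.h and include/linux/mm.h):

    #define PAGE_SIZE          4096UL   /* illustration; the kernel provides it */
    #define IS_ALIGNED(x, a)   (((x) & ((typeof(x))(a) - 1)) == 0)
    #define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
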
@@ -5936,7 +5957,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                vmx->nested.vmxon_ptr = vmptr;
                break;
        case EXIT_REASON_VMCLEAR:
-               if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMCLEAR_INVALID_ADDRESS);
                        skip_emulated_instruction(vcpu);
@@ -5951,7 +5972,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
                }
                break;
        case EXIT_REASON_VMPTRLD:
-               if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+               if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) {
                        nested_vmx_failValid(vcpu,
                                             VMXERR_VMPTRLD_INVALID_ADDRESS);
                        skip_emulated_instruction(vcpu);
@@ -6086,20 +6107,27 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
 {
        u32 exec_control;
+       if (vmx->nested.current_vmptr == -1ull)
+               return;
+
+       /* current_vmptr and current_vmcs12 are always set/reset together */
+       if (WARN_ON(vmx->nested.current_vmcs12 == NULL))
+               return;
+
        if (enable_shadow_vmcs) {
-               if (vmx->nested.current_vmcs12 != NULL) {
-                       /* copy to memory all shadowed fields in case
-                          they were modified */
-                       copy_shadow_to_vmcs12(vmx);
-                       vmx->nested.sync_shadow_vmcs = false;
-                       exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
-                       exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
-                       vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
-                       vmcs_write64(VMCS_LINK_POINTER, -1ull);
-               }
+               /* copy to memory all shadowed fields in case
+                  they were modified */
+               copy_shadow_to_vmcs12(vmx);
+               vmx->nested.sync_shadow_vmcs = false;
+               exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+               exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
+               vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
+               vmcs_write64(VMCS_LINK_POINTER, -1ull);
        }
        kunmap(vmx->nested.current_vmcs12_page);
        nested_release_page(vmx->nested.current_vmcs12_page);
+       vmx->nested.current_vmptr = -1ull;
+       vmx->nested.current_vmcs12 = NULL;
 }
 
 /*
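
nested_release_vmcs12() is now idempotent: it bails out when no vmcs12 is mapped and resets current_vmptr/current_vmcs12 itself, so the callers adjusted below (free_nested, handle_vmclear, handle_vmptrld) may invoke it unconditionally. A sketch of the pattern, with hypothetical names:

    struct box { _Bool mapped; void *page; };
    static void unmap(void *p) { (void)p; }   /* stand-in for the undo work */

    static void release(struct box *b)
    {
            if (!b->mapped)      /* idempotent: safe to call repeatedly */
                    return;
            unmap(b->page);
            b->page = NULL;
            b->mapped = 0;       /* the helper resets its own bookkeeping, */
    }                            /* so no caller can forget to */
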
@@ -6110,12 +6138,9 @@ static void free_nested(struct vcpu_vmx *vmx)
 {
        if (!vmx->nested.vmxon)
                return;
+
        vmx->nested.vmxon = false;
-       if (vmx->nested.current_vmptr != -1ull) {
-               nested_release_vmcs12(vmx);
-               vmx->nested.current_vmptr = -1ull;
-               vmx->nested.current_vmcs12 = NULL;
-       }
+       nested_release_vmcs12(vmx);
        if (enable_shadow_vmcs)
                free_vmcs(vmx->nested.current_shadow_vmcs);
        /* Unpin physical memory we referred to in current vmcs02 */
@@ -6152,11 +6177,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
        if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
                return 1;
 
-       if (vmptr == vmx->nested.current_vmptr) {
+       if (vmptr == vmx->nested.current_vmptr)
                nested_release_vmcs12(vmx);
-               vmx->nested.current_vmptr = -1ull;
-               vmx->nested.current_vmcs12 = NULL;
-       }
 
        page = nested_get_page(vcpu, vmptr);
        if (page == NULL) {
@@ -6384,7 +6406,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
                return 1;
 
        /* Decode instruction info and find the field to read */
-       field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+       field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
        /* Read the field, zero-extended to a u64 field_value */
        if (!vmcs12_read_any(vcpu, field, &field_value)) {
                nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
@@ -6397,7 +6419,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
         * on the guest's mode (32 or 64 bit), not on the given field's length.
         */
        if (vmx_instruction_info & (1u << 10)) {
-               kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
+               kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf),
                        field_value);
        } else {
                if (get_vmx_mem_address(vcpu, exit_qualification,
@@ -6434,21 +6456,21 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
                return 1;
 
        if (vmx_instruction_info & (1u << 10))
-               field_value = kvm_register_read(vcpu,
+               field_value = kvm_register_readl(vcpu,
                        (((vmx_instruction_info) >> 3) & 0xf));
        else {
                if (get_vmx_mem_address(vcpu, exit_qualification,
                                vmx_instruction_info, &gva))
                        return 1;
                if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
-                          &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) {
+                          &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
                        kvm_inject_page_fault(vcpu, &e);
                        return 1;
                }
        }
 
 
-       field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
+       field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
        if (vmcs_field_readonly(field)) {
                nested_vmx_failValid(vcpu,
                        VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
@@ -6498,9 +6520,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
                        skip_emulated_instruction(vcpu);
                        return 1;
                }
-               if (vmx->nested.current_vmptr != -1ull)
-                       nested_release_vmcs12(vmx);
 
+               nested_release_vmcs12(vmx);
                vmx->nested.current_vmptr = vmptr;
                vmx->nested.current_vmcs12 = new_vmcs12;
                vmx->nested.current_vmcs12_page = page;
@@ -6571,7 +6592,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
        }
 
        vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
-       type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf);
+       type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
 
        types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
 
@@ -6751,7 +6772,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
        unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
        int cr = exit_qualification & 15;
        int reg = (exit_qualification >> 8) & 15;
-       unsigned long val = kvm_register_read(vcpu, reg);
+       unsigned long val = kvm_register_readl(vcpu, reg);
 
        switch ((exit_qualification >> 4) & 3) {
        case 0: /* mov to cr */
@@ -7112,7 +7133,26 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
        if (max_irr == -1)
                return;
 
-       vmx_set_rvi(max_irr);
+       /*
+        * If a vmexit is needed, vmx_check_nested_events handles it.
+        */
+       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+               return;
+
+       if (!is_guest_mode(vcpu)) {
+               vmx_set_rvi(max_irr);
+               return;
+       }
+
+       /*
+        * Fall back to pre-APICv interrupt injection since L2
+        * is run without virtual interrupt delivery.
+        */
+       if (!kvm_event_needs_reinjection(vcpu) &&
+           vmx_interrupt_allowed(vcpu)) {
+               kvm_queue_interrupt(vcpu, max_irr, false);
+               vmx_inject_irq(vcpu);
+       }
 }
 
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -7520,13 +7560,31 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        vmx_complete_interrupts(vmx);
 }
 
+static void vmx_load_vmcs01(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       int cpu;
+
+       if (vmx->loaded_vmcs == &vmx->vmcs01)
+               return;
+
+       cpu = get_cpu();
+       vmx->loaded_vmcs = &vmx->vmcs01;
+       vmx_vcpu_put(vcpu);
+       vmx_vcpu_load(vcpu, cpu);
+       vcpu->cpu = cpu;
+       put_cpu();
+}
+
 static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
        free_vpid(vmx);
-       free_loaded_vmcs(vmx->loaded_vmcs);
+       leave_guest_mode(vcpu);
+       vmx_load_vmcs01(vcpu);
        free_nested(vmx);
+       free_loaded_vmcs(vmx->loaded_vmcs);
        kfree(vmx->guest_msrs);
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vmx);
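
The ordering in the rewritten vmx_free_vcpu() is deliberate: leaving guest mode and reloading vmcs01 first guarantee that vmx->loaded_vmcs no longer points into the vmcs02 pool when free_nested() empties it, which is exactly what the WARN_ON added earlier relies on. In outline (names as in the hunk above):

    leave_guest_mode(vcpu);              /* stop treating L2 as active       */
    vmx_load_vmcs01(vcpu);               /* loaded_vmcs := &vmx->vmcs01      */
    free_nested(vmx);                    /* may now free every vmcs02 safely */
    free_loaded_vmcs(vmx->loaded_vmcs);  /* finally release vmcs01 itself    */
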
@@ -7548,6 +7606,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
                goto free_vcpu;
 
        vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
+                    > PAGE_SIZE);
+
        err = -ENOMEM;
        if (!vmx->guest_msrs) {
                goto uninit_vcpu;
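
The BUILD_BUG_ON() turns the implicit assumption that the shared-MSR array fits into the single page allocated for guest_msrs into a compile-time failure. A simplified form of the trick (the kernel's real macro produces friendlier diagnostics):

    /* A negative array size is a compile error whenever cond is true: */
    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
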
@@ -7836,7 +7897,13 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
        vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
 
-       vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+       if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+               kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
+               vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
+       } else {
+               kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
+               vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
+       }
        vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
                vmcs12->vm_entry_intr_info_field);
        vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
@@ -7846,7 +7913,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
        vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
                vmcs12->guest_interruptibility_info);
        vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
-       kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
        vmx_set_rflags(vcpu, vmcs12->guest_rflags);
        vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
                vmcs12->guest_pending_dbg_exceptions);
@@ -8113,14 +8179,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        }
 
        if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) &&
-                       !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) {
+                       !PAGE_ALIGNED(vmcs12->msr_bitmap)) {
                /*TODO: Also verify bits beyond physical address width are 0*/
                nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
                return 1;
        }
 
        if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
-                       !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
+                       !PAGE_ALIGNED(vmcs12->apic_access_addr)) {
                /*TODO: Also verify bits beyond physical address width are 0*/
                nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
                return 1;
@@ -8136,15 +8202,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
        }
 
        if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
-             nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) ||
+                               nested_vmx_true_procbased_ctls_low,
+                               nested_vmx_procbased_ctls_high) ||
            !vmx_control_verify(vmcs12->secondary_vm_exec_control,
              nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) ||
            !vmx_control_verify(vmcs12->pin_based_vm_exec_control,
              nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) ||
            !vmx_control_verify(vmcs12->vm_exit_controls,
-             nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) ||
+                               nested_vmx_true_exit_ctls_low,
+                               nested_vmx_exit_ctls_high) ||
            !vmx_control_verify(vmcs12->vm_entry_controls,
-             nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high))
+                               nested_vmx_true_entry_ctls_low,
+                               nested_vmx_entry_ctls_high))
        {
                nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
                return 1;
@@ -8221,6 +8290,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 
        vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
 
+       if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
+               vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+
        cpu = get_cpu();
        vmx->loaded_vmcs = vmcs02;
        vmx_vcpu_put(vcpu);
@@ -8398,7 +8470,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
        vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
        vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
 
-       kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
        vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
        vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP);
        vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
@@ -8477,9 +8548,13 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
                (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
                (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
 
+       if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) {
+               kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
+               vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
+       }
+
        /* TODO: These cannot have changed unless we have MSR bitmaps and
         * the relevant bit asks not to trap the change */
-       vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
        if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT)
                vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT);
        if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
@@ -8670,7 +8745,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                              unsigned long exit_qualification)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       int cpu;
        struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
        /* trying to cancel vmlaunch/vmresume is a bug */
@@ -8695,12 +8769,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
                                       vmcs12->vm_exit_intr_error_code,
                                       KVM_ISA_VMX);
 
-       cpu = get_cpu();
-       vmx->loaded_vmcs = &vmx->vmcs01;
-       vmx_vcpu_put(vcpu);
-       vmx_vcpu_load(vcpu, cpu);
-       vcpu->cpu = cpu;
-       put_cpu();
+       vmx_load_vmcs01(vcpu);
 
        vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS));
        vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS));
@@ -8890,7 +8959,7 @@ static int __init vmx_init(void)
 
        rdmsrl_safe(MSR_EFER, &host_efer);
 
-       for (i = 0; i < NR_VMX_MSR; ++i)
+       for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i)
                kvm_define_shared_msr(i, vmx_msr_index[i]);
 
        vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
index f32a025..b86d329 100644
@@ -87,6 +87,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 struct kvm_x86_ops *kvm_x86_ops;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -211,6 +212,7 @@ static void shared_msr_update(unsigned slot, u32 msr)
 
 void kvm_define_shared_msr(unsigned slot, u32 msr)
 {
+       BUG_ON(slot >= KVM_NR_SHARED_MSRS);
        if (slot >= shared_msrs_global.nr)
                shared_msrs_global.nr = slot + 1;
        shared_msrs_global.msrs[slot] = msr;
@@ -310,6 +312,31 @@ static int exception_class(int vector)
        return EXCPT_BENIGN;
 }
 
+#define EXCPT_FAULT            0
+#define EXCPT_TRAP             1
+#define EXCPT_ABORT            2
+#define EXCPT_INTERRUPT                3
+
+static int exception_type(int vector)
+{
+       unsigned int mask;
+
+       if (WARN_ON(vector > 31 || vector == NMI_VECTOR))
+               return EXCPT_INTERRUPT;
+
+       mask = 1 << vector;
+
+       /* #DB is trap, as instruction watchpoints are handled elsewhere */
+       if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
+               return EXCPT_TRAP;
+
+       if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
+               return EXCPT_ABORT;
+
+       /* Reserved exceptions will result in fault */
+       return EXCPT_FAULT;
+}
+
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
                unsigned nr, bool has_error, u32 error_code,
                bool reinject)
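
exception_type() exists to feed the EFLAGS.RF handling added later in this patch: architecturally, RF is set in the saved flags when a fault-class exception is delivered (so the restarted instruction does not re-trigger instruction breakpoints), but not for traps or aborts. The consumer, from the inject_pending_event() hunk further down:

    if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
            __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | X86_EFLAGS_RF);
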
@@ -758,6 +785,15 @@ static void kvm_update_dr7(struct kvm_vcpu *vcpu)
                vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED;
 }
 
+static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu)
+{
+       u64 fixed = DR6_FIXED_1;
+
+       if (!guest_cpuid_has_rtm(vcpu))
+               fixed |= DR6_RTM;
+       return fixed;
+}
+
 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
        switch (dr) {
@@ -773,7 +809,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
        case 6:
                if (val & 0xffffffff00000000ULL)
                        return -1; /* #GP */
-               vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+               vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu);
                kvm_update_dr6(vcpu);
                break;
        case 5:
@@ -1215,6 +1251,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        unsigned long flags;
        s64 usdiff;
        bool matched;
+       bool already_matched;
        u64 data = msr->data;
 
        raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
@@ -1279,6 +1316,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
                        pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
                }
                matched = true;
+               already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation);
        } else {
                /*
                 * We split periods of matched TSC writes into generations.
@@ -1294,7 +1332,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
                kvm->arch.cur_tsc_write = data;
                kvm->arch.cur_tsc_offset = offset;
                matched = false;
-               pr_debug("kvm: new tsc generation %u, clock %llu\n",
+               pr_debug("kvm: new tsc generation %llu, clock %llu\n",
                         kvm->arch.cur_tsc_generation, data);
        }
 
@@ -1319,10 +1357,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
        spin_lock(&kvm->arch.pvclock_gtod_sync_lock);
-       if (matched)
-               kvm->arch.nr_vcpus_matched_tsc++;
-       else
+       if (!matched) {
                kvm->arch.nr_vcpus_matched_tsc = 0;
+       } else if (!already_matched) {
+               kvm->arch.nr_vcpus_matched_tsc++;
+       }
 
        kvm_track_tsc_matching(vcpu);
        spin_unlock(&kvm->arch.pvclock_gtod_sync_lock);
@@ -1898,7 +1937,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                        break;
                gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-               if (kvm_write_guest(kvm, data,
+               if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
                        &tsc_ref, sizeof(tsc_ref)))
                        return 1;
                mark_page_dirty(kvm, gfn);
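
The Hyper-V reference-TSC hunk above fixes a real addressing bug: the raw MSR value packs the target GFN in its upper bits and an enable flag in bit 0, so passing 'data' straight to kvm_write_guest() aimed at the wrong guest physical address. Rebuilding the GPA from the GFN (a sketch; the shift constant is one page, i.e. 12):

    u64 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;  /* strip flag bits  */
    u64 gpa = gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;   /* page-aligned GPA */
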
@@ -2032,6 +2071,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                data &= ~(u64)0x40;     /* ignore flush filter disable */
                data &= ~(u64)0x100;    /* ignore ignne emulation enable */
                data &= ~(u64)0x8;      /* ignore TLB cache disable */
+               data &= ~(u64)0x40000;  /* ignore MC status write enable */
                if (data != 0) {
                        vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n",
                                    data);
@@ -2974,9 +3014,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft;
        events->interrupt.nr = vcpu->arch.interrupt.nr;
        events->interrupt.soft = 0;
-       events->interrupt.shadow =
-               kvm_x86_ops->get_interrupt_shadow(vcpu,
-                       KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI);
+       events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
 
        events->nmi.injected = vcpu->arch.nmi_injected;
        events->nmi.pending = vcpu->arch.nmi_pending != 0;
@@ -4082,7 +4120,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
 
                if (gpa == UNMAPPED_GVA)
                        return X86EMUL_PROPAGATE_FAULT;
-               ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
+               ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data,
+                                         offset, toread);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
                        goto out;
@@ -4103,10 +4142,24 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
 {
        struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+       unsigned offset;
+       int ret;
 
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-                                         access | PFERR_FETCH_MASK,
-                                         exception);
+       /* Inline kvm_read_guest_virt_helper for speed.  */
+       gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK,
+                                                   exception);
+       if (unlikely(gpa == UNMAPPED_GVA))
+               return X86EMUL_PROPAGATE_FAULT;
+
+       offset = addr & (PAGE_SIZE-1);
+       if (WARN_ON(offset + bytes > PAGE_SIZE))
+               bytes = (unsigned)PAGE_SIZE - offset;
+       ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val,
+                                 offset, bytes);
+       if (unlikely(ret < 0))
+               return X86EMUL_IO_NEEDED;
+
+       return X86EMUL_CONTINUE;
 }
 
 int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
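
The inlined fast path above only handles fetches that stay within one page (and clamps with a WARN_ON otherwise), while the generic helper it bypasses loops page by page. The core arithmetic both rely on, sketched as a standalone function:

    #define PAGE_SIZE 4096UL   /* illustration; the kernel provides it */

    /* Length of the first page-bounded chunk of a guest-virtual range: */
    static unsigned int first_chunk(unsigned long addr, unsigned int bytes)
    {
            unsigned int offset = addr & (PAGE_SIZE - 1);  /* byte within page */
            unsigned int room   = PAGE_SIZE - offset;      /* bytes left in it */
            return bytes < room ? bytes : room;
    }
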
@@ -4730,7 +4783,6 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
        if (desc->g)
                var.limit = (var.limit << 12) | 0xfff;
        var.type = desc->type;
-       var.present = desc->p;
        var.dpl = desc->dpl;
        var.db = desc->d;
        var.s = desc->s;
@@ -4762,6 +4814,12 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
        return kvm_set_msr(emul_to_vcpu(ctxt), &msr);
 }
 
+static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
+                             u32 pmc)
+{
+       return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
+}
+
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
                             u32 pmc, u64 *pdata)
 {
@@ -4838,6 +4896,7 @@ static const struct x86_emulate_ops emulate_ops = {
        .set_dr              = emulator_set_dr,
        .set_msr             = emulator_set_msr,
        .get_msr             = emulator_get_msr,
+       .check_pmc           = emulator_check_pmc,
        .read_pmc            = emulator_read_pmc,
        .halt                = emulator_halt,
        .wbinvd              = emulator_wbinvd,
@@ -4850,7 +4909,7 @@ static const struct x86_emulate_ops emulate_ops = {
 
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 {
-       u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
+       u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu);
        /*
         * an sti; sti; sequence only disables interrupts for the first
         * instruction. So, if the last instruction, be it emulated or
@@ -4858,8 +4917,13 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
         * means that the last instruction is an sti. We should not
         * leave the flag on in this case. The same goes for mov ss
         */
-       if (!(int_shadow & mask))
+       if (int_shadow & mask)
+               mask = 0;
+       if (unlikely(int_shadow || mask)) {
                kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
+               if (!mask)
+                       kvm_make_request(KVM_REQ_EVENT, vcpu);
+       }
 }
 
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
@@ -4874,19 +4938,6 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
                kvm_queue_exception(vcpu, ctxt->exception.vector);
 }
 
-static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
-{
-       memset(&ctxt->opcode_len, 0,
-              (void *)&ctxt->_regs - (void *)&ctxt->opcode_len);
-
-       ctxt->fetch.start = 0;
-       ctxt->fetch.end = 0;
-       ctxt->io_read.pos = 0;
-       ctxt->io_read.end = 0;
-       ctxt->mem_read.pos = 0;
-       ctxt->mem_read.end = 0;
-}
-
 static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 {
        struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
@@ -5085,23 +5136,22 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
        return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
+static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
 {
        struct kvm_run *kvm_run = vcpu->run;
 
        /*
-        * Use the "raw" value to see if TF was passed to the processor.
-        * Note that the new value of the flags has not been saved yet.
+        * rflags is the old, "raw" value of the flags.  The new value has
+        * not been saved yet.
         *
         * This is correct even for TF set by the guest, because "the
         * processor will not generate this exception after the instruction
         * that sets the TF flag".
         */
-       unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
-
        if (unlikely(rflags & X86_EFLAGS_TF)) {
                if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
-                       kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
+                       kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 |
+                                                 DR6_RTM;
                        kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip;
                        kvm_run->debug.arch.exception = DB_VECTOR;
                        kvm_run->exit_reason = KVM_EXIT_DEBUG;
@@ -5114,7 +5164,7 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
                         * cleared by the processor".
                         */
                        vcpu->arch.dr6 &= ~15;
-                       vcpu->arch.dr6 |= DR6_BS;
+                       vcpu->arch.dr6 |= DR6_BS | DR6_RTM;
                        kvm_queue_exception(vcpu, DB_VECTOR);
                }
        }
@@ -5133,7 +5183,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
                                           vcpu->arch.eff_db);
 
                if (dr6 != 0) {
-                       kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+                       kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM;
                        kvm_run->debug.arch.pc = kvm_rip_read(vcpu) +
                                get_segment_base(vcpu, VCPU_SREG_CS);
 
@@ -5144,14 +5194,15 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r)
                }
        }
 
-       if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) {
+       if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) &&
+           !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) {
                dr6 = kvm_vcpu_check_hw_bp(eip, 0,
                                           vcpu->arch.dr7,
                                           vcpu->arch.db);
 
                if (dr6 != 0) {
                        vcpu->arch.dr6 &= ~15;
-                       vcpu->arch.dr6 |= dr6;
+                       vcpu->arch.dr6 |= dr6 | DR6_RTM;
                        kvm_queue_exception(vcpu, DB_VECTOR);
                        *r = EMULATE_DONE;
                        return true;
@@ -5215,6 +5266,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 
        if (emulation_type & EMULTYPE_SKIP) {
                kvm_rip_write(vcpu, ctxt->_eip);
+               if (ctxt->eflags & X86_EFLAGS_RF)
+                       kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF);
                return EMULATE_DONE;
        }
 
@@ -5265,13 +5318,22 @@ restart:
                r = EMULATE_DONE;
 
        if (writeback) {
+               unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
                toggle_interruptibility(vcpu, ctxt->interruptibility);
-               kvm_make_request(KVM_REQ_EVENT, vcpu);
                vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
                kvm_rip_write(vcpu, ctxt->eip);
                if (r == EMULATE_DONE)
-                       kvm_vcpu_check_singlestep(vcpu, &r);
-               kvm_set_rflags(vcpu, ctxt->eflags);
+                       kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+               __kvm_set_rflags(vcpu, ctxt->eflags);
+
+               /*
+                * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
+                * do nothing, and it will be requested again as soon as
+                * the shadow expires.  But we still need to check here,
+                * because POPF has no interrupt shadow.
+                */
+               if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
+                       kvm_make_request(KVM_REQ_EVENT, vcpu);
        } else
                vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
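
The new KVM_REQ_EVENT condition is a rising-edge test: new & ~old isolates flag bits that were clear before emulation and set afterwards, so only an instruction that actually enabled interrupts (e.g. POPF) forces another event-injection pass. In isolation:

    #define X86_EFLAGS_IF 0x200UL   /* bit 9, as in the kernel headers */

    static _Bool if_raised(unsigned long old_fl, unsigned long new_fl)
    {
            return (new_fl & ~old_fl) & X86_EFLAGS_IF;  /* 0 -> 1 transition */
    }
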
 
@@ -5662,7 +5724,6 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
        u64 param, ingpa, outgpa, ret;
        uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0;
        bool fast, longmode;
-       int cs_db, cs_l;
 
        /*
         * hypercall generates UD from non-zero cpl and real mode
@@ -5673,8 +5734,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
                return 0;
        }
 
-       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-       longmode = is_long_mode(vcpu) && cs_l == 1;
+       longmode = is_64_bit_mode(vcpu);
 
        if (!longmode) {
                param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) |
@@ -5739,7 +5799,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
        unsigned long nr, a0, a1, a2, a3, ret;
-       int r = 1;
+       int op_64_bit, r = 1;
 
        if (kvm_hv_hypercall_enabled(vcpu->kvm))
                return kvm_hv_hypercall(vcpu);
@@ -5752,7 +5812,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 
        trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
-       if (!is_long_mode(vcpu)) {
+       op_64_bit = is_64_bit_mode(vcpu);
+       if (!op_64_bit) {
                nr &= 0xFFFFFFFF;
                a0 &= 0xFFFFFFFF;
                a1 &= 0xFFFFFFFF;
@@ -5778,6 +5839,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
                break;
        }
 out:
+       if (!op_64_bit)
+               ret = (u32)ret;
        kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
        ++vcpu->stat.hypercalls;
        return r;
@@ -5856,6 +5919,11 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
                trace_kvm_inj_exception(vcpu->arch.exception.nr,
                                        vcpu->arch.exception.has_error_code,
                                        vcpu->arch.exception.error_code);
+
+               if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT)
+                       __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) |
+                                            X86_EFLAGS_RF);
+
                kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr,
                                          vcpu->arch.exception.has_error_code,
                                          vcpu->arch.exception.error_code,
@@ -5887,6 +5955,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
                        kvm_x86_ops->set_nmi(vcpu);
                }
        } else if (kvm_cpu_has_injectable_intr(vcpu)) {
+               /*
+                * Because interrupts can be injected asynchronously, we are
+                * calling check_nested_events again here to avoid a race condition.
+                * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+                * proposal and current concerns.  Perhaps we should be setting
+                * KVM_REQ_EVENT only on certain events and not unconditionally?
+                */
+               if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+                       r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+                       if (r != 0)
+                               return r;
+               }
                if (kvm_x86_ops->interrupt_allowed(vcpu)) {
                        kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
                                            false);
@@ -6835,9 +6915,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
        atomic_set(&vcpu->arch.nmi_queued, 0);
        vcpu->arch.nmi_pending = 0;
        vcpu->arch.nmi_injected = false;
+       kvm_clear_interrupt_queue(vcpu);
+       kvm_clear_exception_queue(vcpu);
 
        memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
-       vcpu->arch.dr6 = DR6_FIXED_1;
+       vcpu->arch.dr6 = DR6_INIT;
        kvm_update_dr6(vcpu);
        vcpu->arch.dr7 = DR7_FIXED_1;
        kvm_update_dr7(vcpu);
@@ -7393,12 +7475,17 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_rflags);
 
-void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
            kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
                rflags |= X86_EFLAGS_TF;
        kvm_x86_ops->set_rflags(vcpu, rflags);
+}
+
+void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+       __kvm_set_rflags(vcpu, rflags);
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
index 8c97bac..306a1b7 100644
@@ -47,6 +47,16 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu)
+{
+       int cs_db, cs_l;
+
+       if (!is_long_mode(vcpu))
+               return false;
+       kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+       return cs_l;
+}
+
 static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
@@ -108,6 +118,23 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
        return false;
 }
 
+static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu,
+                                              enum kvm_reg reg)
+{
+       unsigned long val = kvm_register_read(vcpu, reg);
+
+       return is_64_bit_mode(vcpu) ? val : (u32)val;
+}
+
+static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
+                                      enum kvm_reg reg,
+                                      unsigned long val)
+{
+       if (!is_64_bit_mode(vcpu))
+               val = (u32)val;
+       return kvm_register_write(vcpu, reg, val);
+}
+
 void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
 void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
 int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);
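
kvm_register_readl()/kvm_register_writel() encode the architectural rule that outside 64-bit mode the general-purpose registers are effectively 32 bits wide, so the upper half of the host-side unsigned long must be discarded. A hypothetical illustration of the truncation:

    #include <stdint.h>

    static unsigned long guest_visible(unsigned long raw, _Bool long_mode)
    {
            /* e.g. raw = 0x1122334455667788 -> 0x55667788 in 32-bit mode */
            return long_mode ? raw : (uint32_t)raw;
    }
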
index b5e6026..c61ea57 100644
@@ -326,6 +326,27 @@ static void pci_fixup_video(struct pci_dev *pdev)
        struct pci_bus *bus;
        u16 config;
 
+       if (!vga_default_device()) {
+               resource_size_t start, end;
+               int i;
+
+               /* Does firmware framebuffer belong to us? */
+               for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+                       if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM))
+                               continue;
+
+                       start = pci_resource_start(pdev, i);
+                       end  = pci_resource_end(pdev, i);
+
+                       if (!start || !end)
+                               continue;
+
+                       if (screen_info.lfb_base >= start &&
+                           (screen_info.lfb_base + screen_info.lfb_size) < end)
+                               vga_set_default_device(pdev);
+               }
+       }
+
        /* Is VGA routed to us? */
        bus = pdev->bus;
        while (bus) {
index a19ed92..2ae525e 100644
@@ -162,6 +162,10 @@ pcibios_align_resource(void *data, const struct resource *res,
                        return start;
                if (start & 0x300)
                        start = (start + 0x3ff) & ~0x3ff;
+       } else if (res->flags & IORESOURCE_MEM) {
+               /* The low 1MB range is reserved for ISA cards */
+               if (start < BIOS_END)
+                       start = BIOS_END;
        }
        return start;
 }
index 424f4c9..6ec7910 100644
@@ -165,7 +165,7 @@ static void fix_processor_context(void)
  *             by __save_processor_state()
  *     @ctxt - structure to load the registers contents from
  */
-static void __restore_processor_state(struct saved_context *ctxt)
+static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
        if (ctxt->misc_enable_saved)
                wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
@@ -239,7 +239,7 @@ static void __restore_processor_state(struct saved_context *ctxt)
 }
 
 /* Needed by apm.c */
-void restore_processor_state(void)
+void notrace restore_processor_state(void)
 {
        __restore_processor_state(&saved_context);
 }
index 04f82e0..2a206d2 100644
@@ -25,7 +25,8 @@ static inline void rep_nop(void)
        __asm__ __volatile__("rep;nop": : :"memory");
 }
 
-#define cpu_relax()    rep_nop()
+#define cpu_relax()            rep_nop()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #include <asm/processor-generic.h>
 
index 3c0809a..61b04fe 100644
@@ -11,7 +11,6 @@ VDSO32-$(CONFIG_COMPAT)               := y
 
 # files to link into the vdso
 vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
-vobjs-nox32 := vdso-fakesections.o
 
 # files to link into kernel
 obj-y                          += vma.o
@@ -67,7 +66,8 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
 #
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-       -fno-omit-frame-pointer -foptimize-sibling-calls
+       -fno-omit-frame-pointer -foptimize-sibling-calls \
+       -DDISABLE_BRANCH_PROFILING
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
@@ -134,7 +134,7 @@ override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
 targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
-targets += vdso32/vclock_gettime.o
+targets += vdso32/vclock_gettime.o vdso32/vdso-fakesections.o
 
 $(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
 
@@ -150,11 +150,13 @@ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
 $(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
                                 $(obj)/vdso32/vdso32.lds \
                                 $(obj)/vdso32/vclock_gettime.o \
+                                $(obj)/vdso32/vdso-fakesections.o \
                                 $(obj)/vdso32/note.o \
                                 $(obj)/vdso32/%.o
        $(call if_changed,vdso)
@@ -169,14 +171,24 @@ quiet_cmd_vdso = VDSO    $@
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-       -Wl,-Bsymbolic $(LTO_CFLAGS)
+       $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
 #
-# Install the unstripped copies of vdso*.so.
+# Install the unstripped copies of vdso*.so.  If our toolchain supports
+# build-id, install .build-id links as well.
 #
 quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
-      cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%)
+define cmd_vdso_install
+       cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+       if readelf -n $< |grep -q 'Build ID'; then \
+         buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+         first=`echo $$buildid | cut -b-2`; \
+         last=`echo $$buildid | cut -b3-`; \
+         mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+         ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+       fi
+endef
 
 vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
 
index b2e4f49..9793322 100644
@@ -11,9 +11,6 @@
  * Check with readelf after changing.
  */
 
-/* Disable profiling for userspace code: */
-#define DISABLE_BRANCH_PROFILING
-
 #include <uapi/linux/time.h>
 #include <asm/vgtod.h>
 #include <asm/hpet.h>
index cb8a8d7..aa5fbfa 100644
@@ -2,31 +2,20 @@
  * Copyright 2014 Andy Lutomirski
  * Subject to the GNU Public License, v.2
  *
- * Hack to keep broken Go programs working.
- *
- * The Go runtime had a couple of bugs: it would read the section table to try
- * to figure out how many dynamic symbols there were (it shouldn't have looked
- * at the section table at all) and, if there were no SHT_SYNDYM section table
- * entry, it would use an uninitialized value for the number of symbols.  As a
- * workaround, we supply a minimal section table.  vdso2c will adjust the
- * in-memory image so that "vdso_fake_sections" becomes the section table.
- *
- * The bug was introduced by:
- * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
- * and is being addressed in the Go runtime in this issue:
- * https://code.google.com/p/go/issues/detail?id=8197
+ * String table for loadable section headers.  See vdso2c.h for why
+ * this exists.
  */
 
-#ifndef __x86_64__
-#error This hack is specific to the 64-bit vDSO
-#endif
-
-#include <linux/elf.h>
-
-extern const __visible struct elf64_shdr vdso_fake_sections[];
-const __visible struct elf64_shdr vdso_fake_sections[] = {
-       {
-               .sh_type = SHT_DYNSYM,
-               .sh_entsize = sizeof(Elf64_Sym),
-       }
-};
+const char fake_shstrtab[] __attribute__((section(".fake_shstrtab"))) =
+       ".hash\0"
+       ".dynsym\0"
+       ".dynstr\0"
+       ".gnu.version\0"
+       ".gnu.version_d\0"
+       ".dynamic\0"
+       ".rodata\0"
+       ".fake_shstrtab\0"  /* Yay, self-referential code. */
+       ".note\0"
+       ".eh_frame_hdr\0"
+       ".eh_frame\0"
+       ".text";
index 2ec72f6..9197544 100644
@@ -6,6 +6,16 @@
  * This script controls its layout.
  */
 
+#if defined(BUILD_VDSO64)
+# define SHDR_SIZE 64
+#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
+# define SHDR_SIZE 40
+#else
+# error unknown VDSO target
+#endif
+
+#define NUM_FAKE_SHDRS 13
+
 SECTIONS
 {
        . = SIZEOF_HEADERS;
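
The hard-coded SHDR_SIZE values come straight from the ELF layout: a 64-bit section header (Elf64_Shdr) occupies 64 bytes and a 32-bit one (Elf32_Shdr) 40, which a C translation unit could assert as:

    #include <elf.h>
    #include <assert.h>

    static_assert(sizeof(Elf64_Shdr) == 64, "64-bit section header size");
    static_assert(sizeof(Elf32_Shdr) == 40, "32-bit section header size");
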
@@ -18,35 +28,52 @@ SECTIONS
        .gnu.version_d  : { *(.gnu.version_d) }
        .gnu.version_r  : { *(.gnu.version_r) }
 
+       .dynamic        : { *(.dynamic) }               :text   :dynamic
+
+       .rodata         : {
+               *(.rodata*)
+               *(.data*)
+               *(.sdata*)
+               *(.got.plt) *(.got)
+               *(.gnu.linkonce.d.*)
+               *(.bss*)
+               *(.dynbss*)
+               *(.gnu.linkonce.b.*)
+
+               /*
+                * Ideally this would live in a C file, but that won't
+                * work cleanly for x32 until we start building the x32
+                * C code using an x32 toolchain.
+                */
+               VDSO_FAKE_SECTION_TABLE_START = .;
+               . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
+               VDSO_FAKE_SECTION_TABLE_END = .;
+       }                                               :text
+
+       .fake_shstrtab  : { *(.fake_shstrtab) }         :text
+
+
        .note           : { *(.note.*) }                :text   :note
 
        .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
        .eh_frame       : { KEEP (*(.eh_frame)) }       :text
 
-       .dynamic        : { *(.dynamic) }               :text   :dynamic
-
-       .rodata         : { *(.rodata*) }               :text
-       .data           : {
-             *(.data*)
-             *(.sdata*)
-             *(.got.plt) *(.got)
-             *(.gnu.linkonce.d.*)
-             *(.bss*)
-             *(.dynbss*)
-             *(.gnu.linkonce.b.*)
-       }
-
-       .altinstructions        : { *(.altinstructions) }
-       .altinstr_replacement   : { *(.altinstr_replacement) }
 
        /*
-        * Align the actual code well away from the non-instruction data.
-        * This is the best thing for the I-cache.
+        * Text is well-separated from actual data: there's plenty of
+        * stuff that isn't used at runtime in between.
         */
-       . = ALIGN(0x100);
 
        .text           : { *(.text*) }                 :text   =0x90909090,
 
+       /*
+        * At the end so that eu-elflint stays happy when vdso2c strips
+        * these.  A better implementation would avoid allocating space
+        * for these.
+        */
+       .altinstructions        : { *(.altinstructions) }       :text
+       .altinstr_replacement   : { *(.altinstr_replacement) }  :text
+
        /*
         * The remainder of the vDSO consists of special pages that are
         * shared between the kernel and userspace.  It needs to be at the
@@ -75,6 +102,7 @@ SECTIONS
        /DISCARD/ : {
                *(.discard)
                *(.discard.*)
+               *(__bug_table)
        }
 }
 
index 75e3404..6807932 100644
@@ -6,6 +6,8 @@
  * the DSO.
  */
 
+#define BUILD_VDSO64
+
 #include "vdso-layout.lds.S"
 
 /*
index 7a6bf50..238dbe8 100644
@@ -23,6 +23,8 @@ enum {
        sym_vvar_page,
        sym_hpet_page,
        sym_end_mapping,
+       sym_VDSO_FAKE_SECTION_TABLE_START,
+       sym_VDSO_FAKE_SECTION_TABLE_END,
 };
 
 const int special_pages[] = {
@@ -30,15 +32,26 @@ const int special_pages[] = {
        sym_hpet_page,
 };
 
-char const * const required_syms[] = {
-       [sym_vvar_page] = "vvar_page",
-       [sym_hpet_page] = "hpet_page",
-       [sym_end_mapping] = "end_mapping",
-       "VDSO32_NOTE_MASK",
-       "VDSO32_SYSENTER_RETURN",
-       "__kernel_vsyscall",
-       "__kernel_sigreturn",
-       "__kernel_rt_sigreturn",
+struct vdso_sym {
+       const char *name;
+       bool export;
+};
+
+struct vdso_sym required_syms[] = {
+       [sym_vvar_page] = {"vvar_page", true},
+       [sym_hpet_page] = {"hpet_page", true},
+       [sym_end_mapping] = {"end_mapping", true},
+       [sym_VDSO_FAKE_SECTION_TABLE_START] = {
+               "VDSO_FAKE_SECTION_TABLE_START", false
+       },
+       [sym_VDSO_FAKE_SECTION_TABLE_END] = {
+               "VDSO_FAKE_SECTION_TABLE_END", false
+       },
+       {"VDSO32_NOTE_MASK", true},
+       {"VDSO32_SYSENTER_RETURN", true},
+       {"__kernel_vsyscall", true},
+       {"__kernel_sigreturn", true},
+       {"__kernel_rt_sigreturn", true},
 };
 
 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
@@ -83,37 +96,21 @@ extern void bad_put_le(void);
 
 #define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
 
-#define BITS 64
-#define GOFUNC go64
-#define Elf_Ehdr Elf64_Ehdr
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Phdr Elf64_Phdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Dyn Elf64_Dyn
+#define BITSFUNC3(name, bits) name##bits
+#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
+#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+#define ELF_BITS 64
 #include "vdso2c.h"
-#undef BITS
-#undef GOFUNC
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Phdr
-#undef Elf_Sym
-#undef Elf_Dyn
-
-#define BITS 32
-#define GOFUNC go32
-#define Elf_Ehdr Elf32_Ehdr
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Phdr Elf32_Phdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Dyn Elf32_Dyn
+#undef ELF_BITS
+
+#define ELF_BITS 32
 #include "vdso2c.h"
-#undef BITS
-#undef GOFUNC
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Phdr
-#undef Elf_Sym
-#undef Elf_Dyn
+#undef ELF_BITS
 
 static void go(void *addr, size_t len, FILE *outfile, const char *name)
 {
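
The token-pasting macros replace the old per-width #define/#undef blocks: with ELF_BITS defined to 64 before including vdso2c.h, the preprocessor expands, for example:

    /*
     * BITSFUNC(go)  ->  BITSFUNC2(go, 64)  ->  BITSFUNC3(go, 64)  ->  go64
     * ELF(Shdr)     ->  ELF_BITS_XFORM(64, Shdr)                  ->  Elf64_Shdr
     *
     * Re-including with ELF_BITS == 32 compiles the same source again
     * as go32, Elf32_Shdr, and so on.
     */
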
index c6eefaf..11b65d4 100644
  * are built for 32-bit userspace.
  */
 
-static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
+/*
+ * We're writing a section table for a few reasons:
+ *
+ * The Go runtime had a couple of bugs: it would read the section
+ * table to try to figure out how many dynamic symbols there were (it
+ * shouldn't have looked at the section table at all) and, if there
+ * were no SHT_DYNSYM section table entry, it would use an
+ * uninitialized value for the number of symbols.  An empty DYNSYM
+ * table would work, but I see no reason not to write a valid one (and
+ * keep full performance for old Go programs).  This hack is only
+ * needed on x86_64.
+ *
+ * The bug was introduced on 2012-08-31 by:
+ * https://code.google.com/p/go/source/detail?r=56ea40aac72b
+ * and was fixed on 2014-06-13 by:
+ * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
+ *
+ * Binutils has issues debugging the vDSO: it reads the section table to
+ * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
+ * would break build-id if we removed the section table.  Binutils
+ * also requires that shstrndx != 0.  See:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
+ *
+ * elfutils might not look for PT_NOTE if there is a section table at
+ * all.  I don't know whether this matters for any practical purpose.
+ *
+ * For simplicity, rather than hacking up a partial section table, we
+ * just write a mostly complete one.  We omit non-dynamic symbols,
+ * though, since they're rather large.
+ *
+ * Once binutils gets fixed, we might be able to drop this for all but
+ * the 64-bit vdso, since build-id only works in kernel RPMs, and
+ * systems that update to new enough kernel RPMs will likely update
+ * binutils in sync.  build-id has never worked for home-built kernel
+ * RPMs without manual symlinking, and I suspect that no one ever does
+ * that.
+ */
+struct BITSFUNC(fake_sections)
+{
+       ELF(Shdr) *table;
+       unsigned long table_offset;
+       int count, max_count;
+
+       int in_shstrndx;
+       unsigned long shstr_offset;
+       const char *shstrtab;
+       size_t shstrtab_len;
+
+       int out_shstrndx;
+};
+
+static unsigned int BITSFUNC(find_shname)(struct BITSFUNC(fake_sections) *out,
+                                         const char *name)
+{
+       const char *outname = out->shstrtab;
+       while (outname - out->shstrtab < out->shstrtab_len) {
+               if (!strcmp(name, outname))
+                       return (outname - out->shstrtab) + out->shstr_offset;
+               outname += strlen(outname) + 1;
+       }
+
+       if (*name)
+               printf("Warning: could not find output name \"%s\"\n", name);
+       return out->shstr_offset + out->shstrtab_len - 1;  /* Use a null. */
+}
+
+static void BITSFUNC(init_sections)(struct BITSFUNC(fake_sections) *out)
+{
+       if (!out->in_shstrndx)
+               fail("didn't find the fake shstrndx\n");
+
+       memset(out->table, 0, out->max_count * sizeof(ELF(Shdr)));
+
+       if (out->max_count < 1)
+               fail("we need at least two fake output sections\n");
+
+       PUT_LE(&out->table[0].sh_type, SHT_NULL);
+       PUT_LE(&out->table[0].sh_name, BITSFUNC(find_shname)(out, ""));
+
+       out->count = 1;
+}
+
+static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
+                                  int in_idx, const ELF(Shdr) *in,
+                                  const char *name)
+{
+       uint64_t flags = GET_LE(&in->sh_flags);
+
+       bool copy = flags & SHF_ALLOC &&
+               (GET_LE(&in->sh_size) ||
+                (GET_LE(&in->sh_type) != SHT_RELA &&
+                 GET_LE(&in->sh_type) != SHT_REL)) &&
+               strcmp(name, ".altinstructions") &&
+               strcmp(name, ".altinstr_replacement");
+
+       if (!copy)
+               return;
+
+       if (out->count >= out->max_count)
+               fail("too many copied sections (max = %d)\n", out->max_count);
+
+       if (in_idx == out->in_shstrndx)
+               out->out_shstrndx = out->count;
+
+       out->table[out->count] = *in;
+       PUT_LE(&out->table[out->count].sh_name,
+              BITSFUNC(find_shname)(out, name));
+
+       /* elfutils requires that a strtab have the correct type. */
+       if (!strcmp(name, ".fake_shstrtab"))
+               PUT_LE(&out->table[out->count].sh_type, SHT_STRTAB);
+
+       out->count++;
+}
+
+static void BITSFUNC(go)(void *addr, size_t len,
+                        FILE *outfile, const char *name)
 {
        int found_load = 0;
        unsigned long load_size = -1;  /* Work around bogus warning */
        unsigned long data_size;
-       Elf_Ehdr *hdr = (Elf_Ehdr *)addr;
+       ELF(Ehdr) *hdr = (ELF(Ehdr) *)addr;
        int i;
        unsigned long j;
-       Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+       ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
                *alt_sec = NULL;
-       Elf_Dyn *dyn = 0, *dyn_end = 0;
+       ELF(Dyn) *dyn = 0, *dyn_end = 0;
        const char *secstrings;
        uint64_t syms[NSYMS] = {};
 
-       uint64_t fake_sections_value = 0, fake_sections_size = 0;
+       struct BITSFUNC(fake_sections) fake_sections = {};
 
-       Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));
+       ELF(Phdr) *pt = (ELF(Phdr) *)(addr + GET_LE(&hdr->e_phoff));
 
        /* Walk the segment table. */
        for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
@@ -51,7 +167,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
        for (i = 0; dyn + i < dyn_end &&
                     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
                typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
-               if (tag == DT_REL || tag == DT_RELSZ ||
+               if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
                    tag == DT_RELENT || tag == DT_TEXTREL)
                        fail("vdso image contains dynamic relocations\n");
        }
@@ -61,7 +177,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
                GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
        secstrings = addr + GET_LE(&secstrings_hdr->sh_offset);
        for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
-               Elf_Shdr *sh = addr + GET_LE(&hdr->e_shoff) +
+               ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
                        GET_LE(&hdr->e_shentsize) * i;
                if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
                        symtab_hdr = sh;
@@ -82,29 +198,63 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
             i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
             i++) {
                int k;
-               Elf_Sym *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
+               ELF(Sym) *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
                        GET_LE(&symtab_hdr->sh_entsize) * i;
                const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
                        GET_LE(&sym->st_name);
 
                for (k = 0; k < NSYMS; k++) {
-                       if (!strcmp(name, required_syms[k])) {
+                       if (!strcmp(name, required_syms[k].name)) {
                                if (syms[k]) {
                                        fail("duplicate symbol %s\n",
-                                            required_syms[k]);
+                                            required_syms[k].name);
                                }
                                syms[k] = GET_LE(&sym->st_value);
                        }
                }
 
-               if (!strcmp(name, "vdso_fake_sections")) {
-                       if (fake_sections_value)
-                               fail("duplicate vdso_fake_sections\n");
-                       fake_sections_value = GET_LE(&sym->st_value);
-                       fake_sections_size = GET_LE(&sym->st_size);
+               if (!strcmp(name, "fake_shstrtab")) {
+                       ELF(Shdr) *sh;
+
+                       fake_sections.in_shstrndx = GET_LE(&sym->st_shndx);
+                       fake_sections.shstrtab = addr + GET_LE(&sym->st_value);
+                       fake_sections.shstrtab_len = GET_LE(&sym->st_size);
+                       sh = addr + GET_LE(&hdr->e_shoff) +
+                               GET_LE(&hdr->e_shentsize) *
+                               fake_sections.in_shstrndx;
+                       fake_sections.shstr_offset = GET_LE(&sym->st_value) -
+                               GET_LE(&sh->sh_addr);
                }
        }
 
+       /* Build the output section table. */
+       if (!syms[sym_VDSO_FAKE_SECTION_TABLE_START] ||
+           !syms[sym_VDSO_FAKE_SECTION_TABLE_END])
+               fail("couldn't find fake section table\n");
+       if ((syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
+            syms[sym_VDSO_FAKE_SECTION_TABLE_START]) % sizeof(ELF(Shdr)))
+               fail("fake section table size isn't a multiple of sizeof(Shdr)\n");
+       fake_sections.table = addr + syms[sym_VDSO_FAKE_SECTION_TABLE_START];
+       fake_sections.table_offset = syms[sym_VDSO_FAKE_SECTION_TABLE_START];
+       fake_sections.max_count = (syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
+                                  syms[sym_VDSO_FAKE_SECTION_TABLE_START]) /
+               sizeof(ELF(Shdr));
+
+       BITSFUNC(init_sections)(&fake_sections);
+       for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
+               ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
+                       GET_LE(&hdr->e_shentsize) * i;
+               BITSFUNC(copy_section)(&fake_sections, i, sh,
+                                      secstrings + GET_LE(&sh->sh_name));
+       }
+       if (!fake_sections.out_shstrndx)
+               fail("didn't generate shstrndx?!?\n");
+
+       PUT_LE(&hdr->e_shoff, fake_sections.table_offset);
+       PUT_LE(&hdr->e_shentsize, sizeof(ELF(Shdr)));
+       PUT_LE(&hdr->e_shnum, fake_sections.count);
+       PUT_LE(&hdr->e_shstrndx, fake_sections.out_shstrndx);
+
        /* Validate mapping addresses. */
        for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
                if (!syms[i])
@@ -112,25 +262,17 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 
                if (syms[i] % 4096)
                        fail("%s must be a multiple of 4096\n",
-                            required_syms[i]);
+                            required_syms[i].name);
                if (syms[i] < data_size)
                        fail("%s must be after the text mapping\n",
-                            required_syms[i]);
+                            required_syms[i].name);
                if (syms[sym_end_mapping] < syms[i] + 4096)
-                       fail("%s overruns end_mapping\n", required_syms[i]);
+                       fail("%s overruns end_mapping\n",
+                            required_syms[i].name);
        }
        if (syms[sym_end_mapping] % 4096)
                fail("end_mapping must be a multiple of 4096\n");
 
-       /* Remove sections or use fakes */
-       if (fake_sections_size % sizeof(Elf_Shdr))
-               fail("vdso_fake_sections size is not a multiple of %ld\n",
-                    (long)sizeof(Elf_Shdr));
-       PUT_LE(&hdr->e_shoff, fake_sections_value);
-       PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
-       PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
-       PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
-
        if (!name) {
                fwrite(addr, load_size, 1, outfile);
                return;
@@ -168,9 +310,9 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
                        (unsigned long)GET_LE(&alt_sec->sh_size));
        }
        for (i = 0; i < NSYMS; i++) {
-               if (syms[i])
+               if (required_syms[i].export && syms[i])
                        fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
-                               required_syms[i], syms[i]);
+                               required_syms[i].name, syms[i]);
        }
        fprintf(outfile, "};\n");
 }
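Throughout go(), the GET_LE()/PUT_LE() helpers (defined elsewhere in the vdso2c tool) read and write image fields in the file's little-endian byte order regardless of host endianness. A hedged sketch of what such accessors can look like for one width (the real helpers dispatch on the field's size):

#include <stdint.h>

static inline uint32_t get_le32(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
	       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
}

static inline void put_le32(void *p, uint32_t v)
{
	uint8_t *b = p;

	b[0] = (uint8_t)v;
	b[1] = (uint8_t)(v >> 8);
	b[2] = (uint8_t)(v >> 16);
	b[3] = (uint8_t)(v >> 24);
}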
diff --git a/arch/x86/vdso/vdso32/vdso-fakesections.c b/arch/x86/vdso/vdso32/vdso-fakesections.c
new file mode 100644 (file)
index 0000000..541468e
--- /dev/null
@@ -0,0 +1 @@
+#include "../vdso-fakesections.c"
index 46b991b..697c11e 100644 (file)
@@ -6,6 +6,8 @@
  * the DSO.
  */
 
+#define BUILD_VDSOX32
+
 #include "vdso-layout.lds.S"
 
 /*
index e1513c4..5a5176d 100644 (file)
@@ -62,6 +62,9 @@ struct linux_binprm;
    Only used for the 64-bit and x32 vdsos. */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
+#ifdef CONFIG_X86_32
+       return 0;
+#else
        unsigned long addr, end;
        unsigned offset;
        end = (start + PMD_SIZE - 1) & PMD_MASK;
@@ -83,6 +86,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
        addr = align_vdso_addr(addr);
 
        return addr;
+#endif
 }
 
 static int map_vdso(const struct vdso_image *image, bool calculate_addr)
index f17b292..ffb101e 100644 (file)
@@ -1537,7 +1537,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
        if (!xen_pvh_domain())
                pv_cpu_ops = xen_cpu_ops;
 
-       x86_init.resources.memory_setup = xen_memory_setup;
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
+       else
+               x86_init.resources.memory_setup = xen_memory_setup;
        x86_init.oem.arch_setup = xen_arch_setup;
        x86_init.oem.banner = xen_banner;
 
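The hunk above is the usual x86 boot-hook pattern: generic code calls through a table of function pointers and the platform installs its own implementation before setup runs. A stand-alone illustration with hypothetical names:

#include <stdio.h>

struct init_ops {
	char *(*memory_setup)(void);
};

static char *default_memory_setup(void) { return "BIOS-e820"; }
static char *xlated_memory_setup(void)  { return "Xen";       }

static struct init_ops init_ops = {
	.memory_setup = default_memory_setup,
};

int main(void)
{
	int auto_translated = 1;	/* XENFEAT_auto_translated_physmap */

	if (auto_translated)
		init_ops.memory_setup = xlated_memory_setup;

	printf("memory map source: %s\n", init_ops.memory_setup());
	return 0;
}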
index c985835..ebfa9b2 100644 (file)
 
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
+#include <xen/xen.h>
 
 #include <asm/pgtable.h>
 
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-                     unsigned long addr, void *data)
+static struct gnttab_vm_area {
+       struct vm_struct *area;
+       pte_t **ptes;
+} gnttab_shared_vm_area, gnttab_status_vm_area;
+
+int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          void **__shared)
 {
-       unsigned long **frames = (unsigned long **)data;
+       void *shared = *__shared;
+       unsigned long addr;
+       unsigned long i;
 
-       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-       (*frames)++;
-       return 0;
-}
+       if (shared == NULL)
+               *__shared = shared = gnttab_shared_vm_area.area->addr;
 
-/*
- * This function is used to map shared frames to store grant status. It is
- * different from map_pte_fn above, the frames type here is uint64_t.
- */
-static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
-                            unsigned long addr, void *data)
-{
-       uint64_t **frames = (uint64_t **)data;
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
+                          mfn_pte(frames[i], PAGE_KERNEL));
+               addr += PAGE_SIZE;
+       }
 
-       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-       (*frames)++;
        return 0;
 }
 
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-                       unsigned long addr, void *data)
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          grant_status_t **__shared)
 {
+       grant_status_t *shared = *__shared;
+       unsigned long addr;
+       unsigned long i;
+
+       if (shared == NULL)
+               *__shared = shared = gnttab_status_vm_area.area->addr;
+
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
+                          mfn_pte(frames[i], PAGE_KERNEL));
+               addr += PAGE_SIZE;
+       }
 
-       set_pte_at(&init_mm, addr, pte, __pte(0));
        return 0;
 }
 
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
-                          unsigned long max_nr_gframes,
-                          void **__shared)
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 {
-       int rc;
-       void *shared = *__shared;
+       pte_t **ptes;
+       unsigned long addr;
+       unsigned long i;
 
-       if (shared == NULL) {
-               struct vm_struct *area =
-                       alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-               BUG_ON(area == NULL);
-               shared = area->addr;
-               *__shared = shared;
-       }
+       if (shared == gnttab_status_vm_area.area->addr)
+               ptes = gnttab_status_vm_area.ptes;
+       else
+               ptes = gnttab_shared_vm_area.ptes;
 
-       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-                                PAGE_SIZE * nr_gframes,
-                                map_pte_fn, &frames);
-       return rc;
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, ptes[i], __pte(0));
+               addr += PAGE_SIZE;
+       }
 }
 
-int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
-                          unsigned long max_nr_gframes,
-                          grant_status_t **__shared)
+static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
 {
-       int rc;
-       grant_status_t *shared = *__shared;
+       area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+       if (area->ptes == NULL)
+               return -ENOMEM;
 
-       if (shared == NULL) {
-               /* No need to pass in PTE as we are going to do it
-                * in apply_to_page_range anyhow. */
-               struct vm_struct *area =
-                       alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-               BUG_ON(area == NULL);
-               shared = area->addr;
-               *__shared = shared;
+       area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
+       if (area->area == NULL) {
+               kfree(area->ptes);
+               return -ENOMEM;
        }
 
-       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-                                PAGE_SIZE * nr_gframes,
-                                map_pte_fn_status, &frames);
-       return rc;
+       return 0;
 }
 
-void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
+static void arch_gnttab_vfree(struct gnttab_vm_area *area)
+{
+       free_vm_area(area->area);
+       kfree(area->ptes);
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
 {
-       apply_to_page_range(&init_mm, (unsigned long)shared,
-                           PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
+       int ret;
+
+       if (!xen_pv_domain())
+               return 0;
+
+       ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Always allocate the space for the status frames in case
+        * we're migrated to a host with V2 support.
+        */
+       ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+  err:
+       arch_gnttab_vfree(&gnttab_shared_vm_area);
+       return -ENOMEM;
 }
+
 #ifdef CONFIG_XEN_PVH
 #include <xen/balloon.h>
 #include <xen/events.h>
-#include <xen/xen.h>
 #include <linux/slab.h>
 static int __init xlated_setup_gnttab_pages(void)
 {
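The grant-table rework above replaces on-demand apply_to_page_range() walks with PTE pointers captured once, at allocation time, through alloc_vm_area(); after that, remapping the frames (e.g. following migration) is a series of set_pte_at() calls that cannot fail for lack of memory. A condensed sketch of the capture idiom (kernel-internal APIs, not a stand-alone program):

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

struct mapped_area {
	struct vm_struct *area;
	pte_t **ptes;			/* one PTE pointer per page */
};

static int mapped_area_alloc(struct mapped_area *m, unsigned long nr_pages)
{
	m->ptes = kmalloc_array(nr_pages, sizeof(pte_t *), GFP_KERNEL);
	if (!m->ptes)
		return -ENOMEM;

	/*
	 * alloc_vm_area() fills m->ptes[i] with the kernel PTE for each
	 * page of the area, so later (re)maps reduce to set_pte_at()
	 * with no page-table walk and no allocation that could fail.
	 */
	m->area = alloc_vm_area(PAGE_SIZE * nr_pages, m->ptes);
	if (!m->area) {
		kfree(m->ptes);
		return -ENOMEM;
	}
	return 0;
}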
index 821a11a..2e55516 100644 (file)
@@ -27,7 +27,6 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
-#include "mmu.h"
 #include "xen-ops.h"
 #include "vdso.h"
 
@@ -82,9 +81,6 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 
        memblock_reserve(start, size);
 
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return;
-
        xen_max_p2m_pfn = PFN_DOWN(start + size);
        for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);
@@ -107,7 +103,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
                .domid        = DOMID_SELF
        };
        unsigned long len = 0;
-       int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
        unsigned long pfn;
        int ret;
 
@@ -121,7 +116,7 @@ static unsigned long __init xen_do_chunk(unsigned long start,
                                continue;
                        frame = mfn;
                } else {
-                       if (!xlated_phys && mfn != INVALID_P2M_ENTRY)
+                       if (mfn != INVALID_P2M_ENTRY)
                                continue;
                        frame = pfn;
                }
@@ -159,13 +154,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 static unsigned long __init xen_release_chunk(unsigned long start,
                                              unsigned long end)
 {
-       /*
-        * Xen already ballooned out the E820 non RAM regions for us
-        * and set them up properly in EPT.
-        */
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return end - start;
-
        return xen_do_chunk(start, end, true);
 }
 
@@ -234,13 +222,7 @@ static void __init xen_set_identity_and_release_chunk(
         * (except for the ISA region which must be 1:1 mapped) to
         * release the refcounts (in Xen) on the original frames.
         */
-
-       /*
-        * PVH E820 matches the hypervisor's P2M which means we need to
-        * account for the proper values of *release and *identity.
-        */
-       for (pfn = start_pfn; !xen_feature(XENFEAT_auto_translated_physmap) &&
-            pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
+       for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
                pte_t pte = __pte_ma(0);
 
                if (pfn < PFN_UP(ISA_END_ADDRESS))
@@ -517,6 +499,35 @@ char * __init xen_memory_setup(void)
        return "Xen";
 }
 
+/*
+ * Machine-specific memory setup for auto-translated guests.
+ */
+char * __init xen_auto_xlated_memory_setup(void)
+{
+       static struct e820entry map[E820MAX] __initdata;
+
+       struct xen_memory_map memmap;
+       int i;
+       int rc;
+
+       memmap.nr_entries = E820MAX;
+       set_xen_guest_handle(memmap.buffer, map);
+
+       rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+       if (rc < 0)
+               panic("No memory map (%d)\n", rc);
+
+       sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);
+
+       for (i = 0; i < memmap.nr_entries; i++)
+               e820_add_region(map[i].addr, map[i].size, map[i].type);
+
+       memblock_reserve(__pa(xen_start_info->mfn_list),
+                        xen_start_info->pt_base - xen_start_info->mfn_list);
+
+       return "Xen";
+}
+
 /*
  * Set the bit indicating "nosegneg" library variants should be used.
  * We only need to bother in pure 32-bit mode; compat 32-bit processes
@@ -590,13 +601,7 @@ void xen_enable_syscall(void)
        }
 #endif /* CONFIG_X86_64 */
 }
-void xen_enable_nmi(void)
-{
-#ifdef CONFIG_X86_64
-       if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
-               BUG();
-#endif
-}
+
 void __init xen_pvmmu_arch_setup(void)
 {
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -611,7 +616,6 @@ void __init xen_pvmmu_arch_setup(void)
 
        xen_enable_sysenter();
        xen_enable_syscall();
-       xen_enable_nmi();
 }
 
 /* This function is not called for HVM domains */
index c834d4b..97d8765 100644 (file)
@@ -36,6 +36,7 @@ void xen_mm_unpin_all(void);
 void xen_set_pat(u64);
 
 char * __init xen_memory_setup(void);
+char * xen_auto_xlated_memory_setup(void);
 void __init xen_arch_setup(void);
 void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
index abb5970..b61bdf0 100644 (file)
@@ -182,6 +182,7 @@ extern unsigned long get_wchan(struct task_struct *p);
 #define KSTK_ESP(tsk)          (task_pt_regs(tsk)->areg[1])
 
 #define cpu_relax()  barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 /* Special register access. */
 
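cpu_relax_lowlatency() is a new relaxation primitive for tight busy-wait loops (e.g. optimistic spinning in the locking code); xtensa simply aliases it to cpu_relax(). A hedged sketch of the kind of loop it serves (kernel-context fragment; flag is a hypothetical completion marker):

static void spin_until_set(volatile int *flag)
{
	while (!*flag)
		cpu_relax_lowlatency();	/* cheap pause inside a hot loop */
}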
index f9e1ec3..8453e6e 100644 (file)
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
        beqz    a2, 1f          # if at start of vector, don't restore
 
        addi    a0, a0, -128
-       bbsi    a0, 8, 1f       # don't restore except for overflow 8 and 12
-       bbsi    a0, 7, 2f
+       bbsi.l  a0, 8, 1f       # don't restore except for overflow 8 and 12
+
+       /*
+        * This fixup handler is for the extremely unlikely case where the
+        * overflow handler's reference thru a0 gets a hardware TLB refill
+        * that bumps out the (distinct, aliasing) TLB entry that mapped its
+        * prior references thru a9/a13, and where our reference now thru
+        * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+        */
+       movi    a2, window_overflow_restore_a0_fixup
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+
+       bbsi.l  a0, 7, 2f
 
        /*
         * Restore a0 as saved by _WindowOverflow8().
-        *
-        * FIXME:  we really need a fixup handler for this L32E,
-        * for the extremely unlikely case where the overflow handler's
-        * reference thru a0 gets a hardware TLB refill that bumps out
-        * the (distinct, aliasing) TLB entry that mapped its prior
-        * references thru a9, and where our reference now thru a9
-        * gets a 2nd-level miss exception (not hardware TLB refill).
         */
 
-       l32e    a2, a9, -16
-       wsr     a2, depc        # replace the saved a0
-       j       1f
+       l32e    a0, a9, -16
+       wsr     a0, depc        # replace the saved a0
+       j       3f
 
 2:
        /*
         * Restore a0 as saved by _WindowOverflow12().
-        *
-        * FIXME:  we really need a fixup handler for this L32E,
-        * for the extremely unlikely case where the overflow handler's
-        * reference thru a0 gets a hardware TLB refill that bumps out
-        * the (distinct, aliasing) TLB entry that mapped its prior
-        * references thru a13, and where our reference now thru a13
-        * gets a 2nd-level miss exception (not hardware TLB refill).
         */
 
-       l32e    a2, a13, -16
-       wsr     a2, depc        # replace the saved a0
+       l32e    a0, a13, -16
+       wsr     a0, depc        # replace the saved a0
+3:
+       xsr     a3, excsave1
+       movi    a0, 0
+       s32i    a0, a3, EXC_TABLE_FIXUP
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
 1:
        /*
         * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
 
        s32i    a0, a2, PT_DEPC
 
+_DoubleExceptionVector_handle_exception:
        addx4   a0, a0, a3
        l32i    a0, a0, EXC_TABLE_FAST_USER
        xsr     a3, excsave1
@@ -464,10 +469,119 @@ _DoubleExceptionVector_WindowOverflow:
        rotw    -3
        j       1b
 
-       .end literal_prefix
 
 ENDPROC(_DoubleExceptionVector)
 
+/*
+ * Fixup handler for a TLB miss in the double exception handler for window
+ * overflow. We get here with windowbase set to the window that was being
+ * spilled and a0 trashed. a0 bit 7 determines whether this is a call8
+ * (bit clear) or call12 (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window, retaining the a0 value;
+ * - set up the exception stack to return back to the appropriate a0
+ *   restore code (we'll need to rotate the window back and there's no
+ *   place to save this information, so we use a different return
+ *   address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as the fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler, all
+ *   information about the conditions of the original double exception
+ *   that happened in the window overflow handler is lost, so we just
+ *   return to userspace to retry the overflow from the start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+       rsr     a0, ps
+       extui   a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+       rsr     a2, windowbase
+       sub     a0, a2, a0
+       extui   a0, a0, 0, 3
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+
+       _beqi   a0, 1, .Lhandle_1
+       _beqi   a0, 3, .Lhandle_3
+
+       .macro  overflow_fixup_handle_exception_pane n
+
+       rsr     a0, depc
+       rotw    -\n
+
+       xsr     a3, excsave1
+       wsr     a2, depc
+       l32i    a2, a3, EXC_TABLE_KSTK
+       s32i    a0, a2, PT_AREG0
+
+       movi    a0, .Lrestore_\n
+       s32i    a0, a2, PT_DEPC
+       rsr     a0, exccause
+       j       _DoubleExceptionVector_handle_exception
+
+       .endm
+
+       overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+       overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+       overflow_fixup_handle_exception_pane 3
+
+       .macro  overflow_fixup_restore_a0_pane n
+
+       rotw    \n
+       /* Need to preserve the a0 value here to be able to handle an
+        * exception that may occur on the a0 reload from the stack. It
+        * may occur because the TLB miss handler may not be atomic and
+        * the pointer to the page table may be lost before we get here.
+        * There are no free registers, so we need to use the
+        * EXC_TABLE_DOUBLE_SAVE area.
+        */
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       movi    a2, window_overflow_restore_a0_fixup
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+       bbsi.l  a0, 7, 1f
+       l32e    a0, a9, -16
+       j       2f
+1:
+       l32e    a0, a13, -16
+2:
+       rotw    -\n
+
+       .endm
+
+.Lrestore_2:
+       overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       movi    a2, 0
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+       rfe
+
+.Lrestore_1:
+       overflow_fixup_restore_a0_pane 1
+       j       .Lset_default_fixup
+.Lrestore_3:
+       overflow_fixup_restore_a0_pane 3
+       j       .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+       .end literal_prefix
 /*
  * Debug interrupt vector
  *
index ee32c00..d16db6d 100644 (file)
@@ -269,13 +269,13 @@ SECTIONS
                  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
                  .DoubleExceptionVector.literal,
-                 DOUBLEEXC_VECTOR_VADDR - 16,
+                 DOUBLEEXC_VECTOR_VADDR - 40,
                  SIZEOF(.UserExceptionVector.text),
                  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
                  .DoubleExceptionVector.text,
                  DOUBLEEXC_VECTOR_VADDR,
-                 32,
+                 40,
                  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
index 4224256..77ed202 100644 (file)
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
                return -EINVAL;
        }
 
-       if (it && start - it->start < bank_sz) {
+       if (it && start - it->start <= bank_sz) {
                if (start == it->start) {
                        if (end - it->start < bank_sz) {
                                it->start = end;
index 8c2e55e..0ec61c9 100644 (file)
@@ -746,6 +746,14 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
                        goto done;
                }
+
+               /*
+                * If the queue doesn't support SG gaps and adding this
+                * offset would create a gap, disallow it.
+                */
+               if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+                   bvec_gap_to_prev(prev, offset))
+                       return 0;
        }
 
        if (bio->bi_vcnt >= bio->bi_max_vecs)
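The new check relies on bvec_gap_to_prev(): for hardware that cannot handle scatter-gather gaps, a new segment may only join a request when the previous segment ends exactly on a page boundary and the new one starts at offset zero, i.e. the data stays virtually contiguous. A stand-alone approximation of that test (struct name hypothetical):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

struct bvec_ish {		/* stand-in for struct bio_vec */
	uint32_t bv_offset;	/* data offset within its page */
	uint32_t bv_len;	/* data length */
};

/* True if appending a segment at `offset` after `bprev` leaves a gap. */
static bool gap_to_prev(const struct bvec_ish *bprev, uint32_t offset)
{
	return offset != 0 ||
	       ((bprev->bv_offset + bprev->bv_len) & (PAGE_SIZE - 1)) != 0;
}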
index 069bc20..e17da94 100644 (file)
@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
-       blkg->refcnt = 1;
+       atomic_set(&blkg->refcnt, 1);
 
        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
 
        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
-       if (blkg->parent) {
-               spin_lock_irq(blkg->q->queue_lock);
+       if (blkg->parent)
                blkg_put(blkg->parent);
-               spin_unlock_irq(blkg->q->queue_lock);
-       }
 
        blkg_free(blkg);
 }
@@ -875,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
 
+       /*
+        * @q could be exiting and already have destroyed all blkgs as
+        * indicated by NULL root_blkg.  If so, don't confuse policies.
+        */
+       if (!q->root_blkg)
+               return;
+
        blk_throtl_drain(q);
 }
 
@@ -924,7 +928,15 @@ struct cgroup_subsys blkio_cgrp_subsys = {
        .css_offline = blkcg_css_offline,
        .css_free = blkcg_css_free,
        .can_attach = blkcg_can_attach,
-       .base_cftypes = blkcg_files,
+       .legacy_cftypes = blkcg_files,
+#ifdef CONFIG_MEMCG
+       /*
+        * This ensures that, if available, memcg is automatically enabled
+        * together on the default hierarchy so that the owner cgroup can
+        * be retrieved from writeback pages.
+        */
+       .depends_on = 1 << memory_cgrp_id,
+#endif
 };
 EXPORT_SYMBOL_GPL(blkio_cgrp_subsys);
 
@@ -1093,7 +1105,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
  * successful registration.  Returns 0 on success and -errno on failure.
  */
-int __init blkcg_policy_register(struct blkcg_policy *pol)
+int blkcg_policy_register(struct blkcg_policy *pol)
 {
        int i, ret;
 
@@ -1116,7 +1128,8 @@ int __init blkcg_policy_register(struct blkcg_policy *pol)
 
        /* everything is in place, add intf files for the new policy */
        if (pol->cftypes)
-               WARN_ON(cgroup_add_cftypes(&blkio_cgrp_subsys, pol->cftypes));
+               WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
+                                                 pol->cftypes));
        ret = 0;
 out_unlock:
        mutex_unlock(&blkcg_pol_mutex);
index cbb7f94..d3fd7aa 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/seq_file.h>
 #include <linux/radix-tree.h>
 #include <linux/blkdev.h>
+#include <linux/atomic.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX                UINT_MAX
@@ -104,7 +105,7 @@ struct blkcg_gq {
        struct request_list             rl;
 
        /* reference count */
-       int                             refcnt;
+       atomic_t                        refcnt;
 
        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;
@@ -145,7 +146,7 @@ void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
-int __init blkcg_policy_register(struct blkcg_policy *pol);
+int blkcg_policy_register(struct blkcg_policy *pol);
 void blkcg_policy_unregister(struct blkcg_policy *pol);
 int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
@@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  * blkg_get - get a blkg reference
  * @blkg: blkg to get
  *
- * The caller should be holding queue_lock and an existing reference.
+ * The caller should be holding an existing reference.
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-       lockdep_assert_held(blkg->q->queue_lock);
-       WARN_ON_ONCE(!blkg->refcnt);
-       blkg->refcnt++;
+       WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+       atomic_inc(&blkg->refcnt);
 }
 
 void __blkg_release_rcu(struct rcu_head *rcu);
@@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
 /**
  * blkg_put - put a blkg reference
  * @blkg: blkg to put
- *
- * The caller should be holding queue_lock.
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-       lockdep_assert_held(blkg->q->queue_lock);
-       WARN_ON_ONCE(blkg->refcnt <= 0);
-       if (!--blkg->refcnt)
+       WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+       if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
@@ -580,7 +577,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
 static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
 static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
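The refcount conversion in this header is the usual lock-free idiom: atomic_inc()/atomic_dec_and_test() remove the need to hold queue_lock around every get/put, and the final put frees through RCU so lockless readers can still dereference the object for the remainder of a grace period. A minimal sketch of the pattern (kernel context, names shortened):

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	atomic_t refcnt;
	struct rcu_head rcu_head;
};

static void obj_release_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu_head));
}

static inline void obj_get(struct obj *o)
{
	WARN_ON_ONCE(atomic_read(&o->refcnt) <= 0);
	atomic_inc(&o->refcnt);
}

static inline void obj_put(struct obj *o)
{
	/*
	 * The final put must not free immediately: an RCU reader may
	 * still hold a pointer obtained from a lockless lookup.
	 */
	if (atomic_dec_and_test(&o->refcnt))
		call_rcu(&o->rcu_head, obj_release_rcu);
}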
index f6f6b9a..6f8dba1 100644 (file)
@@ -3312,8 +3312,7 @@ int __init blk_dev_init(void)
 
        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
-                                           WQ_MEM_RECLAIM | WQ_HIGHPRI |
-                                           WQ_POWER_EFFICIENT, 0);
+                                           WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
 
index 8ffee4b..3cb5e9e 100644 (file)
@@ -421,44 +421,6 @@ void blk_insert_flush(struct request *rq)
        blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
 }
 
-/**
- * blk_abort_flushes - @q is being aborted, abort flush requests
- * @q: request_queue being aborted
- *
- * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
- * FLUSH/FUA requests for abortion.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock)
- */
-void blk_abort_flushes(struct request_queue *q)
-{
-       struct request *rq, *n;
-       int i;
-
-       /*
-        * Requests in flight for data are already owned by the dispatch
-        * queue or the device driver.  Just restore for normal completion.
-        */
-       list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
-               list_del_init(&rq->flush.list);
-               blk_flush_restore_request(rq);
-       }
-
-       /*
-        * We need to give away requests on flush queues.  Restore for
-        * normal completion and put them on the dispatch queue.
-        */
-       for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
-               list_for_each_entry_safe(rq, n, &q->flush_queue[i],
-                                        flush.list) {
-                       list_del_init(&rq->flush.list);
-                       blk_flush_restore_request(rq);
-                       list_add_tail(&rq->queuelist, &q->queue_head);
-               }
-       }
-}
-
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:      blockdev to issue flush for
index b3bf0df..5453583 100644 (file)
@@ -568,6 +568,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
+       struct request_queue *q = rq->q;
+
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;
 
@@ -591,6 +593,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
            !blk_write_same_mergeable(rq->bio, bio))
                return false;
 
+       if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+               struct bio_vec *bprev;
+
+               bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
+               if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+                       return false;
+       }
+
        return true;
 }
 
index 1aab39f..c1b9242 100644 (file)
@@ -43,9 +43,16 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
        return bt_has_free_tags(&tags->bitmap_tags);
 }
 
-static inline void bt_index_inc(unsigned int *index)
+static inline int bt_index_inc(int index)
 {
-       *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
+       return (index + 1) & (BT_WAIT_QUEUES - 1);
+}
+
+static inline void bt_index_atomic_inc(atomic_t *index)
+{
+       int old = atomic_read(index);
+       int new = bt_index_inc(old);
+       atomic_cmpxchg(index, old, new);
 }
 
 /*
@@ -69,14 +76,14 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
        int i, wake_index;
 
        bt = &tags->bitmap_tags;
-       wake_index = bt->wake_index;
+       wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];
 
                if (waitqueue_active(&bs->wait))
                        wake_up(&bs->wait);
 
-               bt_index_inc(&wake_index);
+               wake_index = bt_index_inc(wake_index);
        }
 }
 
@@ -212,12 +219,14 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                         struct blk_mq_hw_ctx *hctx)
 {
        struct bt_wait_state *bs;
+       int wait_index;
 
        if (!hctx)
                return &bt->bs[0];
 
-       bs = &bt->bs[hctx->wait_index];
-       bt_index_inc(&hctx->wait_index);
+       wait_index = atomic_read(&hctx->wait_index);
+       bs = &bt->bs[wait_index];
+       bt_index_atomic_inc(&hctx->wait_index);
        return bs;
 }
 
@@ -239,18 +248,12 @@ static int bt_get(struct blk_mq_alloc_data *data,
 
        bs = bt_wait_ptr(bt, hctx);
        do {
-               bool was_empty;
-
-               was_empty = list_empty(&wait.task_list);
                prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
 
                tag = __bt_get(hctx, bt, last_tag);
                if (tag != -1)
                        break;
 
-               if (was_empty)
-                       atomic_set(&bs->wait_cnt, bt->wake_cnt);
-
                blk_mq_put_ctx(data->ctx);
 
                io_schedule();
@@ -313,18 +316,19 @@ static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
 {
        int i, wake_index;
 
-       wake_index = bt->wake_index;
+       wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];
 
                if (waitqueue_active(&bs->wait)) {
-                       if (wake_index != bt->wake_index)
-                               bt->wake_index = wake_index;
+                       int o = atomic_read(&bt->wake_index);
+                       if (wake_index != o)
+                               atomic_cmpxchg(&bt->wake_index, o, wake_index);
 
                        return bs;
                }
 
-               bt_index_inc(&wake_index);
+               wake_index = bt_index_inc(wake_index);
        }
 
        return NULL;
@@ -334,6 +338,7 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
 {
        const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;
+       int wait_cnt;
 
        /*
         * The unlock memory barrier need to order access to req in free
@@ -342,10 +347,19 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
        clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
 
        bs = bt_wake_ptr(bt);
-       if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
-               atomic_set(&bs->wait_cnt, bt->wake_cnt);
-               bt_index_inc(&bt->wake_index);
+       if (!bs)
+               return;
+
+       wait_cnt = atomic_dec_return(&bs->wait_cnt);
+       if (wait_cnt == 0) {
+wake:
+               atomic_add(bt->wake_cnt, &bs->wait_cnt);
+               bt_index_atomic_inc(&bt->wake_index);
                wake_up(&bs->wait);
+       } else if (wait_cnt < 0) {
+               wait_cnt = atomic_inc_return(&bs->wait_cnt);
+               if (!wait_cnt)
+                       goto wake;
        }
 }
 
@@ -499,10 +513,13 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                return -ENOMEM;
        }
 
-       for (i = 0; i < BT_WAIT_QUEUES; i++)
+       bt_update_count(bt, depth);
+
+       for (i = 0; i < BT_WAIT_QUEUES; i++) {
                init_waitqueue_head(&bt->bs[i].wait);
+               atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
+       }
 
-       bt_update_count(bt, depth);
        return 0;
 }
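Two conversions in this file are worth a note. bt_index_atomic_inc() is deliberately lossy: racing CPUs may compute the same successor and atomic_cmpxchg() lets exactly one of them win, which is fine for a round-robin wake index that only has to keep moving, not move once per caller. A sketch of the idiom (kernel context):

#include <linux/atomic.h>

#define NR_WAIT_QUEUES 8	/* a power of two, like BT_WAIT_QUEUES */

static inline void index_atomic_inc(atomic_t *index)
{
	int old = atomic_read(index);
	int new = (old + 1) & (NR_WAIT_QUEUES - 1);

	/*
	 * No retry loop on purpose: if another CPU advanced the index
	 * first, the cmpxchg fails and we simply keep that newer value.
	 */
	atomic_cmpxchg(index, old, new);
}

The bt_clear_tag() change is in the same spirit: once wait_cnt is an atomic, a racing decrement can briefly drive it negative, so the slow path re-increments and only issues the wakeup when it observes the count actually reaching zero.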
 
index 98696a6..6206ed1 100644 (file)
@@ -24,7 +24,7 @@ struct blk_mq_bitmap_tags {
        unsigned int map_nr;
        struct blk_align_bitmap *map;
 
-       unsigned int wake_index;
+       atomic_t wake_index;
        struct bt_wait_state *bs;
 };
 
index e11f5f8..ad69ef6 100644 (file)
@@ -109,7 +109,7 @@ static void blk_mq_queue_exit(struct request_queue *q)
        __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
-static void __blk_mq_drain_queue(struct request_queue *q)
+void blk_mq_drain_queue(struct request_queue *q)
 {
        while (true) {
                s64 count;
@@ -120,7 +120,7 @@ static void __blk_mq_drain_queue(struct request_queue *q)
 
                if (count == 0)
                        break;
-               blk_mq_run_queues(q, false);
+               blk_mq_start_hw_queues(q);
                msleep(10);
        }
 }
@@ -139,12 +139,7 @@ static void blk_mq_freeze_queue(struct request_queue *q)
        spin_unlock_irq(q->queue_lock);
 
        if (drain)
-               __blk_mq_drain_queue(q);
-}
-
-void blk_mq_drain_queue(struct request_queue *q)
-{
-       __blk_mq_drain_queue(q);
+               blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -883,7 +878,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
        preempt_disable();
-       __blk_mq_run_hw_queue(hctx);
+       blk_mq_run_hw_queue(hctx, false);
        preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
index 3f33d86..a185b86 100644 (file)
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:       the tag map to free
  *
- * Tries to free the specified @bqt.  Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and free it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-       int retval;
-
-       retval = atomic_dec_and_test(&bqt->refcnt);
-       if (retval) {
+       if (atomic_dec_and_test(&bqt->refcnt)) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
                kfree(bqt);
        }
-
-       return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
        if (!bqt)
                return;
 
-       __blk_free_tags(bqt);
+       blk_free_tags(bqt);
 
        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:       the tag map to free
- *
- * For externally managed @bqt frees the map.  Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-       if (unlikely(!__blk_free_tags(bqt)))
-               BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
index 3fdb21a..9273d09 100644 (file)
@@ -412,13 +412,13 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
        int rw;
 
        /*
-        * If sane_hierarchy is enabled, we switch to properly hierarchical
+        * If on the default hierarchy, we switch to properly hierarchical
         * behavior where limits on a given throtl_grp are applied to the
         * whole subtree rather than just the group itself.  e.g. If 16M
         * read_bps limit is set on the root group, the whole system can't
         * exceed 16M for the device.
         *
-        * If sane_hierarchy is not enabled, the broken flat hierarchy
+        * If not on the default hierarchy, the broken flat hierarchy
         * behavior is retained where all throtl_grps are treated as if
         * they're all separate root groups right below throtl_data.
         * Limits of a group don't interact with limits of other groups
@@ -426,7 +426,7 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
         */
        parent_sq = &td->service_queue;
 
-       if (cgroup_sane_behavior(blkg->blkcg->css.cgroup) && blkg->parent)
+       if (cgroup_on_dfl(blkg->blkcg->css.cgroup) && blkg->parent)
                parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
 
        throtl_service_queue_init(&tg->service_queue, parent_sq);
index 45385e9..6748c4f 100644 (file)
@@ -84,7 +84,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 #define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
-void blk_abort_flushes(struct request_queue *q);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
index fbd5a67..a0926a6 100644 (file)
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case BLKROSET:
        case BLKDISCARD:
        case BLKSECDISCARD:
+       case BLKZEROOUT:
        /*
         * the ones below are implemented in blkdev_locked_ioctl,
         * but we call blkdev_ioctl, which gets the lock for us
index f35eddd..24c28b6 100644 (file)
@@ -729,26 +729,6 @@ int elv_may_queue(struct request_queue *q, int rw)
        return ELV_MQUEUE_MAY;
 }
 
-void elv_abort_queue(struct request_queue *q)
-{
-       struct request *rq;
-
-       blk_abort_flushes(q);
-
-       while (!list_empty(&q->queue_head)) {
-               rq = list_entry_rq(q->queue_head.next);
-               rq->cmd_flags |= REQ_QUIET;
-               trace_block_rq_abort(q, rq);
-               /*
-                * Mark this request as started so we don't trigger
-                * any debug logic in the end I/O path.
-                */
-               blk_start_request(rq);
-               __blk_end_request_all(rq, -EIO);
-       }
-}
-EXPORT_SYMBOL(elv_abort_queue);
-
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
        struct elevator_queue *e = q->elevator;
@@ -845,7 +825,7 @@ void elv_unregister_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(elv_unregister_queue);
 
-int __init elv_register(struct elevator_type *e)
+int elv_register(struct elevator_type *e)
 {
        char *def = "";
 
index ce4012a..6345c47 100644 (file)
@@ -23,7 +23,8 @@ comment "Crypto core or helper"
 
 config CRYPTO_FIPS
        bool "FIPS 200 compliance"
-       depends on CRYPTO_ANSI_CPRNG && !CRYPTO_MANAGER_DISABLE_TESTS
+       depends on (CRYPTO_ANSI_CPRNG || CRYPTO_DRBG) && !CRYPTO_MANAGER_DISABLE_TESTS
+       depends on MODULE_SIG
        help
          This option enables the fips boot option which is
          required if you want the system to operate in a FIPS 200
@@ -1019,6 +1020,19 @@ config CRYPTO_DES_SPARC64
          DES cipher algorithm (FIPS 46-2), and Triple DES EDE (FIPS 46-3),
          optimized using SPARC64 crypto opcodes.
 
+config CRYPTO_DES3_EDE_X86_64
+       tristate "Triple DES EDE cipher algorithm (x86-64)"
+       depends on X86 && 64BIT
+       select CRYPTO_ALGAPI
+       select CRYPTO_DES
+       help
+         Triple DES EDE (FIPS 46-3) algorithm.
+
+         This module provides an implementation of the Triple DES EDE
+         cipher algorithm, optimized for x86-64 processors. Two versions
+         of the algorithm are provided: a regular one that processes one
+         input block at a time, and one that processes three blocks in
+         parallel.
+
 config CRYPTO_FCRYPT
        tristate "FCrypt cipher algorithm"
        select CRYPTO_ALGAPI
@@ -1380,6 +1394,40 @@ config CRYPTO_ANSI_CPRNG
          ANSI X9.31 A.2.4. Note that this option must be enabled if
          CRYPTO_FIPS is selected
 
+menuconfig CRYPTO_DRBG_MENU
+       tristate "NIST SP800-90A DRBG"
+       help
+         NIST SP800-90A compliant DRBG. In the following submenu, one or
+         more of the DRBG types must be selected.
+
+if CRYPTO_DRBG_MENU
+
+config CRYPTO_DRBG_HMAC
+       bool "Enable HMAC DRBG"
+       default y
+       select CRYPTO_HMAC
+       help
+         Enable the HMAC DRBG variant as defined in NIST SP800-90A.
+
+config CRYPTO_DRBG_HASH
+       bool "Enable Hash DRBG"
+       select CRYPTO_HASH
+       help
+         Enable the Hash DRBG variant as defined in NIST SP800-90A.
+
+config CRYPTO_DRBG_CTR
+       bool "Enable CTR DRBG"
+       select CRYPTO_AES
+       help
+         Enable the CTR DRBG variant as defined in NIST SP800-90A.
+
+config CRYPTO_DRBG
+       tristate
+       default CRYPTO_DRBG_MENU if (CRYPTO_DRBG_HMAC || CRYPTO_DRBG_HASH || CRYPTO_DRBG_CTR)
+       select CRYPTO_RNG
+
+endif  # if CRYPTO_DRBG_MENU
+
 config CRYPTO_USER_API
        tristate
 
index 38e6423..cfa57b3 100644 (file)
@@ -92,6 +92,7 @@ obj-$(CONFIG_CRYPTO_842) += 842.o
 obj-$(CONFIG_CRYPTO_RNG2) += rng.o
 obj-$(CONFIG_CRYPTO_RNG2) += krng.o
 obj-$(CONFIG_CRYPTO_ANSI_CPRNG) += ansi_cprng.o
+obj-$(CONFIG_CRYPTO_DRBG) += drbg.o
 obj-$(CONFIG_CRYPTO_TEST) += tcrypt.o
 obj-$(CONFIG_CRYPTO_GHASH) += ghash-generic.o
 obj-$(CONFIG_CRYPTO_USER_API) += af_alg.o
index 966f893..6a3ad80 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/rwsem.h>
+#include <linux/security.h>
 
 struct alg_type_list {
        const struct af_alg_type *type;
@@ -243,6 +244,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
 
        sock_init_data(newsock, sk2);
        sock_graft(sk2, newsock);
+       security_sk_clone(sk, sk2);
 
        err = type->accept(ask->private, sk2);
        if (err) {
index 7a1ae87..e8d3a7d 100644 (file)
@@ -41,8 +41,20 @@ static inline int crypto_set_driver_name(struct crypto_alg *alg)
        return 0;
 }
 
+static inline void crypto_check_module_sig(struct module *mod)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+       if (fips_enabled && mod && !mod->sig_ok)
+               panic("Module %s signature verification failed in FIPS mode\n",
+                     mod->name);
+#endif
+       return;
+}
+
 static int crypto_check_alg(struct crypto_alg *alg)
 {
+       crypto_check_module_sig(alg->cra_module);
+
        if (alg->cra_alignmask & (alg->cra_alignmask + 1))
                return -EINVAL;
 
@@ -430,6 +442,8 @@ int crypto_register_template(struct crypto_template *tmpl)
 
        down_write(&crypto_alg_sem);
 
+       crypto_check_module_sig(tmpl->module);
+
        list_for_each_entry(q, &crypto_template_list, list) {
                if (q == tmpl)
                        goto out;
index 7bdd61b..e592c90 100644 (file)
@@ -233,7 +233,7 @@ static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
 }
 
 static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
-                                   crypto_completion_t complete)
+                                   crypto_completion_t compl)
 {
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
@@ -241,7 +241,7 @@ static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
 
        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
-       req->base.complete = complete;
+       req->base.complete = compl;
 
        return cryptd_enqueue_request(queue, &req->base);
 }
@@ -414,7 +414,7 @@ static int cryptd_hash_setkey(struct crypto_ahash *parent,
 }
 
 static int cryptd_hash_enqueue(struct ahash_request *req,
-                               crypto_completion_t complete)
+                               crypto_completion_t compl)
 {
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -422,7 +422,7 @@ static int cryptd_hash_enqueue(struct ahash_request *req,
                cryptd_get_queue(crypto_ahash_tfm(tfm));
 
        rctx->complete = req->base.complete;
-       req->base.complete = complete;
+       req->base.complete = compl;
 
        return cryptd_enqueue_request(queue, &req->base);
 }
@@ -667,14 +667,14 @@ static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
 }
 
 static int cryptd_aead_enqueue(struct aead_request *req,
-                                   crypto_completion_t complete)
+                                   crypto_completion_t compl)
 {
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
 
        rctx->complete = req->base.complete;
-       req->base.complete = complete;
+       req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
 }
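The complete -> compl renames above appear to resolve shadow warnings: a parameter named complete hides the kernel-wide complete() declaration from <linux/completion.h>. A hypothetical stand-alone illustration:

#include <stddef.h>

void complete(void *x);				/* global, like the kernel's */

static void enqueue(void (*complete)(void *))	/* -Wshadow warns here */
{
	complete(NULL);	/* calls the parameter; the global is hidden */
}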
 
index f6cf63f..298d464 100644 (file)
@@ -859,13 +859,10 @@ static void des_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
  *   property.
  *
  */
-static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
-                          unsigned int keylen)
+int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
+                     unsigned int keylen)
 {
        const u32 *K = (const u32 *)key;
-       struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
-       u32 *expkey = dctx->expkey;
-       u32 *flags = &tfm->crt_flags;
 
        if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
                     !((K[2] ^ K[4]) | (K[3] ^ K[5]))) &&
@@ -880,6 +877,17 @@ static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(__des3_ede_setkey);
+
+static int des3_ede_setkey(struct crypto_tfm *tfm, const u8 *key,
+                          unsigned int keylen)
+{
+       struct des3_ede_ctx *dctx = crypto_tfm_ctx(tfm);
+       u32 *flags = &tfm->crt_flags;
+       u32 *expkey = dctx->expkey;
+
+       return __des3_ede_setkey(expkey, flags, key, keylen);
+}
 
 static void des3_ede_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
@@ -945,6 +953,8 @@ static void des3_ede_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 
 static struct crypto_alg des_algs[2] = { {
        .cra_name               =       "des",
+       .cra_driver_name        =       "des-generic",
+       .cra_priority           =       100,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       DES_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct des_ctx),
@@ -958,6 +968,8 @@ static struct crypto_alg des_algs[2] = { {
        .cia_decrypt            =       des_decrypt } }
 }, {
        .cra_name               =       "des3_ede",
+       .cra_driver_name        =       "des3_ede-generic",
+       .cra_priority           =       100,
        .cra_flags              =       CRYPTO_ALG_TYPE_CIPHER,
        .cra_blocksize          =       DES3_EDE_BLOCK_SIZE,
        .cra_ctxsize            =       sizeof(struct des3_ede_ctx),
diff --git a/crypto/drbg.c b/crypto/drbg.c
new file mode 100644 (file)
index 0000000..7894db9
--- /dev/null
@@ -0,0 +1,2044 @@
+/*
+ * DRBG: Deterministic Random Bits Generator
+ *       Based on NIST Recommended DRBG from NIST SP800-90A with the following
+ *       properties:
+ *             * CTR DRBG with DF with AES-128, AES-192, AES-256 cores
+ *             * Hash DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
+ *             * HMAC DRBG with DF with SHA-1, SHA-256, SHA-384, SHA-512 cores
+ *             * with and without prediction resistance
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * DRBG Usage
+ * ==========
+ * The SP 800-90A DRBG allows the user to specify a personalization string
+ * for initialization as well as an additional information string for each
+ * random number request. The following code fragments show how a caller
+ * uses the kernel crypto API to exercise the full functionality of the DRBG.
+ *
+ * Usage without any additional data
+ * ---------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ *
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * err = crypto_rng_get_bytes(drng, data, DATALEN);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with personalization string during initialization
+ * -------------------------------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ * char personalization[] = "some-string";
+ *
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * // The reset completely re-initializes the DRBG with the provided
+ * // personalization string; the raw string is handed in and is wrapped
+ * // into a struct drbg_string internally
+ * err = crypto_rng_reset(drng, personalization, strlen(personalization));
+ * err = crypto_rng_get_bytes(drng, data, DATALEN);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with additional information string during random number request
+ * ---------------------------------------------------------------------
+ * struct crypto_rng *drng;
+ * int err;
+ * char data[DATALEN];
+ * char addtl_string[] = "some-string";
+ * struct drbg_string addtl;
+ *
+ * drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * // The following call is a wrapper to crypto_rng_get_bytes() and returns
+ * // the same error codes.
+ * err = crypto_drbg_get_bytes_addtl(drng, data, DATALEN, &addtl);
+ * crypto_free_rng(drng);
+ *
+ *
+ * Usage with personalization and additional information strings
+ * -------------------------------------------------------------
+ * Just mix both scenarios above.
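+ *
+ * A minimal sketch of the combined case (error handling omitted; it simply
+ * mixes the two fragments above, reusing their variable declarations):
+ *
+ * drbg_string_fill(&addtl, addtl_string, strlen(addtl_string));
+ * drng = crypto_alloc_rng(drng_name, 0, 0);
+ * err = crypto_rng_reset(drng, personalization, strlen(personalization));
+ * err = crypto_drbg_get_bytes_addtl(drng, data, DATALEN, &addtl);
+ * crypto_free_rng(drng);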
+ */
+
+#include <crypto/drbg.h>
+
+/***************************************************************
+ * Backend cipher definitions available to DRBG
+ ***************************************************************/
+
+/*
+ * The order of the DRBG definitions here matters: every DRBG is registered
+ * as stdrng. Each DRBG receives a higher cra_priority value the later it
+ * is defined in this array (see drbg_fill_array).
+ *
+ * HMAC DRBGs are favored over Hash DRBGs over CTR DRBGs, and
+ * SHA256 / AES256 over the other ciphers. Thus, the favored
+ * DRBGs are the last entries in this array.
+ */
+static const struct drbg_core drbg_cores[] = {
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+       {
+               .flags = DRBG_CTR | DRBG_STRENGTH128,
+               .statelen = 32, /* 256 bits as defined in 10.2.1 */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 16,
+               .cra_name = "ctr_aes128",
+               .backend_cra_name = "ecb(aes)",
+       }, {
+               .flags = DRBG_CTR | DRBG_STRENGTH192,
+               .statelen = 40, /* 320 bits as defined in 10.2.1 */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 16,
+               .cra_name = "ctr_aes192",
+               .backend_cra_name = "ecb(aes)",
+       }, {
+               .flags = DRBG_CTR | DRBG_STRENGTH256,
+               .statelen = 48, /* 384 bits as defined in 10.2.1 */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 16,
+               .cra_name = "ctr_aes256",
+               .backend_cra_name = "ecb(aes)",
+       },
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+       {
+               .flags = DRBG_HASH | DRBG_STRENGTH128,
+               .statelen = 55, /* 440 bits */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 20,
+               .cra_name = "sha1",
+               .backend_cra_name = "sha1",
+       }, {
+               .flags = DRBG_HASH | DRBG_STRENGTH256,
+               .statelen = 111, /* 888 bits */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 48,
+               .cra_name = "sha384",
+               .backend_cra_name = "sha384",
+       }, {
+               .flags = DRBG_HASH | DRBG_STRENGTH256,
+               .statelen = 111, /* 888 bits */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 64,
+               .cra_name = "sha512",
+               .backend_cra_name = "sha512",
+       }, {
+               .flags = DRBG_HASH | DRBG_STRENGTH256,
+               .statelen = 55, /* 440 bits */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 32,
+               .cra_name = "sha256",
+               .backend_cra_name = "sha256",
+       },
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+       {
+               .flags = DRBG_HMAC | DRBG_STRENGTH128,
+               .statelen = 20, /* block length of cipher */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 20,
+               .cra_name = "hmac_sha1",
+               .backend_cra_name = "hmac(sha1)",
+       }, {
+               .flags = DRBG_HMAC | DRBG_STRENGTH256,
+               .statelen = 48, /* block length of cipher */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 48,
+               .cra_name = "hmac_sha384",
+               .backend_cra_name = "hmac(sha384)",
+       }, {
+               .flags = DRBG_HMAC | DRBG_STRENGTH256,
+               .statelen = 64, /* block length of cipher */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 64,
+               .cra_name = "hmac_sha512",
+               .backend_cra_name = "hmac(sha512)",
+       }, {
+               .flags = DRBG_HMAC | DRBG_STRENGTH256,
+               .statelen = 32, /* block length of cipher */
+               .max_addtllen = 35,
+               .max_bits = 19,
+               .max_req = 48,
+               .blocklen_bytes = 32,
+               .cra_name = "hmac_sha256",
+               .backend_cra_name = "hmac(sha256)",
+       },
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+};
+
+/******************************************************************
+ * Generic helper functions
+ ******************************************************************/
+
+/*
+ * Return strength of DRBG according to SP800-90A section 8.4
+ *
+ * @flags DRBG flags reference
+ *
+ * Return: normalized strength in *bytes*, or 32 as a default
+ *        to counter programming errors
+ */
+static inline unsigned short drbg_sec_strength(drbg_flag_t flags)
+{
+       switch (flags & DRBG_STRENGTH_MASK) {
+       case DRBG_STRENGTH128:
+               return 16;
+       case DRBG_STRENGTH192:
+               return 24;
+       case DRBG_STRENGTH256:
+               return 32;
+       default:
+               return 32;
+       }
+}
+
+/*
+ * FIPS 140-2 continuous self test
+ * The test is performed on the result of one round of the output
+ * function. Thus, the function implicitly knows the size of the
+ * buffer.
+ *
+ * The FIPS test can be called in an endless loop until it returns
+ * true. Although the code looks like a potential deadlock, it is
+ * not, because returning false cannot mathematically occur (except
+ * once when a reseed took place and the updated state happens to be
+ * set up such that the generation of a new value returns one identical
+ * to the previous value -- this is most unlikely and would happen only
+ * once).
+ * Thus, if this function repeatedly returned false and thereby caused
+ * a deadlock, the integrity of the entire kernel would already be lost.
+ *
+ * @drbg DRBG handle
+ * @buf output buffer of random data to be checked
+ *
+ * return:
+ *     true on success
+ *     false on error
+ */
+static bool drbg_fips_continuous_test(struct drbg_state *drbg,
+                                     const unsigned char *buf)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+       int ret = 0;
+       /* skip test if we test the overall system */
+       if (drbg->test_data)
+               return true;
+       /* only perform test in FIPS mode */
+       if (0 == fips_enabled)
+               return true;
+       if (!drbg->fips_primed) {
+               /* Priming of FIPS test */
+               memcpy(drbg->prev, buf, drbg_blocklen(drbg));
+               drbg->fips_primed = true;
+               /* return false due to priming, i.e. another round is needed */
+               return false;
+       }
+       ret = memcmp(drbg->prev, buf, drbg_blocklen(drbg));
+       memcpy(drbg->prev, buf, drbg_blocklen(drbg));
+       /* the test shall pass when the two compared values are not equal */
+       return ret != 0;
+#else
+       return true;
+#endif /* CONFIG_CRYPTO_FIPS */
+}
+
+/*
+ * Convert an integer into a byte representation of this integer.
+ * The byte representation is big-endian
+ *
+ * @buf buffer holding the converted integer
+ * @val value to be converted
+ * @buflen length of buffer
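+ *
+ * A worked example (illustration only): val = 0x0102 and buflen = 4
+ * yield buf = { 0x00, 0x00, 0x01, 0x02 }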
+ */
+#if (defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR))
+static inline void drbg_int2byte(unsigned char *buf, uint64_t val,
+                                size_t buflen)
+{
+       unsigned char *byte;
+       uint64_t i;
+
+       byte = buf + (buflen - 1);
+       for (i = 0; i < buflen; i++)
+               *(byte--) = val >> (i * 8) & 0xff;
+}
+
+/*
+ * Add the source buffer to the destination buffer, byte-wise and
+ * big-endian, propagating the carry (used e.g. to increment V)
+ *
+ * @dst buffer to add to (the larger of the two buffers)
+ * @dstlen length of dst
+ * @add value to add
+ * @addlen length of add
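+ *
+ * A worked example (illustration only): dst = { 0x00, 0xff } plus
+ * add = { 0x01 } yields dst = { 0x01, 0x00 } due to the carry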
+ */
+static inline void drbg_add_buf(unsigned char *dst, size_t dstlen,
+                               const unsigned char *add, size_t addlen)
+{
+       /* implied: dstlen > addlen */
+       unsigned char *dstptr;
+       const unsigned char *addptr;
+       unsigned int remainder = 0;
+       size_t len = addlen;
+
+       dstptr = dst + (dstlen-1);
+       addptr = add + (addlen-1);
+       while (len) {
+               remainder += *dstptr + *addptr;
+               *dstptr = remainder & 0xff;
+               remainder >>= 8;
+               len--; dstptr--; addptr--;
+       }
+       len = dstlen - addlen;
+       while (len && remainder > 0) {
+               remainder = *dstptr + 1;
+               *dstptr = remainder & 0xff;
+               remainder >>= 8;
+               len--; dstptr--;
+       }
+}
+#endif /* defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_CTR) */
+
+/******************************************************************
+ * CTR DRBG callback functions
+ ******************************************************************/
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+#define CRYPTO_DRBG_CTR_STRING "CTR "
+static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
+                         unsigned char *outval, const struct drbg_string *in);
+static int drbg_init_sym_kernel(struct drbg_state *drbg);
+static int drbg_fini_sym_kernel(struct drbg_state *drbg);
+
+/* BCC function for CTR DRBG as defined in 10.4.3 */
+static int drbg_ctr_bcc(struct drbg_state *drbg,
+                       unsigned char *out, const unsigned char *key,
+                       struct list_head *in)
+{
+       int ret = 0;
+       struct drbg_string *curr = NULL;
+       struct drbg_string data;
+       short cnt = 0;
+
+       drbg_string_fill(&data, out, drbg_blocklen(drbg));
+
+       /* 10.4.3 step 1 */
+       memset(out, 0, drbg_blocklen(drbg));
+
+       /* 10.4.3 step 2 / 4 */
+       list_for_each_entry(curr, in, list) {
+               const unsigned char *pos = curr->buf;
+               size_t len = curr->len;
+               /* 10.4.3 step 4.1 */
+               while (len) {
+                       /* 10.4.3 step 4.2 */
+                       if (drbg_blocklen(drbg) == cnt) {
+                               cnt = 0;
+                               ret = drbg_kcapi_sym(drbg, key, out, &data);
+                               if (ret)
+                                       return ret;
+                       }
+                       out[cnt] ^= *pos;
+                       pos++;
+                       cnt++;
+                       len--;
+               }
+       }
+       /* 10.4.3 step 4.2 for last block */
+       if (cnt)
+               ret = drbg_kcapi_sym(drbg, key, out, &data);
+
+       return ret;
+}
+
+/*
+ * scratchpad usage: drbg_ctr_update is interlinked with drbg_ctr_df
+ * (and drbg_ctr_bcc, but this function does not need any temporary buffers),
+ * the scratchpad is used as follows:
+ * drbg_ctr_update:
+ *     temp
+ *             start: drbg->scratchpad
+ *             length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *                     note: the cipher writing into this variable works
+ *                     blocklen-wise. Now, when the statelen is not a multiple
+ *                     of blocklen, the generation loop below "spills over"
+ *                     by at most blocklen. Thus, we need to give sufficient
+ *                     memory.
+ *     df_data
+ *             start: drbg->scratchpad +
+ *                             drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *             length: drbg_statelen(drbg)
+ *
+ * drbg_ctr_df:
+ *     pad
+ *             start: df_data + drbg_statelen(drbg)
+ *             length: drbg_blocklen(drbg)
+ *     iv
+ *             start: pad + drbg_blocklen(drbg)
+ *             length: drbg_blocklen(drbg)
+ *     temp
+ *             start: iv + drbg_blocklen(drbg)
+ *             length: drbg_statelen(drbg) + drbg_blocklen(drbg)
+ *                     note: temp is the buffer that the BCC function operates
+ *                     on. BCC operates blockwise. drbg_statelen(drbg)
+ *                     is sufficient when the DRBG state length is a multiple
+ *                     of the block size. For AES192 (and maybe other ciphers)
+ *                     this is not correct and the length for temp is
+ *                     insufficient (yes, that also means for such ciphers,
+ *                     the final output of all BCC rounds are truncated).
+ *                     Therefore, add drbg_blocklen(drbg) to cover all
+ *                     possibilities.
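+ *
+ * For illustration (not normative): with ctr_aes128, drbg_statelen = 32
+ * and drbg_blocklen = 16, so the partitioning above works out to
+ *     temp    = drbg->scratchpad +   0 (48 bytes)
+ *     df_data = drbg->scratchpad +  48 (32 bytes)
+ *     pad     = drbg->scratchpad +  80 (16 bytes)
+ *     iv      = drbg->scratchpad +  96 (16 bytes)
+ *     temp    = drbg->scratchpad + 112 (48 bytes, BCC workspace in
+ *               drbg_ctr_df)
+ * for a total of 160 bytes, matching the CTR case in drbg_alloc_state.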
+ */
+
+/* Derivation Function for CTR DRBG as defined in 10.4.2 */
+static int drbg_ctr_df(struct drbg_state *drbg,
+                      unsigned char *df_data, size_t bytes_to_return,
+                      struct list_head *seedlist)
+{
+       int ret = -EFAULT;
+       unsigned char L_N[8];
+       /* S3 is input */
+       struct drbg_string S1, S2, S4, cipherin;
+       LIST_HEAD(bcc_list);
+       unsigned char *pad = df_data + drbg_statelen(drbg);
+       unsigned char *iv = pad + drbg_blocklen(drbg);
+       unsigned char *temp = iv + drbg_blocklen(drbg);
+       size_t padlen = 0;
+       unsigned int templen = 0;
+       /* 10.4.2 step 7 */
+       unsigned int i = 0;
+       /* 10.4.2 step 8 */
+       const unsigned char *K = (unsigned char *)
+                          "\x00\x01\x02\x03\x04\x05\x06\x07"
+                          "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
+                          "\x10\x11\x12\x13\x14\x15\x16\x17"
+                          "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f";
+       unsigned char *X;
+       size_t generated_len = 0;
+       size_t inputlen = 0;
+       struct drbg_string *seed = NULL;
+
+       memset(pad, 0, drbg_blocklen(drbg));
+       memset(iv, 0, drbg_blocklen(drbg));
+       memset(temp, 0, drbg_statelen(drbg));
+
+       /* 10.4.2 step 1 is implicit as we work byte-wise */
+
+       /* 10.4.2 step 2 */
+       if ((512/8) < bytes_to_return)
+               return -EINVAL;
+
+       /* 10.4.2 step 2 -- calculate the entire length of all input data */
+       list_for_each_entry(seed, seedlist, list)
+               inputlen += seed->len;
+       drbg_int2byte(&L_N[0], inputlen, 4);
+
+       /* 10.4.2 step 3 */
+       drbg_int2byte(&L_N[4], bytes_to_return, 4);
+
+       /* 10.4.2 step 5: length is L_N, input_string, one byte, padding */
+       padlen = (inputlen + sizeof(L_N) + 1) % (drbg_blocklen(drbg));
+       /* wrap the padlen appropriately */
+       if (padlen)
+               padlen = drbg_blocklen(drbg) - padlen;
+       /*
+        * pad / padlen contains the 0x80 byte and the following zero bytes.
+        * As the calculated padlen value only covers the number of zero
+        * bytes, this value has to be incremented by one for the 0x80 byte.
+        */
+       padlen++;
+       pad[0] = 0x80;
+
+       /* 10.4.2 step 4 -- first fill the linked list and then order it */
+       drbg_string_fill(&S1, iv, drbg_blocklen(drbg));
+       list_add_tail(&S1.list, &bcc_list);
+       drbg_string_fill(&S2, L_N, sizeof(L_N));
+       list_add_tail(&S2.list, &bcc_list);
+       list_splice_tail(seedlist, &bcc_list);
+       drbg_string_fill(&S4, pad, padlen);
+       list_add_tail(&S4.list, &bcc_list);
+
+       /* 10.4.2 step 9 */
+       while (templen < (drbg_keylen(drbg) + (drbg_blocklen(drbg)))) {
+               /*
+                * 10.4.2 step 9.1 - the padding is implicit as the buffer
+                * holds zeros after allocation -- even the increment of i
+                * is irrelevant as the increment remains within length of i
+                */
+               drbg_int2byte(iv, i, 4);
+               /* 10.4.2 step 9.2 -- BCC and concatenation with temp */
+               ret = drbg_ctr_bcc(drbg, temp + templen, K, &bcc_list);
+               if (ret)
+                       goto out;
+               /* 10.4.2 step 9.3 */
+               i++;
+               templen += drbg_blocklen(drbg);
+       }
+
+       /* 10.4.2 step 11 */
+       X = temp + (drbg_keylen(drbg));
+       drbg_string_fill(&cipherin, X, drbg_blocklen(drbg));
+
+       /* 10.4.2 step 12: overwriting of outval is implemented in next step */
+
+       /* 10.4.2 step 13 */
+       while (generated_len < bytes_to_return) {
+               short blocklen = 0;
+               /*
+                * 10.4.2 step 13.1: the truncation of the key length is
+                * implicit as the key is only drbg_blocklen in size based on
+                * the implementation of the cipher function callback
+                */
+               ret = drbg_kcapi_sym(drbg, temp, X, &cipherin);
+               if (ret)
+                       goto out;
+               blocklen = (drbg_blocklen(drbg) <
+                               (bytes_to_return - generated_len)) ?
+                           drbg_blocklen(drbg) :
+                               (bytes_to_return - generated_len);
+               /* 10.4.2 step 13.2 and 14 */
+               memcpy(df_data + generated_len, X, blocklen);
+               generated_len += blocklen;
+       }
+
+       ret = 0;
+
+out:
+       memset(iv, 0, drbg_blocklen(drbg));
+       memset(temp, 0, drbg_statelen(drbg));
+       memset(pad, 0, drbg_blocklen(drbg));
+       return ret;
+}
+
+/*
+ * update function of CTR DRBG as defined in 10.2.1.2
+ *
+ * The reseed variable has an enhanced meaning compared to the update
+ * functions of the other DRBGs as follows:
+ * 0 => initial seed from initialization
+ * 1 => reseed via drbg_seed
+ * 2 => first invocation of drbg_ctr_update from drbg_ctr_generate when
+ *      addtl is present. In this case, the df_data scratchpad is not
+ *      deleted so that it is available for the subsequent call, to avoid
+ *      invoking the DF function again.
+ * 3 => second invocation of drbg_ctr_update from drbg_ctr_generate. When
+ *      the update function was called with addtl, the df_data memory
+ *      already contains the DFed addtl information and we do not need to
+ *      call DF again.
+ */
+static int drbg_ctr_update(struct drbg_state *drbg, struct list_head *seed,
+                          int reseed)
+{
+       int ret = -EFAULT;
+       /* 10.2.1.2 step 1 */
+       unsigned char *temp = drbg->scratchpad;
+       unsigned char *df_data = drbg->scratchpad + drbg_statelen(drbg) +
+                                drbg_blocklen(drbg);
+       unsigned char *temp_p, *df_data_p; /* pointer to iterate over buffers */
+       unsigned int len = 0;
+       struct drbg_string cipherin;
+       unsigned char prefix = DRBG_PREFIX1;
+
+       memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
+       if (3 > reseed)
+               memset(df_data, 0, drbg_statelen(drbg));
+
+       /* 10.2.1.3.2 step 2 and 10.2.1.4.2 step 2 */
+       if (seed) {
+               ret = drbg_ctr_df(drbg, df_data, drbg_statelen(drbg), seed);
+               if (ret)
+                       goto out;
+       }
+
+       drbg_string_fill(&cipherin, drbg->V, drbg_blocklen(drbg));
+       /*
+        * 10.2.1.3.2 steps 2 and 3 are already covered as the allocation
+        * zeroizes all memory during initialization
+        */
+       while (len < (drbg_statelen(drbg))) {
+               /* 10.2.1.2 step 2.1 */
+               drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+               /* 10.2.1.2 step 2.2 */
+               ret = drbg_kcapi_sym(drbg, drbg->C, temp + len, &cipherin);
+               if (ret)
+                       goto out;
+               /* 10.2.1.2 step 2.3 and 3 */
+               len += drbg_blocklen(drbg);
+       }
+
+       /* 10.2.1.2 step 4 */
+       temp_p = temp;
+       df_data_p = df_data;
+       for (len = 0; len < drbg_statelen(drbg); len++) {
+               *temp_p ^= *df_data_p;
+               df_data_p++; temp_p++;
+       }
+
+       /* 10.2.1.2 step 5 */
+       memcpy(drbg->C, temp, drbg_keylen(drbg));
+       /* 10.2.1.2 step 6 */
+       memcpy(drbg->V, temp + drbg_keylen(drbg), drbg_blocklen(drbg));
+       ret = 0;
+
+out:
+       memset(temp, 0, drbg_statelen(drbg) + drbg_blocklen(drbg));
+       if (2 != reseed)
+               memset(df_data, 0, drbg_statelen(drbg));
+       return ret;
+}
+
+/*
+ * scratchpad use: drbg_ctr_update is called independently of the byte
+ * extraction in drbg_ctr_generate. Therefore, the scratchpad is reused
+ */
+/* Generate function of CTR DRBG as defined in 10.2.1.5.2 */
+static int drbg_ctr_generate(struct drbg_state *drbg,
+                            unsigned char *buf, unsigned int buflen,
+                            struct list_head *addtl)
+{
+       int len = 0;
+       int ret = 0;
+       struct drbg_string data;
+       unsigned char prefix = DRBG_PREFIX1;
+
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+
+       /* 10.2.1.5.2 step 2 */
+       if (addtl && !list_empty(addtl)) {
+               ret = drbg_ctr_update(drbg, addtl, 2);
+               if (ret)
+                       return ret;
+       }
+
+       /* 10.2.1.5.2 step 4.1 */
+       drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+       drbg_string_fill(&data, drbg->V, drbg_blocklen(drbg));
+       while (len < buflen) {
+               int outlen = 0;
+               /* 10.2.1.5.2 step 4.2 */
+               ret = drbg_kcapi_sym(drbg, drbg->C, drbg->scratchpad, &data);
+               if (ret) {
+                       len = ret;
+                       goto out;
+               }
+               outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+                         drbg_blocklen(drbg) : (buflen - len);
+               if (!drbg_fips_continuous_test(drbg, drbg->scratchpad)) {
+                       /* 10.2.1.5.2 step 6 */
+                       drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+                       continue;
+               }
+               /* 10.2.1.5.2 step 4.3 */
+               memcpy(buf + len, drbg->scratchpad, outlen);
+               len += outlen;
+               /* 10.2.1.5.2 step 6 */
+               if (len < buflen)
+                       drbg_add_buf(drbg->V, drbg_blocklen(drbg), &prefix, 1);
+       }
+
+       /* 10.2.1.5.2 step 6 */
+       ret = drbg_ctr_update(drbg, NULL, 3);
+       if (ret)
+               len = ret;
+
+out:
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+       return len;
+}
+
+static struct drbg_state_ops drbg_ctr_ops = {
+       .update         = drbg_ctr_update,
+       .generate       = drbg_ctr_generate,
+       .crypto_init    = drbg_init_sym_kernel,
+       .crypto_fini    = drbg_fini_sym_kernel,
+};
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+
+/******************************************************************
+ * HMAC DRBG callback functions
+ ******************************************************************/
+
+#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
+static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
+                          unsigned char *outval, const struct list_head *in);
+static int drbg_init_hash_kernel(struct drbg_state *drbg);
+static int drbg_fini_hash_kernel(struct drbg_state *drbg);
+#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
+
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+#define CRYPTO_DRBG_HMAC_STRING "HMAC "
+/* update function of HMAC DRBG as defined in 10.1.2.2 */
+static int drbg_hmac_update(struct drbg_state *drbg, struct list_head *seed,
+                           int reseed)
+{
+       int ret = -EFAULT;
+       int i = 0;
+       struct drbg_string seed1, seed2, vdata;
+       LIST_HEAD(seedlist);
+       LIST_HEAD(vdatalist);
+
+       if (!reseed) {
+               /* 10.1.2.3 step 2 */
+               memset(drbg->C, 0, drbg_statelen(drbg));
+               memset(drbg->V, 1, drbg_statelen(drbg));
+       }
+
+       drbg_string_fill(&seed1, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&seed1.list, &seedlist);
+       /* buffer of seed2 will be filled in the for loop below with one byte */
+       drbg_string_fill(&seed2, NULL, 1);
+       list_add_tail(&seed2.list, &seedlist);
+       /* input data of seed is allowed to be NULL at this point */
+       if (seed)
+               list_splice_tail(seed, &seedlist);
+
+       drbg_string_fill(&vdata, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&vdata.list, &vdatalist);
+       for (i = 2; 0 < i; i--) {
+               /* first round uses 0x0, second 0x1 */
+               unsigned char prefix = DRBG_PREFIX0;
+               if (1 == i)
+                       prefix = DRBG_PREFIX1;
+               /* 10.1.2.2 step 1 and 4 -- concatenation and HMAC for key */
+               seed2.buf = &prefix;
+               ret = drbg_kcapi_hash(drbg, drbg->C, drbg->C, &seedlist);
+               if (ret)
+                       return ret;
+
+               /* 10.1.2.2 step 2 and 5 -- HMAC for V */
+               ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &vdatalist);
+               if (ret)
+                       return ret;
+
+               /* 10.1.2.2 step 3 */
+               if (!seed)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* generate function of HMAC DRBG as defined in 10.1.2.5 */
+static int drbg_hmac_generate(struct drbg_state *drbg,
+                             unsigned char *buf,
+                             unsigned int buflen,
+                             struct list_head *addtl)
+{
+       int len = 0;
+       int ret = 0;
+       struct drbg_string data;
+       LIST_HEAD(datalist);
+
+       /* 10.1.2.5 step 2 */
+       if (addtl && !list_empty(addtl)) {
+               ret = drbg_hmac_update(drbg, addtl, 1);
+               if (ret)
+                       return ret;
+       }
+
+       drbg_string_fill(&data, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&data.list, &datalist);
+       while (len < buflen) {
+               unsigned int outlen = 0;
+               /* 10.1.2.5 step 4.1 */
+               ret = drbg_kcapi_hash(drbg, drbg->C, drbg->V, &datalist);
+               if (ret)
+                       return ret;
+               outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+                         drbg_blocklen(drbg) : (buflen - len);
+               if (!drbg_fips_continuous_test(drbg, drbg->V))
+                       continue;
+
+               /* 10.1.2.5 step 4.2 */
+               memcpy(buf + len, drbg->V, outlen);
+               len += outlen;
+       }
+
+       /* 10.1.2.5 step 6 */
+       if (addtl && !list_empty(addtl))
+               ret = drbg_hmac_update(drbg, addtl, 1);
+       else
+               ret = drbg_hmac_update(drbg, NULL, 1);
+       if (ret)
+               return ret;
+
+       return len;
+}
+
+static struct drbg_state_ops drbg_hmac_ops = {
+       .update         = drbg_hmac_update,
+       .generate       = drbg_hmac_generate,
+       .crypto_init    = drbg_init_hash_kernel,
+       .crypto_fini    = drbg_fini_hash_kernel,
+
+};
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+
+/******************************************************************
+ * Hash DRBG callback functions
+ ******************************************************************/
+
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+#define CRYPTO_DRBG_HASH_STRING "HASH "
+/*
+ * scratchpad usage: as drbg_hash_update and drbg_hash_df are used
+ * interlinked, the scratchpad is used as follows:
+ * drbg_hash_update
+ *     start: drbg->scratchpad
+ *     length: drbg_statelen(drbg)
+ * drbg_hash_df:
+ *     start: drbg->scratchpad + drbg_statelen(drbg)
+ *     length: drbg_blocklen(drbg)
+ *
+ * drbg_hash_process_addtl uses the scratchpad, but fully completes
+ * before either of the functions mentioned above is invoked. Therefore,
+ * drbg_hash_process_addtl does not need to be specifically considered.
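+ *
+ * For illustration (not normative): with a sha256 core, drbg_statelen = 55
+ * and drbg_blocklen = 32, so drbg_hash_update works on scratchpad bytes
+ * 0..54 and drbg_hash_df on bytes 55..86 -- 87 bytes in total, which is
+ * the drbg_statelen + drbg_blocklen allocated in drbg_alloc_state.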
+ */
+
+/* Derivation Function for Hash DRBG as defined in 10.4.1 */
+static int drbg_hash_df(struct drbg_state *drbg,
+                       unsigned char *outval, size_t outlen,
+                       struct list_head *entropylist)
+{
+       int ret = 0;
+       size_t len = 0;
+       unsigned char input[5];
+       unsigned char *tmp = drbg->scratchpad + drbg_statelen(drbg);
+       struct drbg_string data;
+
+       memset(tmp, 0, drbg_blocklen(drbg));
+
+       /* 10.4.1 step 3 */
+       input[0] = 1;
+       drbg_int2byte(&input[1], (outlen * 8), 4);
+
+       /* 10.4.1 step 4.1 -- concatenation of data for input into hash */
+       drbg_string_fill(&data, input, 5);
+       list_add(&data.list, entropylist);
+
+       /* 10.4.1 step 4 */
+       while (len < outlen) {
+               short blocklen = 0;
+               /* 10.4.1 step 4.1 */
+               ret = drbg_kcapi_hash(drbg, NULL, tmp, entropylist);
+               if (ret)
+                       goto out;
+               /* 10.4.1 step 4.2 */
+               input[0]++;
+               blocklen = (drbg_blocklen(drbg) < (outlen - len)) ?
+                           drbg_blocklen(drbg) : (outlen - len);
+               memcpy(outval + len, tmp, blocklen);
+               len += blocklen;
+       }
+
+out:
+       memset(tmp, 0, drbg_blocklen(drbg));
+       return ret;
+}
+
+/* update function for Hash DRBG as defined in 10.1.1.2 / 10.1.1.3 */
+static int drbg_hash_update(struct drbg_state *drbg, struct list_head *seed,
+                           int reseed)
+{
+       int ret = 0;
+       struct drbg_string data1, data2;
+       LIST_HEAD(datalist);
+       LIST_HEAD(datalist2);
+       unsigned char *V = drbg->scratchpad;
+       unsigned char prefix = DRBG_PREFIX1;
+
+       memset(drbg->scratchpad, 0, drbg_statelen(drbg));
+       if (!seed)
+               return -EINVAL;
+
+       if (reseed) {
+               /* 10.1.1.3 step 1 */
+               memcpy(V, drbg->V, drbg_statelen(drbg));
+               drbg_string_fill(&data1, &prefix, 1);
+               list_add_tail(&data1.list, &datalist);
+               drbg_string_fill(&data2, V, drbg_statelen(drbg));
+               list_add_tail(&data2.list, &datalist);
+       }
+       list_splice_tail(seed, &datalist);
+
+       /* 10.1.1.2 / 10.1.1.3 step 2 and 3 */
+       ret = drbg_hash_df(drbg, drbg->V, drbg_statelen(drbg), &datalist);
+       if (ret)
+               goto out;
+
+       /* 10.1.1.2 / 10.1.1.3 step 4  */
+       prefix = DRBG_PREFIX0;
+       drbg_string_fill(&data1, &prefix, 1);
+       list_add_tail(&data1.list, &datalist2);
+       drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&data2.list, &datalist2);
+       /* 10.1.1.2 / 10.1.1.3 step 4 */
+       ret = drbg_hash_df(drbg, drbg->C, drbg_statelen(drbg), &datalist2);
+
+out:
+       memset(drbg->scratchpad, 0, drbg_statelen(drbg));
+       return ret;
+}
+
+/* processing of additional information string for Hash DRBG */
+static int drbg_hash_process_addtl(struct drbg_state *drbg,
+                                  struct list_head *addtl)
+{
+       int ret = 0;
+       struct drbg_string data1, data2;
+       LIST_HEAD(datalist);
+       unsigned char prefix = DRBG_PREFIX2;
+
+       /* this is value w as per documentation */
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+
+       /* 10.1.1.4 step 2 */
+       if (!addtl || list_empty(addtl))
+               return 0;
+
+       /* 10.1.1.4 step 2a */
+       drbg_string_fill(&data1, &prefix, 1);
+       drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&data1.list, &datalist);
+       list_add_tail(&data2.list, &datalist);
+       list_splice_tail(addtl, &datalist);
+       ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+       if (ret)
+               goto out;
+
+       /* 10.1.1.4 step 2b */
+       drbg_add_buf(drbg->V, drbg_statelen(drbg),
+                    drbg->scratchpad, drbg_blocklen(drbg));
+
+out:
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+       return ret;
+}
+
+/* Hashgen defined in 10.1.1.4 */
+static int drbg_hash_hashgen(struct drbg_state *drbg,
+                            unsigned char *buf,
+                            unsigned int buflen)
+{
+       int len = 0;
+       int ret = 0;
+       unsigned char *src = drbg->scratchpad;
+       unsigned char *dst = drbg->scratchpad + drbg_statelen(drbg);
+       struct drbg_string data;
+       LIST_HEAD(datalist);
+       unsigned char prefix = DRBG_PREFIX1;
+
+       memset(src, 0, drbg_statelen(drbg));
+       memset(dst, 0, drbg_blocklen(drbg));
+
+       /* 10.1.1.4 step hashgen 2 */
+       memcpy(src, drbg->V, drbg_statelen(drbg));
+
+       drbg_string_fill(&data, src, drbg_statelen(drbg));
+       list_add_tail(&data.list, &datalist);
+       while (len < buflen) {
+               unsigned int outlen = 0;
+               /* 10.1.1.4 step hashgen 4.1 */
+               ret = drbg_kcapi_hash(drbg, NULL, dst, &datalist);
+               if (ret) {
+                       len = ret;
+                       goto out;
+               }
+               outlen = (drbg_blocklen(drbg) < (buflen - len)) ?
+                         drbg_blocklen(drbg) : (buflen - len);
+               if (!drbg_fips_continuous_test(drbg, dst)) {
+                       drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+                       continue;
+               }
+               /* 10.1.1.4 step hashgen 4.2 */
+               memcpy(buf + len, dst, outlen);
+               len += outlen;
+               /* 10.1.1.4 hashgen step 4.3 */
+               if (len < buflen)
+                       drbg_add_buf(src, drbg_statelen(drbg), &prefix, 1);
+       }
+
+out:
+       memset(drbg->scratchpad, 0,
+              (drbg_statelen(drbg) + drbg_blocklen(drbg)));
+       return len;
+}
+
+/* generate function for Hash DRBG as defined in 10.1.1.4 */
+static int drbg_hash_generate(struct drbg_state *drbg,
+                             unsigned char *buf, unsigned int buflen,
+                             struct list_head *addtl)
+{
+       int len = 0;
+       int ret = 0;
+       unsigned char req[8];
+       unsigned char prefix = DRBG_PREFIX3;
+       struct drbg_string data1, data2;
+       LIST_HEAD(datalist);
+
+       /* 10.1.1.4 step 2 */
+       ret = drbg_hash_process_addtl(drbg, addtl);
+       if (ret)
+               return ret;
+       /* 10.1.1.4 step 3 */
+       len = drbg_hash_hashgen(drbg, buf, buflen);
+
+       /* this is the value H as documented in 10.1.1.4 */
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+       /* 10.1.1.4 step 4 */
+       drbg_string_fill(&data1, &prefix, 1);
+       list_add_tail(&data1.list, &datalist);
+       drbg_string_fill(&data2, drbg->V, drbg_statelen(drbg));
+       list_add_tail(&data2.list, &datalist);
+       ret = drbg_kcapi_hash(drbg, NULL, drbg->scratchpad, &datalist);
+       if (ret) {
+               len = ret;
+               goto out;
+       }
+
+       /* 10.1.1.4 step 5 */
+       drbg_add_buf(drbg->V, drbg_statelen(drbg),
+                    drbg->scratchpad, drbg_blocklen(drbg));
+       drbg_add_buf(drbg->V, drbg_statelen(drbg),
+                    drbg->C, drbg_statelen(drbg));
+       drbg_int2byte(req, drbg->reseed_ctr, sizeof(req));
+       drbg_add_buf(drbg->V, drbg_statelen(drbg), req, 8);
+
+out:
+       memset(drbg->scratchpad, 0, drbg_blocklen(drbg));
+       return len;
+}
+
+/*
+ * scratchpad usage: as update and generate are used in isolation, both
+ * can use the scratchpad
+ */
+static struct drbg_state_ops drbg_hash_ops = {
+       .update         = drbg_hash_update,
+       .generate       = drbg_hash_generate,
+       .crypto_init    = drbg_init_hash_kernel,
+       .crypto_fini    = drbg_fini_hash_kernel,
+};
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+
+/******************************************************************
+ * Functions common for DRBG implementations
+ ******************************************************************/
+
+/*
+ * Seeding or reseeding of the DRBG
+ *
+ * @drbg: DRBG state struct
+ * @pers: personalization / additional information buffer
+ * @reseed: 0 for initial seed process, 1 for reseeding
+ *
+ * return:
+ *     0 on success
+ *     error value otherwise
+ */
+static int drbg_seed(struct drbg_state *drbg, struct drbg_string *pers,
+                    bool reseed)
+{
+       int ret = 0;
+       unsigned char *entropy = NULL;
+       size_t entropylen = 0;
+       struct drbg_string data1;
+       LIST_HEAD(seedlist);
+
+       /* 9.1 / 9.2 / 9.3.1 step 3 */
+       if (pers && pers->len > (drbg_max_addtl(drbg))) {
+               pr_devel("DRBG: personalization string too long %zu\n",
+                        pers->len);
+               return -EINVAL;
+       }
+
+       if (drbg->test_data && drbg->test_data->testentropy) {
+               drbg_string_fill(&data1, drbg->test_data->testentropy->buf,
+                                drbg->test_data->testentropy->len);
+               pr_devel("DRBG: using test entropy\n");
+       } else {
+               /*
+                * Gather entropy equal to the security strength of the DRBG.
+                * With a derivation function, a nonce is required in addition
+                * to the entropy. A nonce must be at least 1/2 of the security
+                * to the entropy. A nonce must be at least 1/2 of the security
+                * strength of the DRBG in size. Thus, entropy + nonce is 3/2
+                * of the strength. The consideration of a nonce is only
+                * applicable during initial seeding.
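+                * For example, a DRBG with a strength of 256 bits (32 bytes)
+                * gathers 48 bytes of entropy for the initial seeding and
+                * 32 bytes for a reseed.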
+                */
+               entropylen = drbg_sec_strength(drbg->core->flags);
+               if (!entropylen)
+                       return -EFAULT;
+               if (!reseed)
+                       entropylen = ((entropylen + 1) / 2) * 3;
+               pr_devel("DRBG: (re)seeding with %zu bytes of entropy\n",
+                        entropylen);
+               entropy = kzalloc(entropylen, GFP_KERNEL);
+               if (!entropy)
+                       return -ENOMEM;
+               get_random_bytes(entropy, entropylen);
+               drbg_string_fill(&data1, entropy, entropylen);
+       }
+       list_add_tail(&data1.list, &seedlist);
+
+       /*
+        * concatenation of entropy with personalization string / addtl input --
+        * the variable pers is handed in directly by the caller, so check
+        * whether its contents are appropriate
+        */
+       if (pers && pers->buf && 0 < pers->len) {
+               list_add_tail(&pers->list, &seedlist);
+               pr_devel("DRBG: using personalization string\n");
+       }
+
+       ret = drbg->d_ops->update(drbg, &seedlist, reseed);
+       if (ret)
+               goto out;
+
+       drbg->seeded = true;
+       /* 10.1.1.2 / 10.1.1.3 step 5 */
+       drbg->reseed_ctr = 1;
+
+out:
+       if (entropy)
+               kzfree(entropy);
+       return ret;
+}
+
+/* Free all substructures of a DRBG state, but not the state structure itself */
+static inline void drbg_dealloc_state(struct drbg_state *drbg)
+{
+       if (!drbg)
+               return;
+       if (drbg->V)
+               kzfree(drbg->V);
+       drbg->V = NULL;
+       if (drbg->C)
+               kzfree(drbg->C);
+       drbg->C = NULL;
+       if (drbg->scratchpad)
+               kzfree(drbg->scratchpad);
+       drbg->scratchpad = NULL;
+       drbg->reseed_ctr = 0;
+#ifdef CONFIG_CRYPTO_FIPS
+       if (drbg->prev)
+               kzfree(drbg->prev);
+       drbg->prev = NULL;
+       drbg->fips_primed = false;
+#endif
+}
+
+/*
+ * Allocate all sub-structures for a DRBG state.
+ * The DRBG state structure must already be allocated.
+ */
+static inline int drbg_alloc_state(struct drbg_state *drbg)
+{
+       int ret = -ENOMEM;
+       unsigned int sb_size = 0;
+
+       if (!drbg)
+               return -EINVAL;
+
+       drbg->V = kzalloc(drbg_statelen(drbg), GFP_KERNEL);
+       if (!drbg->V)
+               goto err;
+       drbg->C = kzalloc(drbg_statelen(drbg), GFP_KERNEL);
+       if (!drbg->C)
+               goto err;
+#ifdef CONFIG_CRYPTO_FIPS
+       drbg->prev = kzalloc(drbg_blocklen(drbg), GFP_KERNEL);
+       if (!drbg->prev)
+               goto err;
+       drbg->fips_primed = false;
+#endif
+       /* scratchpad is only generated for CTR and Hash */
+       if (drbg->core->flags & DRBG_HMAC)
+               sb_size = 0;
+       else if (drbg->core->flags & DRBG_CTR)
+               sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg) + /* temp */
+                         drbg_statelen(drbg) + /* df_data */
+                         drbg_blocklen(drbg) + /* pad */
+                         drbg_blocklen(drbg) + /* iv */
+                         drbg_statelen(drbg) + drbg_blocklen(drbg); /* temp */
+       else
+               sb_size = drbg_statelen(drbg) + drbg_blocklen(drbg);
+
+       if (0 < sb_size) {
+               drbg->scratchpad = kzalloc(sb_size, GFP_KERNEL);
+               if (!drbg->scratchpad)
+                       goto err;
+       }
+       spin_lock_init(&drbg->drbg_lock);
+       return 0;
+
+err:
+       drbg_dealloc_state(drbg);
+       return ret;
+}
+
+/*
+ * Strategy to avoid holding long-term locks: generate a shadow copy of the
+ * DRBG and perform all operations on this shadow copy. After finishing,
+ * restore the updated state of the shadow copy into the original drbg
+ * state. This way, only the read and write operations on the original
+ * drbg state must be locked.
+ */
+static inline void drbg_copy_drbg(struct drbg_state *src,
+                                 struct drbg_state *dst)
+{
+       if (!src || !dst)
+               return;
+       memcpy(dst->V, src->V, drbg_statelen(src));
+       memcpy(dst->C, src->C, drbg_statelen(src));
+       dst->reseed_ctr = src->reseed_ctr;
+       dst->seeded = src->seeded;
+       dst->pr = src->pr;
+#ifdef CONFIG_CRYPTO_FIPS
+       dst->fips_primed = src->fips_primed;
+       memcpy(dst->prev, src->prev, drbg_blocklen(src));
+#endif
+       /*
+        * Not copied:
+        * scratchpad is initialized in drbg_alloc_state;
+        * priv_data is initialized with the call to crypto_init;
+        * d_ops and core are set outside, as these parameters are const;
+        * test_data is set outside to prevent it being copied back.
+        */
+}
+
+static int drbg_make_shadow(struct drbg_state *drbg, struct drbg_state **shadow)
+{
+       int ret = -ENOMEM;
+       struct drbg_state *tmp = NULL;
+
+       if (!drbg || !drbg->core || !drbg->V || !drbg->C) {
+               pr_devel("DRBG: attempt to generate shadow copy for "
+                        "uninitialized DRBG state rejected\n");
+               return -EINVAL;
+       }
+       /* HMAC does not have a scratchpad */
+       if (!(drbg->core->flags & DRBG_HMAC) && NULL == drbg->scratchpad)
+               return -EINVAL;
+
+       tmp = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
+       if (!tmp)
+               return -ENOMEM;
+
+       /* read-only data as they are defined as const, no lock needed */
+       tmp->core = drbg->core;
+       tmp->d_ops = drbg->d_ops;
+
+       ret = drbg_alloc_state(tmp);
+       if (ret)
+               goto err;
+
+       spin_lock_bh(&drbg->drbg_lock);
+       drbg_copy_drbg(drbg, tmp);
+       /* only make a link to the test buffer, as we only read that data */
+       tmp->test_data = drbg->test_data;
+       spin_unlock_bh(&drbg->drbg_lock);
+       *shadow = tmp;
+       return 0;
+
+err:
+       if (tmp)
+               kzfree(tmp);
+       return ret;
+}
+
+static void drbg_restore_shadow(struct drbg_state *drbg,
+                               struct drbg_state **shadow)
+{
+       struct drbg_state *tmp = *shadow;
+
+       spin_lock_bh(&drbg->drbg_lock);
+       drbg_copy_drbg(tmp, drbg);
+       spin_unlock_bh(&drbg->drbg_lock);
+       drbg_dealloc_state(tmp);
+       kzfree(tmp);
+       *shadow = NULL;
+}
+
+/*************************************************************************
+ * DRBG interface functions
+ *************************************************************************/
+
+/*
+ * DRBG generate function as required by SP800-90A - this function
+ * generates random numbers
+ *
+ * @drbg DRBG state handle
+ * @buf Buffer in which to store the random numbers -- the buffer must already
+ *      be pre-allocated by caller
+ * @buflen Length of output buffer - this value defines the number of random
+ *        bytes pulled from DRBG
+ * @addtl Additional input that is mixed into state, may be NULL -- note
+ *       the entropy is pulled by the DRBG internally unconditionally
+ *       as defined in SP800-90A. The additional input is mixed into
+ *       the state in addition to the pulled entropy.
+ *
+ * return: generated number of bytes
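+ *
+ * Rough flow of the function below (a summary, not part of SP800-90A):
+ *     1. sanity check the output buffer and the additional input
+ *     2. operate on a shadow copy of the state (see drbg_make_shadow)
+ *     3. reseed if prediction resistance is set or the DRBG is unseeded
+ *     4. mix a time stamp into the request (outside of test mode)
+ *     5. invoke the per-type generate callback and bump reseed_ctr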
+ */
+static int drbg_generate(struct drbg_state *drbg,
+                        unsigned char *buf, unsigned int buflen,
+                        struct drbg_string *addtl)
+{
+       int len = 0;
+       struct drbg_state *shadow = NULL;
+       LIST_HEAD(addtllist);
+       struct drbg_string timestamp;
+       union {
+               cycles_t cycles;
+               unsigned char char_cycles[sizeof(cycles_t)];
+       } now;
+
+       if (0 == buflen || !buf) {
+               pr_devel("DRBG: no output buffer provided\n");
+               return -EINVAL;
+       }
+       if (addtl && NULL == addtl->buf && 0 < addtl->len) {
+               pr_devel("DRBG: wrong format of additional information\n");
+               return -EINVAL;
+       }
+
+       len = drbg_make_shadow(drbg, &shadow);
+       if (len) {
+               pr_devel("DRBG: shadow copy cannot be generated\n");
+               return len;
+       }
+
+       /* 9.3.1 step 2 */
+       len = -EINVAL;
+       if (buflen > (drbg_max_request_bytes(shadow))) {
+               pr_devel("DRBG: requested random numbers too large %u\n",
+                        buflen);
+               goto err;
+       }
+
+       /* 9.3.1 step 3 is implicit with the chosen DRBG */
+
+       /* 9.3.1 step 4 */
+       if (addtl && addtl->len > (drbg_max_addtl(shadow))) {
+               pr_devel("DRBG: additional information string too long %zu\n",
+                        addtl->len);
+               goto err;
+       }
+       /* 9.3.1 step 5 is implicit with the chosen DRBG */
+
+       /*
+        * 9.3.1 steps 6 and 9, supplemented by 9.3.2 step c, are implemented
+        * here. The spec is a bit convoluted, so we make it simpler.
+        */
+       if ((drbg_max_requests(shadow)) < shadow->reseed_ctr)
+               shadow->seeded = false;
+
+       /* allocate cipher handle */
+       if (shadow->d_ops->crypto_init) {
+               len = shadow->d_ops->crypto_init(shadow);
+               if (len)
+                       goto err;
+       }
+
+       if (shadow->pr || !shadow->seeded) {
+               pr_devel("DRBG: reseeding before generation (prediction "
+                        "resistance: %s, state %s)\n",
+                        drbg->pr ? "true" : "false",
+                        drbg->seeded ? "seeded" : "unseeded");
+               /* 9.3.1 steps 7.1 through 7.3 */
+               len = drbg_seed(shadow, addtl, true);
+               if (len)
+                       goto err;
+               /* 9.3.1 step 7.4 */
+               addtl = NULL;
+       }
+
+       /*
+        * Mix the time stamp into the DRBG state if the DRBG is not in
+        * test mode. If there are two callers invoking the DRBG at the same
+        * time, i.e. before the first caller merges its shadow state back,
+        * both callers would obtain the same random number stream without
+        * changing the state here.
+        */
+       if (!drbg->test_data) {
+               now.cycles = random_get_entropy();
+               drbg_string_fill(&timestamp, now.char_cycles, sizeof(cycles_t));
+               list_add_tail(&timestamp.list, &addtllist);
+       }
+       if (addtl && 0 < addtl->len)
+               list_add_tail(&addtl->list, &addtllist);
+       /* 9.3.1 step 8 and 10 */
+       len = shadow->d_ops->generate(shadow, buf, buflen, &addtllist);
+
+       /* 10.1.1.4 step 6, 10.1.2.5 step 7, 10.2.1.5.2 step 7 */
+       shadow->reseed_ctr++;
+       if (0 >= len)
+               goto err;
+
+       /*
+        * Section 11.3.3 requires self tests to be re-performed after a
+        * certain amount of generated random numbers. The chosen value
+        * after which the self test is performed is arbitrary, but it
+        * should be reasonable.
+        * However, we do not perform the self tests for the following
+        * reason: it is mathematically impossible that the initial self
+        * tests were successful while a later run is not. If the initial
+        * tests passed and a following run did not, the kernel integrity
+        * would be violated. In this case, the entire kernel operation is
+        * questionable and it is unlikely that the integrity violation
+        * only affects the correct operation of the DRBG.
+        *
+        * Although the following code is commented out, it is provided in
+        * case somebody has a need to implement the test of 11.3.3.
+        */
+#if 0
+       if (shadow->reseed_ctr && !(shadow->reseed_ctr % 4096)) {
+               int err = 0;
+               pr_devel("DRBG: start to perform self test\n");
+               if (drbg->core->flags & DRBG_HMAC)
+                       err = alg_test("drbg_pr_hmac_sha256",
+                                      "drbg_pr_hmac_sha256", 0, 0);
+               else if (drbg->core->flags & DRBG_CTR)
+                       err = alg_test("drbg_pr_ctr_aes128",
+                                      "drbg_pr_ctr_aes128", 0, 0);
+               else
+                       err = alg_test("drbg_pr_sha256",
+                                      "drbg_pr_sha256", 0, 0);
+               if (err) {
+                       pr_err("DRBG: periodical self test failed\n");
+                       /*
+                        * uninstantiate implies that from now on, only errors
+                        * are returned when reusing this DRBG cipher handle
+                        */
+                       drbg_uninstantiate(drbg);
+                       drbg_dealloc_state(shadow);
+                       kzfree(shadow);
+                       return 0;
+               } else {
+                       pr_devel("DRBG: self test successful\n");
+               }
+       }
+#endif
+
+err:
+       if (shadow->d_ops->crypto_fini)
+               shadow->d_ops->crypto_fini(shadow);
+       drbg_restore_shadow(drbg, &shadow);
+       return len;
+}
+
+/*
+ * Wrapper around drbg_generate which can pull arbitrarily long strings
+ * from the DRBG without hitting the maximum request limitation.
+ *
+ * Parameters: see drbg_generate
+ * Return codes: see drbg_generate -- if one drbg_generate request fails,
+ *              the entire drbg_generate_long request fails
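+ *
+ * A worked example (illustration only, assuming a per-request cap of
+ * 2^16 bytes, i.e. the 2^19 bit limit of SP800-90A): a request for
+ * 100000 bytes is served by two drbg_generate calls returning 65536
+ * and 34464 bytes, respectively.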
+ */
+static int drbg_generate_long(struct drbg_state *drbg,
+                             unsigned char *buf, unsigned int buflen,
+                             struct drbg_string *addtl)
+{
+       int len = 0;
+       unsigned int slice = 0;
+       do {
+               int tmplen = 0;
+               unsigned int chunk = 0;
+               slice = ((buflen - len) / drbg_max_request_bytes(drbg));
+               chunk = slice ? drbg_max_request_bytes(drbg) : (buflen - len);
+               tmplen = drbg_generate(drbg, buf + len, chunk, addtl);
+               if (0 >= tmplen)
+                       return tmplen;
+               len += tmplen;
+       } while (slice > 0 && (len < buflen));
+       return len;
+}
+
+/*
+ * DRBG instantiation function as required by SP800-90A - this function
+ * sets up the DRBG handle, performs the initial seeding and all sanity
+ * checks required by SP800-90A
+ *
+ * @drbg memory of state -- if NULL, new memory is allocated
+ * @pers Personalization string that is mixed into state, may be NULL -- note
+ *      the entropy is pulled by the DRBG internally unconditionally
+ *      as defined in SP800-90A. The additional input is mixed into
+ *      the state in addition to the pulled entropy.
+ * @coreref reference to core
+ * @pr prediction resistance enabled
+ *
+ * return
+ *     0 on success
+ *     error value otherwise
+ */
+static int drbg_instantiate(struct drbg_state *drbg, struct drbg_string *pers,
+                           int coreref, bool pr)
+{
+       int ret = -ENOMEM;
+
+       pr_devel("DRBG: Initializing DRBG core %d with prediction resistance "
+                "%s\n", coreref, pr ? "enabled" : "disabled");
+       drbg->core = &drbg_cores[coreref];
+       drbg->pr = pr;
+       drbg->seeded = false;
+       switch (drbg->core->flags & DRBG_TYPE_MASK) {
+#ifdef CONFIG_CRYPTO_DRBG_HMAC
+       case DRBG_HMAC:
+               drbg->d_ops = &drbg_hmac_ops;
+               break;
+#endif /* CONFIG_CRYPTO_DRBG_HMAC */
+#ifdef CONFIG_CRYPTO_DRBG_HASH
+       case DRBG_HASH:
+               drbg->d_ops = &drbg_hash_ops;
+               break;
+#endif /* CONFIG_CRYPTO_DRBG_HASH */
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+       case DRBG_CTR:
+               drbg->d_ops = &drbg_ctr_ops;
+               break;
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       /* 9.1 step 1 is implicit with the selected DRBG type */
+
+       /*
+        * 9.1 step 2 is implicit as caller can select prediction resistance
+        * and the flag is copied into drbg->flags --
+        * all DRBG types support prediction resistance
+        */
+
+       /* 9.1 step 4 is implicit in drbg_sec_strength */
+
+       ret = drbg_alloc_state(drbg);
+       if (ret)
+               return ret;
+
+       ret = -EFAULT;
+       if (drbg->d_ops->crypto_init && drbg->d_ops->crypto_init(drbg))
+               goto err;
+       ret = drbg_seed(drbg, pers, false);
+       if (drbg->d_ops->crypto_fini)
+               drbg->d_ops->crypto_fini(drbg);
+       if (ret)
+               goto err;
+
+       return 0;
+
+err:
+       drbg_dealloc_state(drbg);
+       return ret;
+}
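A minimal sketch of passing a personalization string to this function, using
drbg_string_fill() exactly as the callers below do (pers_buf and pers_len are
placeholder names, not part of this file):

        struct drbg_string pers;
        int ret;

        drbg_string_fill(&pers, pers_buf, pers_len);
        /* instantiate core 'coreref' with prediction resistance enabled */
        ret = drbg_instantiate(drbg, &pers, coreref, true);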
+
+/*
+ * DRBG uninstantiate function as required by SP800-90A - this function
+ * frees all buffers and the DRBG handle
+ *
+ * @drbg DRBG state handle
+ *
+ * return
+ *     0 on success
+ */
+static int drbg_uninstantiate(struct drbg_state *drbg)
+{
+       spin_lock_bh(&drbg->drbg_lock);
+       drbg_dealloc_state(drbg);
+       /* no scrubbing of test_data -- this shall survive an uninstantiate */
+       spin_unlock_bh(&drbg->drbg_lock);
+       return 0;
+}
+
+/*
+ * Helper function for setting the test data in the DRBG
+ *
+ * @drbg DRBG state handle
+ * @test_data test data to set
+ */
+static inline void drbg_set_testdata(struct drbg_state *drbg,
+                                    struct drbg_test_data *test_data)
+{
+       if (!test_data || !test_data->testentropy)
+               return;
+       spin_lock_bh(&drbg->drbg_lock);
+       drbg->test_data = test_data;
+       spin_unlock_bh(&drbg->drbg_lock);
+}
+
+/***************************************************************
+ * Kernel crypto API cipher invocations requested by DRBG
+ ***************************************************************/
+
+#if defined(CONFIG_CRYPTO_DRBG_HASH) || defined(CONFIG_CRYPTO_DRBG_HMAC)
+struct sdesc {
+       struct shash_desc shash;
+       char ctx[];
+};
+
+static int drbg_init_hash_kernel(struct drbg_state *drbg)
+{
+       struct sdesc *sdesc;
+       struct crypto_shash *tfm;
+
+       tfm = crypto_alloc_shash(drbg->core->backend_cra_name, 0, 0);
+       if (IS_ERR(tfm)) {
+               pr_info("DRBG: could not allocate digest TFM handle\n");
+               return PTR_ERR(tfm);
+       }
+       BUG_ON(drbg_blocklen(drbg) != crypto_shash_digestsize(tfm));
+       sdesc = kzalloc(sizeof(struct shash_desc) + crypto_shash_descsize(tfm),
+                       GFP_KERNEL);
+       if (!sdesc) {
+               crypto_free_shash(tfm);
+               return -ENOMEM;
+       }
+
+       sdesc->shash.tfm = tfm;
+       sdesc->shash.flags = 0;
+       drbg->priv_data = sdesc;
+       return 0;
+}
+
+static int drbg_fini_hash_kernel(struct drbg_state *drbg)
+{
+       struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+       if (sdesc) {
+               crypto_free_shash(sdesc->shash.tfm);
+               kzfree(sdesc);
+       }
+       drbg->priv_data = NULL;
+       return 0;
+}
+
+static int drbg_kcapi_hash(struct drbg_state *drbg, const unsigned char *key,
+                          unsigned char *outval, const struct list_head *in)
+{
+       struct sdesc *sdesc = (struct sdesc *)drbg->priv_data;
+       struct drbg_string *input = NULL;
+
+       if (key)
+               crypto_shash_setkey(sdesc->shash.tfm, key, drbg_statelen(drbg));
+       crypto_shash_init(&sdesc->shash);
+       list_for_each_entry(input, in, list)
+               crypto_shash_update(&sdesc->shash, input->buf, input->len);
+       return crypto_shash_final(&sdesc->shash, outval);
+}
+#endif /* (CONFIG_CRYPTO_DRBG_HASH || CONFIG_CRYPTO_DRBG_HMAC) */
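The sdesc wrapper above is the usual pattern for synchronous hashes: struct
shash_desc is followed by a flexible ctx[] array, and the allocation in
drbg_init_hash_kernel() sizes it with crypto_shash_descsize(tfm), so a single
buffer carries the complete operational state of the hash.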
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+static int drbg_init_sym_kernel(struct drbg_state *drbg)
+{
+       int ret = 0;
+       struct crypto_blkcipher *tfm;
+
+       tfm = crypto_alloc_blkcipher(drbg->core->backend_cra_name, 0, 0);
+       if (IS_ERR(tfm)) {
+               pr_info("DRBG: could not allocate cipher TFM handle\n");
+               return PTR_ERR(tfm);
+       }
+       BUG_ON(drbg_blocklen(drbg) != crypto_blkcipher_blocksize(tfm));
+       drbg->priv_data = tfm;
+       return ret;
+}
+
+static int drbg_fini_sym_kernel(struct drbg_state *drbg)
+{
+       struct crypto_blkcipher *tfm =
+               (struct crypto_blkcipher *)drbg->priv_data;
+       if (tfm)
+               crypto_free_blkcipher(tfm);
+       drbg->priv_data = NULL;
+       return 0;
+}
+
+static int drbg_kcapi_sym(struct drbg_state *drbg, const unsigned char *key,
+                         unsigned char *outval, const struct drbg_string *in)
+{
+       int ret = 0;
+       struct scatterlist sg_in, sg_out;
+       struct blkcipher_desc desc;
+       struct crypto_blkcipher *tfm =
+               (struct crypto_blkcipher *)drbg->priv_data;
+
+       desc.tfm = tfm;
+       desc.flags = 0;
+       crypto_blkcipher_setkey(tfm, key, (drbg_keylen(drbg)));
+       /* there is only one component in *in */
+       sg_init_one(&sg_in, in->buf, in->len);
+       sg_init_one(&sg_out, outval, drbg_blocklen(drbg));
+       ret = crypto_blkcipher_encrypt(&desc, &sg_out, &sg_in, in->len);
+
+       return ret;
+}
+#endif /* CONFIG_CRYPTO_DRBG_CTR */
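Note that drbg_kcapi_sym() always produces exactly one block: the BUG_ON()
in drbg_init_sym_kernel() pins drbg_blocklen() to the cipher's block size,
and sg_out is sized accordingly, so each invocation is a single-block
encryption of in->buf under the supplied key.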
+
+/***************************************************************
+ * Kernel crypto API interface to register DRBG
+ ***************************************************************/
+
+/*
+ * Look up the DRBG core reference and prediction resistance flag for a
+ * given kernel crypto API cra_driver_name.
+ * The code uses the drbg_cores definition to do this.
+ *
+ * @cra_driver_name kernel crypto API cra_driver_name
+ * @coreref reference to integer which is filled with the index of
+ *  the applicable core
+ * @pr reference for setting prediction resistance
+ *
+ * return: nothing -- the results are delivered via @coreref and @pr
+ */
+static inline void drbg_convert_tfm_core(const char *cra_driver_name,
+                                        int *coreref, bool *pr)
+{
+       int i = 0;
+       size_t start = 0;
+       int len = 0;
+
+       *pr = true;
+       /* disassemble the names */
+       if (!memcmp(cra_driver_name, "drbg_nopr_", 10)) {
+               start = 10;
+               *pr = false;
+       } else if (!memcmp(cra_driver_name, "drbg_pr_", 8)) {
+               start = 8;
+       } else {
+               return;
+       }
+
+       /* remove the first part */
+       len = strlen(cra_driver_name) - start;
+       for (i = 0; ARRAY_SIZE(drbg_cores) > i; i++) {
+               if (!memcmp(cra_driver_name + start, drbg_cores[i].cra_name,
+                           len)) {
+                       *coreref = i;
+                       return;
+               }
+       }
+}
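For example, the driver name "drbg_nopr_ctr_aes128" sets *pr = false and
start = 10; the remainder "ctr_aes128" is then compared against the
drbg_cores[].cra_name entries and the matching index is written to *coreref.
A name carrying neither prefix returns early, leaving *coreref untouched and
*pr at its default of true.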
+
+static int drbg_kcapi_init(struct crypto_tfm *tfm)
+{
+       struct drbg_state *drbg = crypto_tfm_ctx(tfm);
+       bool pr = false;
+       int coreref = 0;
+
+       drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm), &coreref, &pr);
+       /*
+        * when personalization string is needed, the caller must call reset
+        * and provide the personalization string as seed information
+        */
+       return drbg_instantiate(drbg, NULL, coreref, pr);
+}
+
+static void drbg_kcapi_cleanup(struct crypto_tfm *tfm)
+{
+       drbg_uninstantiate(crypto_tfm_ctx(tfm));
+}
+
+/*
+ * Generate random numbers invoked by the kernel crypto API:
+ * The API of the kernel crypto API is extended as follows:
+ *
+ * If dlen is larger than zero, rdata is interpreted as the output buffer
+ * where random data is to be stored.
+ *
+ * If dlen is zero, rdata is interpreted as a pointer to a struct drbg_gen
+ * which holds the additional information string that is used for the
+ * DRBG generation process. The output buffer that is to be used to store
+ * data is also pointed to by struct drbg_gen.
+ */
+static int drbg_kcapi_random(struct crypto_rng *tfm, u8 *rdata,
+                            unsigned int dlen)
+{
+       struct drbg_state *drbg = crypto_rng_ctx(tfm);
+       if (0 < dlen) {
+               return drbg_generate_long(drbg, rdata, dlen, NULL);
+       } else {
+               struct drbg_gen *data = (struct drbg_gen *)rdata;
+               struct drbg_string addtl;
+               /* catch NULL pointer */
+               if (!data)
+                       return 0;
+               drbg_set_testdata(drbg, data->test_data);
+               /* linked list variable is now local to allow modification */
+               drbg_string_fill(&addtl, data->addtl->buf, data->addtl->len);
+               return drbg_generate_long(drbg, data->outbuf, data->outlen,
+                                         &addtl);
+       }
+}
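A hedged consumer-side sketch of both call styles, assuming <crypto/rng.h>
and <crypto/drbg.h> are included; the struct drbg_gen field names are
inferred from their use above, and drbg_demo/addtl_buf/addtl_len are
illustrative names only:

        static int drbg_demo(const unsigned char *addtl_buf, size_t addtl_len)
        {
                struct crypto_rng *rng;
                struct drbg_string addtl;
                struct drbg_gen gen;
                u8 out[16];
                int ret;

                rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
                if (IS_ERR(rng))
                        return PTR_ERR(rng);
                /* dlen > 0: rdata is simply the output buffer */
                ret = crypto_rng_get_bytes(rng, out, sizeof(out));
                if (ret < 0)
                        goto out;
                /* dlen == 0: rdata carries a struct drbg_gen instead */
                drbg_string_fill(&addtl, addtl_buf, addtl_len);
                gen.outbuf = out;
                gen.outlen = sizeof(out);
                gen.addtl = &addtl;
                gen.test_data = NULL;
                ret = crypto_rng_get_bytes(rng, (u8 *)&gen, 0);
        out:
                crypto_free_rng(rng);
                return ret;
        }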
+
+/*
+ * Reset the DRBG invoked by the kernel crypto API
+ * The reset implies a full re-initialization of the DRBG. Similar to the
+ * generate function of drbg_kcapi_random, this function extends the
+ * kernel crypto API interface with struct drbg_gen
+ */
+static int drbg_kcapi_reset(struct crypto_rng *tfm, u8 *seed, unsigned int slen)
+{
+       struct drbg_state *drbg = crypto_rng_ctx(tfm);
+       struct crypto_tfm *tfm_base = crypto_rng_tfm(tfm);
+       bool pr = false;
+       struct drbg_string seed_string;
+       int coreref = 0;
+
+       drbg_uninstantiate(drbg);
+       drbg_convert_tfm_core(crypto_tfm_alg_driver_name(tfm_base), &coreref,
+                             &pr);
+       if (0 < slen) {
+               drbg_string_fill(&seed_string, seed, slen);
+               return drbg_instantiate(drbg, &seed_string, coreref, pr);
+       } else {
+               struct drbg_gen *data = (struct drbg_gen *)seed;
+               /* allow invocation of API call with NULL, 0 */
+               if (!data)
+                       return drbg_instantiate(drbg, NULL, coreref, pr);
+               drbg_set_testdata(drbg, data->test_data);
+               /* linked list variable is now local to allow modification */
+               drbg_string_fill(&seed_string, data->addtl->buf,
+                                data->addtl->len);
+               return drbg_instantiate(drbg, &seed_string, coreref, pr);
+       }
+}
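The reset path can be driven the same way with the rng handle from the
sketch above (pers_buf and pers_len are again placeholders):

        /* slen > 0: the seed buffer is taken as the personalization string */
        crypto_rng_reset(rng, pers_buf, pers_len);
        /* slen == 0 with a NULL pointer: plain re-instantiation */
        crypto_rng_reset(rng, NULL, 0);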
+
+/***************************************************************
+ * Kernel module: code to load the module
+ ***************************************************************/
+
+/*
+ * Tests as defined in 11.3.2 in addition to the cipher tests: testing
+ * of the error handling.
+ *
+ * Note: testing of failing seed source as defined in 11.3.2 is not applicable
+ * as the seed source, get_random_bytes, does not fail.
+ *
+ * Note 2: There is no sensible way of testing the reseed counter
+ * enforcement, so skip it.
+ */
+static inline int __init drbg_healthcheck_sanity(void)
+{
+#ifdef CONFIG_CRYPTO_FIPS
+       int len = 0;
+#define OUTBUFLEN 16
+       unsigned char buf[OUTBUFLEN];
+       struct drbg_state *drbg = NULL;
+       int ret = -EFAULT;
+       int rc = -EFAULT;
+       bool pr = false;
+       int coreref = 0;
+       struct drbg_string addtl;
+       size_t max_addtllen, max_request_bytes;
+
+       /* only perform test in FIPS mode */
+       if (!fips_enabled)
+               return 0;
+
+#ifdef CONFIG_CRYPTO_DRBG_CTR
+       drbg_convert_tfm_core("drbg_nopr_ctr_aes128", &coreref, &pr);
+#elif defined CONFIG_CRYPTO_DRBG_HASH
+       drbg_convert_tfm_core("drbg_nopr_sha256", &coreref, &pr);
+#else
+       drbg_convert_tfm_core("drbg_nopr_hmac_sha256", &coreref, &pr);
+#endif
+
+       drbg = kzalloc(sizeof(struct drbg_state), GFP_KERNEL);
+       if (!drbg)
+               return -ENOMEM;
+
+       /*
+        * if the following tests fail, it is likely that there is a buffer
+        * overflow as buf is much smaller than the requested or provided
+        * string lengths -- in case the error handling does not succeed
+        * we may get an OOPS. And we want to get an OOPS as this is a
+        * grave bug.
+        */
+
+       /* get a valid instance of DRBG for following tests */
+       ret = drbg_instantiate(drbg, NULL, coreref, pr);
+       if (ret) {
+               rc = ret;
+               goto outbuf;
+       }
+       max_addtllen = drbg_max_addtl(drbg);
+       max_request_bytes = drbg_max_request_bytes(drbg);
+       drbg_string_fill(&addtl, buf, max_addtllen + 1);
+       /* overflow addtllen with additional info string */
+       len = drbg_generate(drbg, buf, OUTBUFLEN, &addtl);
+       BUG_ON(0 < len);
+       /* overflow max_bits */
+       len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
+       BUG_ON(0 < len);
+       drbg_uninstantiate(drbg);
+
+       /* overflow max addtllen with personalization string */
+       ret = drbg_instantiate(drbg, &addtl, coreref, pr);
+       BUG_ON(0 == ret);
+       /* test uninstantiated DRBG */
+       len = drbg_generate(drbg, buf, (max_request_bytes + 1), NULL);
+       BUG_ON(0 < len);
+       /* all tests passed */
+       rc = 0;
+
+       pr_devel("DRBG: Sanity tests for failure code paths successfully "
+                "completed\n");
+
+       drbg_uninstantiate(drbg);
+outbuf:
+       kzfree(drbg);
+       return rc;
+#else /* CONFIG_CRYPTO_FIPS */
+       return 0;
+#endif /* CONFIG_CRYPTO_FIPS */
+}
+
+static struct crypto_alg drbg_algs[22];
+
+/*
+ * Fill the array drbg_algs used to register the different DRBGs
+ * with the kernel crypto API. To fill the array, the information
+ * from drbg_cores[] is used.
+ */
+static inline void __init drbg_fill_array(struct crypto_alg *alg,
+                                         const struct drbg_core *core, int pr)
+{
+       int pos = 0;
+       static int priority = 100;
+
+       memset(alg, 0, sizeof(struct crypto_alg));
+       memcpy(alg->cra_name, "stdrng", 6);
+       if (pr) {
+               memcpy(alg->cra_driver_name, "drbg_pr_", 8);
+               pos = 8;
+       } else {
+               memcpy(alg->cra_driver_name, "drbg_nopr_", 10);
+               pos = 10;
+       }
+       memcpy(alg->cra_driver_name + pos, core->cra_name,
+              strlen(core->cra_name));
+
+       alg->cra_priority = priority;
+       priority++;
+       /*
+        * If FIPS mode is enabled, the selected DRBG shall have the
+        * highest cra_priority over other stdrng instances to ensure
+        * it is selected.
+        */
+       if (fips_enabled)
+               alg->cra_priority += 200;
+
+       alg->cra_flags          = CRYPTO_ALG_TYPE_RNG;
+       alg->cra_ctxsize        = sizeof(struct drbg_state);
+       alg->cra_type           = &crypto_rng_type;
+       alg->cra_module         = THIS_MODULE;
+       alg->cra_init           = drbg_kcapi_init;
+       alg->cra_exit           = drbg_kcapi_cleanup;
+       alg->cra_u.rng.rng_make_random  = drbg_kcapi_random;
+       alg->cra_u.rng.rng_reset        = drbg_kcapi_reset;
+       alg->cra_u.rng.seedsize = 0;
+}
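For a core whose cra_name is "ctr_aes128", for instance, drbg_init() below
produces two entries in drbg_algs: one with cra_driver_name
"drbg_pr_ctr_aes128" and one with "drbg_nopr_ctr_aes128", both registered
under the generic cra_name "stdrng" with ascending cra_priority values.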
+
+static int __init drbg_init(void)
+{
+       unsigned int i = 0; /* index into drbg_algs */
+       unsigned int j = 0; /* index into drbg_cores */
+       int ret = -EFAULT;
+
+       ret = drbg_healthcheck_sanity();
+       if (ret)
+               return ret;
+
+       if (ARRAY_SIZE(drbg_cores) * 2 > ARRAY_SIZE(drbg_algs)) {
+               pr_info("DRBG: Cannot register all DRBG types"
+                       "(slots needed: %zu, slots available: %zu)\n",
+                       ARRAY_SIZE(drbg_cores) * 2, ARRAY_SIZE(drbg_algs));
+               return ret;
+       }
+
+       /*
+        * each DRBG definition can be used with PR and without PR, thus
+        * we instantiate each DRBG in drbg_cores[] twice.
+        *
+        * As the order of placing them into the drbg_algs array matters
+        * (the later DRBGs receive a higher cra_priority), we register the
+        * prediction resistance DRBGs first, as they should not be too
+        * interesting.
+        */
+       for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
+               drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 1);
+       for (j = 0; ARRAY_SIZE(drbg_cores) > j; j++, i++)
+               drbg_fill_array(&drbg_algs[i], &drbg_cores[j], 0);
+       return crypto_register_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
+}
+
+static void __exit drbg_exit(void)
+{
+       crypto_unregister_algs(drbg_algs, (ARRAY_SIZE(drbg_cores) * 2));
+}
+
+module_init(drbg_init);
+module_exit(drbg_exit);
+#ifndef CRYPTO_DRBG_HASH_STRING
+#define CRYPTO_DRBG_HASH_STRING ""
+#endif
+#ifndef CRYPTO_DRBG_HMAC_STRING
+#define CRYPTO_DRBG_HMAC_STRING ""
+#endif
+#ifndef CRYPTO_DRBG_CTR_STRING
+#define CRYPTO_DRBG_CTR_STRING ""
+#endif
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Stephan Mueller <smueller@chronox.de>");
+MODULE_DESCRIPTION("NIST SP800-90A Deterministic Random Bit Generator (DRBG) "
+                  "using following cores: "
+                  CRYPTO_DRBG_HASH_STRING
+                  CRYPTO_DRBG_HMAC_STRING
+                  CRYPTO_DRBG_CTR_STRING);
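Because every instance registers under the generic cra_name "stdrng", a
consumer that does not care about the exact flavor can leave the selection
to the priority mechanism (a sketch):

        struct crypto_rng *rng = crypto_alloc_rng("stdrng", 0, 0);

In FIPS mode, the +200 cra_priority bump in drbg_fill_array() ensures that
this resolves to one of the DRBG instances rather than another stdrng
provider.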
index 42ce9f5..bf7ab4a 100644 (file)
@@ -68,7 +68,7 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
        struct eseqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req);
        struct ablkcipher_request *subreq;
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        void *data;
        struct scatterlist *osrc, *odst;
        struct scatterlist *dst;
@@ -86,7 +86,7 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
        ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
 
        giv = req->giv;
-       complete = req->creq.base.complete;
+       compl = req->creq.base.complete;
        data = req->creq.base.data;
 
        osrc = req->creq.src;
@@ -101,11 +101,11 @@ static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req)
        if (vsrc != giv + ivsize && vdst != giv + ivsize) {
                giv = PTR_ALIGN((u8 *)reqctx->tail,
                                crypto_ablkcipher_alignmask(geniv) + 1);
-               complete = eseqiv_complete;
+               compl = eseqiv_complete;
                data = req;
        }
 
-       ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
+       ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
                                        data);
 
        sg_init_table(reqctx->src, 2);
index b4f0179..276cdac 100644 (file)
@@ -228,14 +228,14 @@ static void gcm_hash_final_done(struct crypto_async_request *areq, int err);
 
 static int gcm_hash_update(struct aead_request *req,
                           struct crypto_gcm_req_priv_ctx *pctx,
-                          crypto_completion_t complete,
+                          crypto_completion_t compl,
                           struct scatterlist *src,
                           unsigned int len)
 {
        struct ahash_request *ahreq = &pctx->u.ahreq;
 
        ahash_request_set_callback(ahreq, aead_request_flags(req),
-                                  complete, req);
+                                  compl, req);
        ahash_request_set_crypt(ahreq, src, NULL, len);
 
        return crypto_ahash_update(ahreq);
@@ -244,12 +244,12 @@ static int gcm_hash_update(struct aead_request *req,
 static int gcm_hash_remain(struct aead_request *req,
                           struct crypto_gcm_req_priv_ctx *pctx,
                           unsigned int remain,
-                          crypto_completion_t complete)
+                          crypto_completion_t compl)
 {
        struct ahash_request *ahreq = &pctx->u.ahreq;
 
        ahash_request_set_callback(ahreq, aead_request_flags(req),
-                                  complete, req);
+                                  compl, req);
        sg_init_one(pctx->src, gcm_zeroes, remain);
        ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);
 
@@ -375,14 +375,14 @@ static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
 {
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
        struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        unsigned int remain = 0;
 
        if (!err && gctx->cryptlen) {
                remain = gcm_remain(gctx->cryptlen);
-               complete = remain ? gcm_hash_crypt_done :
+               compl = remain ? gcm_hash_crypt_done :
                        gcm_hash_crypt_remain_done;
-               err = gcm_hash_update(req, pctx, complete,
+               err = gcm_hash_update(req, pctx, compl,
                                      gctx->src, gctx->cryptlen);
                if (err == -EINPROGRESS || err == -EBUSY)
                        return;
@@ -429,14 +429,14 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
 static void __gcm_hash_init_done(struct aead_request *req, int err)
 {
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        unsigned int remain = 0;
 
        if (!err && req->assoclen) {
                remain = gcm_remain(req->assoclen);
-               complete = remain ? gcm_hash_assoc_done :
+               compl = remain ? gcm_hash_assoc_done :
                        gcm_hash_assoc_remain_done;
-               err = gcm_hash_update(req, pctx, complete,
+               err = gcm_hash_update(req, pctx, compl,
                                      req->assoc, req->assoclen);
                if (err == -EINPROGRESS || err == -EBUSY)
                        return;
@@ -462,7 +462,7 @@ static int gcm_hash(struct aead_request *req,
        struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
        struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
        unsigned int remain;
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        int err;
 
        ahash_request_set_tfm(ahreq, ctx->ghash);
@@ -473,8 +473,8 @@ static int gcm_hash(struct aead_request *req,
        if (err)
                return err;
        remain = gcm_remain(req->assoclen);
-       complete = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
-       err = gcm_hash_update(req, pctx, complete, req->assoc, req->assoclen);
+       compl = remain ? gcm_hash_assoc_done : gcm_hash_assoc_remain_done;
+       err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen);
        if (err)
                return err;
        if (remain) {
@@ -484,8 +484,8 @@ static int gcm_hash(struct aead_request *req,
                        return err;
        }
        remain = gcm_remain(gctx->cryptlen);
-       complete = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
-       err = gcm_hash_update(req, pctx, complete, gctx->src, gctx->cryptlen);
+       compl = remain ? gcm_hash_crypt_done : gcm_hash_crypt_remain_done;
+       err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen);
        if (err)
                return err;
        if (remain) {
index 1c2aa69..a8ff2f7 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/crypto.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 #include <linux/lzo.h>
 
 struct lzo_ctx {
@@ -30,7 +31,10 @@ static int lzo_init(struct crypto_tfm *tfm)
 {
        struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
+       ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS,
+                                   GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+       if (!ctx->lzo_comp_mem)
+               ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS);
        if (!ctx->lzo_comp_mem)
                return -ENOMEM;
 
@@ -41,7 +45,7 @@ static void lzo_exit(struct crypto_tfm *tfm)
 {
        struct lzo_ctx *ctx = crypto_tfm_ctx(tfm);
 
-       vfree(ctx->lzo_comp_mem);
+       kvfree(ctx->lzo_comp_mem);
 }
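The switch to kvfree() matches the new allocation strategy: it inspects the
pointer and hands vmalloc()ed memory to vfree() and kmalloc()ed memory to
kfree(), so lzo_exit() needs no record of which path lzo_init() took.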
 
 static int lzo_compress(struct crypto_tfm *tfm, const u8 *src,
index f2cba4e..ee190fc 100644 (file)
@@ -100,7 +100,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
        struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req);
        struct seqiv_ctx *ctx = crypto_ablkcipher_ctx(geniv);
        struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req);
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
@@ -108,7 +108,7 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
 
        ablkcipher_request_set_tfm(subreq, skcipher_geniv_cipher(geniv));
 
-       complete = req->creq.base.complete;
+       compl = req->creq.base.complete;
        data = req->creq.base.data;
        info = req->creq.info;
 
@@ -122,11 +122,11 @@ static int seqiv_givencrypt(struct skcipher_givcrypt_request *req)
                if (!info)
                        return -ENOMEM;
 
-               complete = seqiv_complete;
+               compl = seqiv_complete;
                data = req;
        }
 
-       ablkcipher_request_set_callback(subreq, req->creq.base.flags, complete,
+       ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl,
                                        data);
        ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst,
                                     req->creq.nbytes, info);
@@ -146,7 +146,7 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
        struct seqiv_ctx *ctx = crypto_aead_ctx(geniv);
        struct aead_request *areq = &req->areq;
        struct aead_request *subreq = aead_givcrypt_reqctx(req);
-       crypto_completion_t complete;
+       crypto_completion_t compl;
        void *data;
        u8 *info;
        unsigned int ivsize;
@@ -154,7 +154,7 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
 
        aead_request_set_tfm(subreq, aead_geniv_base(geniv));
 
-       complete = areq->base.complete;
+       compl = areq->base.complete;
        data = areq->base.data;
        info = areq->iv;
 
@@ -168,11 +168,11 @@ static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req)
                if (!info)
                        return -ENOMEM;
 
-               complete = seqiv_aead_complete;
+               compl = seqiv_aead_complete;
                data = req;
        }
 
-       aead_request_set_callback(subreq, areq->base.flags, complete, data);
+       aead_request_set_callback(subreq, areq->base.flags, compl, data);
        aead_request_set_crypt(subreq, areq->src, areq->dst, areq->cryptlen,
                               info);
        aead_request_set_assoc(subreq, areq->assoc, areq->assoclen);
index ba247cf..890449e 100644 (file)
 #define ENCRYPT 1
 #define DECRYPT 0
 
+/*
+ * return a string with the driver name
+ */
+#define get_driver_name(tfm_type, tfm) crypto_tfm_alg_driver_name(tfm_type ## _tfm(tfm))
+
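For instance, get_driver_name(crypto_aead, tfm) token-pastes to
crypto_tfm_alg_driver_name(crypto_aead_tfm(tfm)); the speed tests below use
this to print which driver was actually selected for a given algorithm name.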
 /*
  * Used by test_cipher_speed()
  */
@@ -68,13 +73,13 @@ static char *check[] = {
 };
 
 static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
-                              struct scatterlist *sg, int blen, int sec)
+                              struct scatterlist *sg, int blen, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                if (enc)
                        ret = crypto_blkcipher_encrypt(desc, sg, sg, blen);
@@ -86,7 +91,7 @@ static int test_cipher_jiffies(struct blkcipher_desc *desc, int enc,
        }
 
        printk("%d operations in %d seconds (%ld bytes)\n",
-              bcount, sec, (long)bcount * blen);
+              bcount, secs, (long)bcount * blen);
        return 0;
 }
 
@@ -138,13 +143,13 @@ out:
 }
 
 static int test_aead_jiffies(struct aead_request *req, int enc,
-                               int blen, int sec)
+                               int blen, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                if (enc)
                        ret = crypto_aead_encrypt(req);
@@ -156,7 +161,7 @@ static int test_aead_jiffies(struct aead_request *req, int enc,
        }
 
        printk("%d operations in %d seconds (%ld bytes)\n",
-              bcount, sec, (long)bcount * blen);
+              bcount, secs, (long)bcount * blen);
        return 0;
 }
 
@@ -260,7 +265,7 @@ static void sg_init_aead(struct scatterlist *sg, char *xbuf[XBUFSIZE],
        }
 }
 
-static void test_aead_speed(const char *algo, int enc, unsigned int sec,
+static void test_aead_speed(const char *algo, int enc, unsigned int secs,
                            struct aead_speed_template *template,
                            unsigned int tcount, u8 authsize,
                            unsigned int aad_size, u8 *keysize)
@@ -305,9 +310,6 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
        asg = &sg[8];
        sgout = &asg[8];
 
-
-       printk(KERN_INFO "\ntesting speed of %s %s\n", algo, e);
-
        tfm = crypto_alloc_aead(algo, 0, 0);
 
        if (IS_ERR(tfm)) {
@@ -316,6 +318,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
                goto out_notfm;
        }
 
+       printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
+                       get_driver_name(crypto_aead, tfm), e);
+
        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                pr_err("alg: aead: Failed to allocate request for %s\n",
@@ -374,8 +379,9 @@ static void test_aead_speed(const char *algo, int enc, unsigned int sec,
                        aead_request_set_crypt(req, sg, sgout, *b_size, iv);
                        aead_request_set_assoc(req, asg, aad_size);
 
-                       if (sec)
-                               ret = test_aead_jiffies(req, enc, *b_size, sec);
+                       if (secs)
+                               ret = test_aead_jiffies(req, enc, *b_size,
+                                                       secs);
                        else
                                ret = test_aead_cycles(req, enc, *b_size);
 
@@ -405,7 +411,7 @@ out_noxbuf:
        return;
 }
 
-static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
+static void test_cipher_speed(const char *algo, int enc, unsigned int secs,
                              struct cipher_speed_template *template,
                              unsigned int tcount, u8 *keysize)
 {
@@ -422,8 +428,6 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
        else
                e = "decryption";
 
-       printk("\ntesting speed of %s %s\n", algo, e);
-
        tfm = crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
 
        if (IS_ERR(tfm)) {
@@ -434,6 +438,9 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
        desc.tfm = tfm;
        desc.flags = 0;
 
+       printk(KERN_INFO "\ntesting speed of %s (%s) %s\n", algo,
+                       get_driver_name(crypto_blkcipher, tfm), e);
+
        i = 0;
        do {
 
@@ -483,9 +490,9 @@ static void test_cipher_speed(const char *algo, int enc, unsigned int sec,
                                crypto_blkcipher_set_iv(tfm, iv, iv_len);
                        }
 
-                       if (sec)
+                       if (secs)
                                ret = test_cipher_jiffies(&desc, enc, sg,
-                                                         *b_size, sec);
+                                                         *b_size, secs);
                        else
                                ret = test_cipher_cycles(&desc, enc, sg,
                                                         *b_size);
@@ -506,13 +513,13 @@ out:
 
 static int test_hash_jiffies_digest(struct hash_desc *desc,
                                    struct scatterlist *sg, int blen,
-                                   char *out, int sec)
+                                   char *out, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                ret = crypto_hash_digest(desc, sg, blen, out);
                if (ret)
@@ -520,22 +527,22 @@ static int test_hash_jiffies_digest(struct hash_desc *desc,
        }
 
        printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / sec, ((long)bcount * blen) / sec);
+              bcount / secs, ((long)bcount * blen) / secs);
 
        return 0;
 }
 
 static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
-                            int blen, int plen, char *out, int sec)
+                            int blen, int plen, char *out, int secs)
 {
        unsigned long start, end;
        int bcount, pcount;
        int ret;
 
        if (plen == blen)
-               return test_hash_jiffies_digest(desc, sg, blen, out, sec);
+               return test_hash_jiffies_digest(desc, sg, blen, out, secs);
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                ret = crypto_hash_init(desc);
                if (ret)
@@ -552,7 +559,7 @@ static int test_hash_jiffies(struct hash_desc *desc, struct scatterlist *sg,
        }
 
        printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / sec, ((long)bcount * blen) / sec);
+              bcount / secs, ((long)bcount * blen) / secs);
 
        return 0;
 }
@@ -673,7 +680,7 @@ static void test_hash_sg_init(struct scatterlist *sg)
        }
 }
 
-static void test_hash_speed(const char *algo, unsigned int sec,
+static void test_hash_speed(const char *algo, unsigned int secs,
                            struct hash_speed *speed)
 {
        struct scatterlist sg[TVMEMSIZE];
@@ -683,8 +690,6 @@ static void test_hash_speed(const char *algo, unsigned int sec,
        int i;
        int ret;
 
-       printk(KERN_INFO "\ntesting speed of %s\n", algo);
-
        tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
 
        if (IS_ERR(tfm)) {
@@ -693,6 +698,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
                return;
        }
 
+       printk(KERN_INFO "\ntesting speed of %s (%s)\n", algo,
+                       get_driver_name(crypto_hash, tfm));
+
        desc.tfm = tfm;
        desc.flags = 0;
 
@@ -718,9 +726,9 @@ static void test_hash_speed(const char *algo, unsigned int sec,
                       "(%5u byte blocks,%5u bytes per update,%4u updates): ",
                       i, speed[i].blen, speed[i].plen, speed[i].blen / speed[i].plen);
 
-               if (sec)
+               if (secs)
                        ret = test_hash_jiffies(&desc, sg, speed[i].blen,
-                                               speed[i].plen, output, sec);
+                                               speed[i].plen, output, secs);
                else
                        ret = test_hash_cycles(&desc, sg, speed[i].blen,
                                               speed[i].plen, output);
@@ -765,13 +773,13 @@ static inline int do_one_ahash_op(struct ahash_request *req, int ret)
 }
 
 static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
-                                    char *out, int sec)
+                                    char *out, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                ret = do_one_ahash_op(req, crypto_ahash_digest(req));
                if (ret)
@@ -779,22 +787,22 @@ static int test_ahash_jiffies_digest(struct ahash_request *req, int blen,
        }
 
        printk("%6u opers/sec, %9lu bytes/sec\n",
-              bcount / sec, ((long)bcount * blen) / sec);
+              bcount / secs, ((long)bcount * blen) / secs);
 
        return 0;
 }
 
 static int test_ahash_jiffies(struct ahash_request *req, int blen,
-                             int plen, char *out, int sec)
+                             int plen, char *out, int secs)
 {
        unsigned long start, end;
        int bcount, pcount;
        int ret;
 
        if (plen == blen)
-               return test_ahash_jiffies_digest(req, blen, out, sec);
+               return test_ahash_jiffies_digest(req, blen, out, secs);
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                ret = crypto_ahash_init(req);
                if (ret)
@@ -811,7 +819,7 @@ static int test_ahash_jiffies(struct ahash_request *req, int blen,
        }
 
        pr_cont("%6u opers/sec, %9lu bytes/sec\n",
-               bcount / sec, ((long)bcount * blen) / sec);
+               bcount / secs, ((long)bcount * blen) / secs);
 
        return 0;
 }
@@ -911,7 +919,7 @@ out:
        return 0;
 }
 
-static void test_ahash_speed(const char *algo, unsigned int sec,
+static void test_ahash_speed(const char *algo, unsigned int secs,
                             struct hash_speed *speed)
 {
        struct scatterlist sg[TVMEMSIZE];
@@ -921,8 +929,6 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
        static char output[1024];
        int i, ret;
 
-       printk(KERN_INFO "\ntesting speed of async %s\n", algo);
-
        tfm = crypto_alloc_ahash(algo, 0, 0);
        if (IS_ERR(tfm)) {
                pr_err("failed to load transform for %s: %ld\n",
@@ -930,6 +936,9 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
                return;
        }
 
+       printk(KERN_INFO "\ntesting speed of async %s (%s)\n", algo,
+                       get_driver_name(crypto_ahash, tfm));
+
        if (crypto_ahash_digestsize(tfm) > sizeof(output)) {
                pr_err("digestsize(%u) > outputbuffer(%zu)\n",
                       crypto_ahash_digestsize(tfm), sizeof(output));
@@ -960,9 +969,9 @@ static void test_ahash_speed(const char *algo, unsigned int sec,
 
                ahash_request_set_crypt(req, sg, output, speed[i].plen);
 
-               if (sec)
+               if (secs)
                        ret = test_ahash_jiffies(req, speed[i].blen,
-                                                speed[i].plen, output, sec);
+                                                speed[i].plen, output, secs);
                else
                        ret = test_ahash_cycles(req, speed[i].blen,
                                                speed[i].plen, output);
@@ -994,13 +1003,13 @@ static inline int do_one_acipher_op(struct ablkcipher_request *req, int ret)
 }
 
 static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
-                               int blen, int sec)
+                               int blen, int secs)
 {
        unsigned long start, end;
        int bcount;
        int ret;
 
-       for (start = jiffies, end = start + sec * HZ, bcount = 0;
+       for (start = jiffies, end = start + secs * HZ, bcount = 0;
             time_before(jiffies, end); bcount++) {
                if (enc)
                        ret = do_one_acipher_op(req,
@@ -1014,7 +1023,7 @@ static int test_acipher_jiffies(struct ablkcipher_request *req, int enc,
        }
 
        pr_cont("%d operations in %d seconds (%ld bytes)\n",
-               bcount, sec, (long)bcount * blen);
+               bcount, secs, (long)bcount * blen);
        return 0;
 }
 
@@ -1065,7 +1074,7 @@ out:
        return ret;
 }
 
-static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
+static void test_acipher_speed(const char *algo, int enc, unsigned int secs,
                               struct cipher_speed_template *template,
                               unsigned int tcount, u8 *keysize)
 {
@@ -1083,8 +1092,6 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
        else
                e = "decryption";
 
-       pr_info("\ntesting speed of async %s %s\n", algo, e);
-
        init_completion(&tresult.completion);
 
        tfm = crypto_alloc_ablkcipher(algo, 0, 0);
@@ -1095,6 +1102,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
                return;
        }
 
+       pr_info("\ntesting speed of async %s (%s) %s\n", algo,
+                       get_driver_name(crypto_ablkcipher, tfm), e);
+
        req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                pr_err("tcrypt: skcipher: Failed to allocate request for %s\n",
@@ -1168,9 +1178,9 @@ static void test_acipher_speed(const char *algo, int enc, unsigned int sec,
 
                        ablkcipher_request_set_crypt(req, sg, sg, *b_size, iv);
 
-                       if (sec)
+                       if (secs)
                                ret = test_acipher_jiffies(req, enc,
-                                                          *b_size, sec);
+                                                          *b_size, secs);
                        else
                                ret = test_acipher_cycles(req, enc,
                                                          *b_size);
@@ -1585,6 +1595,12 @@ static int do_test(int m)
                test_cipher_speed("cbc(des3_ede)", DECRYPT, sec,
                                des3_speed_template, DES3_SPEED_VECTORS,
                                speed_template_24);
+               test_cipher_speed("ctr(des3_ede)", ENCRYPT, sec,
+                               des3_speed_template, DES3_SPEED_VECTORS,
+                               speed_template_24);
+               test_cipher_speed("ctr(des3_ede)", DECRYPT, sec,
+                               des3_speed_template, DES3_SPEED_VECTORS,
+                               speed_template_24);
                break;
 
        case 202:
index 498649a..ac2b631 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <crypto/rng.h>
+#include <crypto/drbg.h>
 
 #include "internal.h"
 
@@ -108,6 +109,11 @@ struct cprng_test_suite {
        unsigned int count;
 };
 
+struct drbg_test_suite {
+       struct drbg_testvec *vecs;
+       unsigned int count;
+};
+
 struct alg_test_desc {
        const char *alg;
        int (*test)(const struct alg_test_desc *desc, const char *driver,
@@ -121,6 +127,7 @@ struct alg_test_desc {
                struct pcomp_test_suite pcomp;
                struct hash_test_suite hash;
                struct cprng_test_suite cprng;
+               struct drbg_test_suite drbg;
        } suite;
 };
 
@@ -191,13 +198,20 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
        const char *algo = crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm));
        unsigned int i, j, k, temp;
        struct scatterlist sg[8];
-       char result[64];
+       char *result;
+       char *key;
        struct ahash_request *req;
        struct tcrypt_result tresult;
        void *hash_buff;
        char *xbuf[XBUFSIZE];
        int ret = -ENOMEM;
 
+       result = kmalloc(MAX_DIGEST_SIZE, GFP_KERNEL);
+       if (!result)
+               return ret;
+       key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
+       if (!key)
+               goto out_nobuf;
        if (testmgr_alloc_buf(xbuf))
                goto out_nobuf;
 
@@ -222,7 +236,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                        goto out;
 
                j++;
-               memset(result, 0, 64);
+               memset(result, 0, MAX_DIGEST_SIZE);
 
                hash_buff = xbuf[0];
                hash_buff += align_offset;
@@ -232,8 +246,14 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 
                if (template[i].ksize) {
                        crypto_ahash_clear_flags(tfm, ~0);
-                       ret = crypto_ahash_setkey(tfm, template[i].key,
-                                                 template[i].ksize);
+                       if (template[i].ksize > MAX_KEYLEN) {
+                               pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+                                      j, algo, template[i].ksize, MAX_KEYLEN);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(key, template[i].key, template[i].ksize);
+                       ret = crypto_ahash_setkey(tfm, key, template[i].ksize);
                        if (ret) {
                                printk(KERN_ERR "alg: hash: setkey failed on "
                                       "test %d for %s: ret=%d\n", j, algo,
@@ -293,7 +313,7 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
 
                if (template[i].np) {
                        j++;
-                       memset(result, 0, 64);
+                       memset(result, 0, MAX_DIGEST_SIZE);
 
                        temp = 0;
                        sg_init_table(sg, template[i].np);
@@ -312,8 +332,16 @@ static int __test_hash(struct crypto_ahash *tfm, struct hash_testvec *template,
                        }
 
                        if (template[i].ksize) {
+                               if (template[i].ksize > MAX_KEYLEN) {
+                                       pr_err("alg: hash: setkey failed on test %d for %s: key size %d > %d\n",
+                                              j, algo, template[i].ksize,
+                                              MAX_KEYLEN);
+                                       ret = -EINVAL;
+                                       goto out;
+                               }
                                crypto_ahash_clear_flags(tfm, ~0);
-                               ret = crypto_ahash_setkey(tfm, template[i].key,
+                               memcpy(key, template[i].key, template[i].ksize);
+                               ret = crypto_ahash_setkey(tfm, key,
                                                          template[i].ksize);
 
                                if (ret) {
@@ -365,6 +393,8 @@ out:
 out_noreq:
        testmgr_free_buf(xbuf);
 out_nobuf:
+       kfree(key);
+       kfree(result);
        return ret;
 }
 
@@ -422,6 +452,9 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
        iv = kzalloc(MAX_IVLEN, GFP_KERNEL);
        if (!iv)
                return ret;
+       key = kmalloc(MAX_KEYLEN, GFP_KERNEL);
+       if (!key)
+               goto out_noxbuf;
        if (testmgr_alloc_buf(xbuf))
                goto out_noxbuf;
        if (testmgr_alloc_buf(axbuf))
@@ -486,7 +519,14 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                                crypto_aead_set_flags(
                                        tfm, CRYPTO_TFM_REQ_WEAK_KEY);
 
-                       key = template[i].key;
+                       if (template[i].klen > MAX_KEYLEN) {
+                               pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+                                      d, j, algo, template[i].klen,
+                                      MAX_KEYLEN);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(key, template[i].key, template[i].klen);
 
                        ret = crypto_aead_setkey(tfm, key,
                                                 template[i].klen);
@@ -587,7 +627,14 @@ static int __test_aead(struct crypto_aead *tfm, int enc,
                        if (template[i].wk)
                                crypto_aead_set_flags(
                                        tfm, CRYPTO_TFM_REQ_WEAK_KEY);
-                       key = template[i].key;
+                       if (template[i].klen > MAX_KEYLEN) {
+                               pr_err("alg: aead%s: setkey failed on test %d for %s: key size %d > %d\n",
+                                      d, j, algo, template[i].klen,
+                                      MAX_KEYLEN);
+                               ret = -EINVAL;
+                               goto out;
+                       }
+                       memcpy(key, template[i].key, template[i].klen);
 
                        ret = crypto_aead_setkey(tfm, key, template[i].klen);
                        if (!ret == template[i].fail) {
@@ -769,6 +816,7 @@ out_nooutbuf:
 out_noaxbuf:
        testmgr_free_buf(xbuf);
 out_noxbuf:
+       kfree(key);
        kfree(iv);
        return ret;
 }
@@ -1715,6 +1763,100 @@ static int alg_test_cprng(const struct alg_test_desc *desc, const char *driver,
        return err;
 }
 
+static int drbg_cavs_test(struct drbg_testvec *test, int pr,
+                         const char *driver, u32 type, u32 mask)
+{
+       int ret = -EAGAIN;
+       struct crypto_rng *drng;
+       struct drbg_test_data test_data;
+       struct drbg_string addtl, pers, testentropy;
+       unsigned char *buf = kzalloc(test->expectedlen, GFP_KERNEL);
+
+       if (!buf)
+               return -ENOMEM;
+
+       drng = crypto_alloc_rng(driver, type, mask);
+       if (IS_ERR(drng)) {
+               printk(KERN_ERR "alg: drbg: could not allocate DRNG handle for "
+                      "%s\n", driver);
+               kzfree(buf);
+               return -ENOMEM;
+       }
+
+       test_data.testentropy = &testentropy;
+       drbg_string_fill(&testentropy, test->entropy, test->entropylen);
+       drbg_string_fill(&pers, test->pers, test->perslen);
+       ret = crypto_drbg_reset_test(drng, &pers, &test_data);
+       if (ret) {
+               printk(KERN_ERR "alg: drbg: Failed to reset rng\n");
+               goto outbuf;
+       }
+
+       drbg_string_fill(&addtl, test->addtla, test->addtllen);
+       if (pr) {
+               drbg_string_fill(&testentropy, test->entpra, test->entprlen);
+               ret = crypto_drbg_get_bytes_addtl_test(drng,
+                       buf, test->expectedlen, &addtl, &test_data);
+       } else {
+               ret = crypto_drbg_get_bytes_addtl(drng,
+                       buf, test->expectedlen, &addtl);
+       }
+       if (ret <= 0) {
+               printk(KERN_ERR "alg: drbg: could not obtain random data for "
+                      "driver %s\n", driver);
+               goto outbuf;
+       }
+
+       drbg_string_fill(&addtl, test->addtlb, test->addtllen);
+       if (pr) {
+               drbg_string_fill(&testentropy, test->entprb, test->entprlen);
+               ret = crypto_drbg_get_bytes_addtl_test(drng,
+                       buf, test->expectedlen, &addtl, &test_data);
+       } else {
+               ret = crypto_drbg_get_bytes_addtl(drng,
+                       buf, test->expectedlen, &addtl);
+       }
+       if (ret <= 0) {
+               printk(KERN_ERR "alg: drbg: could not obtain random data for "
+                      "driver %s\n", driver);
+               goto outbuf;
+       }
+
+       ret = memcmp(test->expected, buf, test->expectedlen);
+
+outbuf:
+       crypto_free_rng(drng);
+       kzfree(buf);
+       return ret;
+}
+
+static int alg_test_drbg(const struct alg_test_desc *desc, const char *driver,
+                        u32 type, u32 mask)
+{
+       int err = 0;
+       int pr = 0;
+       int i = 0;
+       struct drbg_testvec *template = desc->suite.drbg.vecs;
+       unsigned int tcount = desc->suite.drbg.count;
+
+       if (0 == memcmp(driver, "drbg_pr_", 8))
+               pr = 1;
+
+       for (i = 0; i < tcount; i++) {
+               err = drbg_cavs_test(&template[i], pr, driver, type, mask);
+               if (err) {
+                       printk(KERN_ERR "alg: drbg: Test %d failed for %s\n",
+                              i, driver);
+                       err = -EINVAL;
+                       break;
+               }
+       }
+       return err;
+}
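The prediction resistance mode is derived purely from the driver name: a
driver such as "drbg_pr_ctr_aes128" matches the "drbg_pr_" prefix, so each
vector runs through drbg_cavs_test() with pr = 1 and per-generate test
entropy, while "drbg_nopr_" drivers take the crypto_drbg_get_bytes_addtl()
path without reseeding.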
+
 static int alg_test_null(const struct alg_test_desc *desc,
                             const char *driver, u32 type, u32 mask)
 {
@@ -2457,6 +2599,152 @@ static const struct alg_test_desc alg_test_descs[] = {
        }, {
                .alg = "digest_null",
                .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_ctr_aes128",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_ctr_aes128_tv_template,
+                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes128_tv_template)
+                       }
+               }
+       }, {
+               .alg = "drbg_nopr_ctr_aes192",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_ctr_aes192_tv_template,
+                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes192_tv_template)
+                       }
+               }
+       }, {
+               .alg = "drbg_nopr_ctr_aes256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_ctr_aes256_tv_template,
+                               .count = ARRAY_SIZE(drbg_nopr_ctr_aes256_tv_template)
+                       }
+               }
+       }, {
+               /*
+                * There is no need to specifically test the DRBG with every
+                * backend cipher -- covered by drbg_nopr_hmac_sha256 test
+                */
+               .alg = "drbg_nopr_hmac_sha1",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_hmac_sha256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_hmac_sha256_tv_template,
+                               .count =
+                               ARRAY_SIZE(drbg_nopr_hmac_sha256_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_nopr_hmac_sha256 test */
+               .alg = "drbg_nopr_hmac_sha384",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_hmac_sha512",
+               .test = alg_test_null,
+               .fips_allowed = 1,
+       }, {
+               .alg = "drbg_nopr_sha1",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_sha256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_nopr_sha256_tv_template,
+                               .count = ARRAY_SIZE(drbg_nopr_sha256_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_nopr_sha256 test */
+               .alg = "drbg_nopr_sha384",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_nopr_sha512",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_ctr_aes128",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_pr_ctr_aes128_tv_template,
+                               .count = ARRAY_SIZE(drbg_pr_ctr_aes128_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_pr_ctr_aes128 test */
+               .alg = "drbg_pr_ctr_aes192",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_ctr_aes256",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_hmac_sha1",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_hmac_sha256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_pr_hmac_sha256_tv_template,
+                               .count = ARRAY_SIZE(drbg_pr_hmac_sha256_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_pr_hmac_sha256 test */
+               .alg = "drbg_pr_hmac_sha384",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_hmac_sha512",
+               .test = alg_test_null,
+               .fips_allowed = 1,
+       }, {
+               .alg = "drbg_pr_sha1",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_sha256",
+               .test = alg_test_drbg,
+               .fips_allowed = 1,
+               .suite = {
+                       .drbg = {
+                               .vecs = drbg_pr_sha256_tv_template,
+                               .count = ARRAY_SIZE(drbg_pr_sha256_tv_template)
+                       }
+               }
+       }, {
+               /* covered by drbg_pr_sha256 test */
+               .alg = "drbg_pr_sha384",
+               .fips_allowed = 1,
+               .test = alg_test_null,
+       }, {
+               .alg = "drbg_pr_sha512",
+               .fips_allowed = 1,
+               .test = alg_test_null,
        }, {
                .alg = "ecb(__aes-aesni)",
                .test = alg_test_null,
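
The entries above register each DRBG flavour in alg_test_descs[], so the self-test runs the first time the algorithm is instantiated, and .fips_allowed = 1 additionally whitelists it in FIPS mode. Outside of testmgr, a consumer would reach the same DRBGs through the kernel's RNG API. A minimal sketch, assuming the crypto_rng interface from include/crypto/rng.h of this kernel generation; the function name drbg_demo and the buffer size are illustrative only:

#include <crypto/rng.h>

static int drbg_demo(void)
{
	struct crypto_rng *rng;
	u8 buf[64];
	int ret;

	/* cra_name as registered above; the self-test runs on allocation */
	rng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
	if (IS_ERR(rng))
		return PTR_ERR(rng);

	ret = crypto_rng_get_bytes(rng, buf, sizeof(buf)); /* < 0 on error */
	crypto_free_rng(rng);
	return ret;
}
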
index 69d0dd8..6597203 100644
@@ -32,7 +32,7 @@
 #define MAX_DIGEST_SIZE                64
 #define MAX_TAP                        8
 
-#define MAX_KEYLEN             56
+#define MAX_KEYLEN             160
 #define MAX_IVLEN              32
 
 struct hash_testvec {
@@ -92,6 +92,21 @@ struct cprng_testvec {
        unsigned short loops;
 };
 
+struct drbg_testvec {
+       unsigned char *entropy;
+       size_t entropylen;
+       unsigned char *entpra;
+       unsigned char *entprb;
+       size_t entprlen;
+       unsigned char *addtla;
+       unsigned char *addtlb;
+       size_t addtllen;
+       unsigned char *pers;
+       size_t perslen;
+       unsigned char *expected;
+       size_t expectedlen;
+};
+
 static char zeroed_string[48];
 
 /*
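
The struct added above carries everything one NIST CAVS "generate" round needs. A hedged, userspace-compilable sketch of the order in which a harness consumes the fields; drbg_seed and drbg_generate are stand-in names stubbed out so the sketch compiles, and for the prediction-resistance vectors entpra/entprb would additionally be injected before each generate (see the note after the PR templates below):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Trimmed copy of the structure added in the hunk above. */
struct drbg_testvec {
	unsigned char *entropy;   size_t entropylen;
	unsigned char *addtla;    unsigned char *addtlb;  size_t addtllen;
	unsigned char *pers;      size_t perslen;
	unsigned char *expected;  size_t expectedlen;
};

/* Stand-ins for the real DRBG core. */
static void drbg_seed(const unsigned char *entropy, size_t elen,
		      const unsigned char *pers, size_t plen)
{
	(void)entropy; (void)elen; (void)pers; (void)plen;
}

static void drbg_generate(unsigned char *out, size_t outlen,
			  const unsigned char *addtl, size_t alen)
{
	(void)addtl; (void)alen;
	memset(out, 0, outlen);		/* stub output */
}

static int drbg_run_testvec(const struct drbg_testvec *tv, unsigned char *out)
{
	/* 1. instantiate from the entropy(+nonce) blob and personalization */
	drbg_seed(tv->entropy, tv->entropylen, tv->pers, tv->perslen);
	/* 2. first generate request, additional input A */
	drbg_generate(out, tv->expectedlen, tv->addtla, tv->addtllen);
	/* 3. second generate request, additional input B; CAVS compares
	 * only this second output against .expected */
	drbg_generate(out, tv->expectedlen, tv->addtlb, tv->addtllen);
	return tv->expectedlen ?
	       memcmp(out, tv->expected, tv->expectedlen) : 0;
}

int main(void)
{
	struct drbg_testvec tv = { 0 };	/* dummy vector: exercises the walk */
	unsigned char out[128];

	printf("sequencing %s\n",
	       drbg_run_testvec(&tv, out) ? "mismatch" : "ok");
	return 0;
}
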
@@ -1807,18 +1822,59 @@ static struct hash_testvec tgr128_tv_template[] = {
        },
 };
 
-#define GHASH_TEST_VECTORS 1
+#define GHASH_TEST_VECTORS 5
 
 static struct hash_testvec ghash_tv_template[] =
 {
        {
-
-               .key    = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03\xff\xca\xff\x95\xf8\x30\xf0\x61",
+               .key    = "\xdf\xa6\xbf\x4d\xed\x81\xdb\x03"
+                         "\xff\xca\xff\x95\xf8\x30\xf0\x61",
                .ksize  = 16,
-               .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+               .plaintext = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0"
+                            "\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
                .psize  = 16,
                .digest = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
                          "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+       }, {
+               .key    = "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b"
+                         "\x0b\x0b\x0b\x0b\x0b\x0b\x0b\x0b",
+               .ksize  = 16,
+               .plaintext = "what do ya want for nothing?",
+               .psize  = 28,
+               .digest = "\x3e\x1f\x5c\x4d\x65\xf0\xef\xce"
+                         "\x0d\x61\x06\x27\x66\x51\xd5\xe2",
+               .np     = 2,
+               .tap    = {14, 14}
+       }, {
+               .key    = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa"
+                         "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa",
+               .ksize  = 16,
+               .plaintext = "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                       "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                       "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd"
+                       "\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd\xdd",
+               .psize  = 50,
+               .digest = "\xfb\x49\x8a\x36\xe1\x96\xe1\x96"
+                         "\xe1\x96\xe1\x96\xe1\x96\xe1\x96",
+       }, {
+               .key    = "\xda\x53\xeb\x0a\xd2\xc5\x5b\xb6"
+                         "\x4f\xc4\x80\x2c\xc3\xfe\xda\x60",
+               .ksize  = 16,
+               .plaintext = "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+                       "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+                       "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd"
+                       "\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd\xcd",
+               .psize  = 50,
+               .digest = "\x2b\x5c\x0c\x7f\x52\xd1\x60\xc2"
+                         "\x49\xed\x6e\x32\x7a\xa9\xbe\x08",
+       }, {
+               .key    = "\x95\x2b\x2a\x56\xa5\x60\x04a\xc0"
+                         "\xb3\x2b\x66\x56\xa0\x5b\x40\xb6",
+               .ksize  = 16,
+               .plaintext = "Test With Truncation",
+               .psize  = 20,
+               .digest = "\xf8\x94\x87\x2a\x4b\x63\x99\x28"
+                         "\x23\xf7\x93\xf7\x19\xf5\x96\xd9",
        },
 };
 
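The reformatting above also grows the GHASH suite from one vector to five. GHASH itself is the GF(2^128) polynomial MAC underlying GCM (NIST SP 800-38D). For orientation, a from-scratch standalone sketch of the field multiplication and digest loop; it follows the SP 800-38D bit ordering but has not been checked against the vectors above, and all names are local to the sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint64_t hi, lo; } be128;	/* hi holds bytes 0..7 */

static be128 load_be128(const unsigned char *p)
{
	be128 x = { 0, 0 };

	for (int i = 0; i < 8; i++)
		x.hi = (x.hi << 8) | p[i];
	for (int i = 8; i < 16; i++)
		x.lo = (x.lo << 8) | p[i];
	return x;
}

/* GF(2^128) multiply per SP 800-38D: MSB-first bit order, reduction by
 * x^128 + x^7 + x^2 + x + 1, i.e. constant 0xE1 in the top byte. */
static be128 gf128_mul(be128 x, be128 y)
{
	be128 z = { 0, 0 }, v = y;

	for (int i = 0; i < 128; i++) {
		uint64_t bit = i < 64 ? (x.hi >> (63 - i)) & 1
				      : (x.lo >> (127 - i)) & 1;
		if (bit) {
			z.hi ^= v.hi;
			z.lo ^= v.lo;
		}
		uint64_t lsb = v.lo & 1;
		v.lo = (v.lo >> 1) | (v.hi << 63);
		v.hi >>= 1;
		if (lsb)
			v.hi ^= 0xE1ULL << 56;
	}
	return z;
}

/* GHASH_H(msg): y = 0; per 16-byte block b (zero-padded): y = (y ^ b) * H */
static be128 ghash(const unsigned char key[16],
		   const unsigned char *msg, size_t len)
{
	be128 h = load_be128(key), y = { 0, 0 };
	unsigned char block[16];

	while (len) {
		size_t n = len < 16 ? len : 16;

		memset(block, 0, sizeof(block));
		memcpy(block, msg, n);
		be128 b = load_be128(block);
		y.hi ^= b.hi;
		y.lo ^= b.lo;
		y = gf128_mul(y, h);
		msg += n;
		len -= n;
	}
	return y;
}

int main(void)
{
	static const unsigned char key[16] = "0123456789abcdef"; /* demo key */
	static const unsigned char msg[] = "what do ya want for nothing?";
	be128 d = ghash(key, msg, sizeof(msg) - 1);

	printf("%016llx%016llx\n",
	       (unsigned long long)d.hi, (unsigned long long)d.lo);
	return 0;
}
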
@@ -3097,8 +3153,8 @@ static struct cipher_testvec des_enc_tv_template[] = {
                          "\x5F\x62\xC7\x72\xD9\xFC\xCB\x9A",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        },
 };
 
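The recurring hunks that follow all make the same change. When .np is set, testmgr feeds the vector through .np scatterlist segments whose lengths are listed in .tap; .also_non_np = 1 asks for an additional pass over one contiguous buffer. Replacing { len - 8, 8 } with { len - 10, 2, 8 } inserts a 2-byte segment, presumably so the block-cipher walk code is also exercised across a segment boundary that does not fall on a block edge. A tiny standalone illustration of how the tap array partitions a buffer (names local to the sketch):

#include <stdio.h>

static void show_split(unsigned int total,
		       const unsigned int *tap, unsigned int np)
{
	unsigned int off = 0;

	for (unsigned int i = 0; i < np; i++) {
		printf("  segment %u: offset %3u, length %3u\n",
		       i, off, tap[i]);
		off += tap[i];
	}
	printf("  covers %u of %u bytes\n", off, total);
}

int main(void)
{
	const unsigned int old_tap[] = { 248 - 8, 8 };
	const unsigned int new_tap[] = { 248 - 10, 2, 8 };

	puts("old split (np = 2):");
	show_split(248, old_tap, 2);
	puts("new split (np = 3):");
	show_split(248, new_tap, 3);
	return 0;
}
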
@@ -3207,8 +3263,8 @@ static struct cipher_testvec des_dec_tv_template[] = {
                          "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        },
 };
 
@@ -3333,8 +3389,8 @@ static struct cipher_testvec des_cbc_enc_tv_template[] = {
                          "\xC6\x4A\xF3\x55\xC7\x29\x2E\x63",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        },
 };
 
@@ -3442,8 +3498,8 @@ static struct cipher_testvec des_cbc_dec_tv_template[] = {
                          "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        },
 };
 
@@ -3517,8 +3573,8 @@ static struct cipher_testvec des_ctr_enc_tv_template[] = {
                          "\x69\x74\xA1\x06\x46\x0F\x4E\x75",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        }, { /* Generated with Crypto++ */
                .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
                .klen   = 8,
@@ -3663,8 +3719,8 @@ static struct cipher_testvec des_ctr_dec_tv_template[] = {
                          "\xC6\x2F\xBB\x24\x8D\x19\x82\xEB",
                .rlen   = 248,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 248 - 8, 8 },
+               .np     = 3,
+               .tap    = { 248 - 10, 2, 8 },
        }, { /* Generated with Crypto++ */
                .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55",
                .klen   = 8,
@@ -3899,8 +3955,8 @@ static struct cipher_testvec des3_ede_enc_tv_template[] = {
                          "\xD8\x45\xFF\x33\xBA\xBB\x2B\x63",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -4064,8 +4120,8 @@ static struct cipher_testvec des3_ede_dec_tv_template[] = {
                          "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -4244,8 +4300,8 @@ static struct cipher_testvec des3_ede_cbc_enc_tv_template[] = {
                          "\x95\x63\x73\xA2\x44\xAC\xF8\xA5",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -4424,8 +4480,8 @@ static struct cipher_testvec des3_ede_cbc_dec_tv_template[] = {
                          "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -4564,8 +4620,8 @@ static struct cipher_testvec des3_ede_ctr_enc_tv_template[] = {
                          "\x5C\xEE\xFC\xCF\xC4\x70\x00\x34",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        }, { /* Generated with Crypto++ */
                .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
                          "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
@@ -4842,8 +4898,8 @@ static struct cipher_testvec des3_ede_ctr_dec_tv_template[] = {
                          "\xB8\x03\xEA\x7D\xE1\x48\xD3\x47",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        }, { /* Generated with Crypto++ */
                .key    = "\x9C\xD6\xF3\x9C\xB9\x5A\x67\x00"
                          "\x5A\x67\x00\x2D\xCE\xEB\x2D\xCE"
@@ -5182,8 +5238,8 @@ static struct cipher_testvec bf_enc_tv_template[] = {
                          "\xC9\x1A\xFB\x5D\xDE\xBB\x43\xF4",
                .rlen   = 504,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 504 - 8, 8 },
+               .np     = 3,
+               .tap    = { 504 - 10, 2, 8 },
        },
 };
 
@@ -5374,8 +5430,8 @@ static struct cipher_testvec bf_dec_tv_template[] = {
                          "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
                .rlen   = 504,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 504 - 8, 8 },
+               .np     = 3,
+               .tap    = { 504 - 10, 2, 8 },
        },
 };
 
@@ -5531,8 +5587,8 @@ static struct cipher_testvec bf_cbc_enc_tv_template[] = {
                          "\xB4\x98\xD8\x6B\x74\xE7\x65\xF4",
                .rlen   = 504,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 504 - 8, 8 },
+               .np     = 3,
+               .tap    = { 504 - 10, 2, 8 },
        },
 };
 
@@ -5688,8 +5744,8 @@ static struct cipher_testvec bf_cbc_dec_tv_template[] = {
                          "\x2B\xC2\x59\xF0\x64\xFB\x92\x06",
                .rlen   = 504,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 504 - 8, 8 },
+               .np     = 3,
+               .tap    = { 504 - 10, 2, 8 },
        },
 };
 
@@ -6694,8 +6750,8 @@ static struct cipher_testvec tf_enc_tv_template[] = {
                          "\x2C\x75\x64\xC4\xCA\xC1\x7E\xD5",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -6862,8 +6918,8 @@ static struct cipher_testvec tf_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -7045,8 +7101,8 @@ static struct cipher_testvec tf_cbc_enc_tv_template[] = {
                          "\x0A\xA3\x30\x10\x26\x25\x41\x2C",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -7228,8 +7284,8 @@ static struct cipher_testvec tf_cbc_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -8302,8 +8358,8 @@ static struct cipher_testvec tf_lrw_enc_tv_template[] = {
                          "\x11\xd7\xb8\x6e\xea\xe1\x80\x30",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -8555,8 +8611,8 @@ static struct cipher_testvec tf_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -8897,8 +8953,8 @@ static struct cipher_testvec tf_xts_enc_tv_template[] = {
                          "\x37\x30\xe1\x91\x8d\xb3\x2a\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -9240,8 +9296,8 @@ static struct cipher_testvec tf_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -9438,8 +9494,8 @@ static struct cipher_testvec serpent_enc_tv_template[] = {
                          "\xF4\x46\x2E\xEB\xAC\xF3\xD2\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -9664,8 +9720,8 @@ static struct cipher_testvec serpent_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -9846,8 +9902,8 @@ static struct cipher_testvec serpent_cbc_enc_tv_template[] = {
                          "\xBC\x08\x3A\xA2\x29\xB3\xDF\xD1",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -9987,8 +10043,8 @@ static struct cipher_testvec serpent_cbc_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -11061,8 +11117,8 @@ static struct cipher_testvec serpent_lrw_enc_tv_template[] = {
                          "\xd9\x51\x0f\xd7\x94\x2f\xc5\xa7",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -11314,8 +11370,8 @@ static struct cipher_testvec serpent_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -11656,8 +11712,8 @@ static struct cipher_testvec serpent_xts_enc_tv_template[] = {
                          "\xd4\xa0\x91\x98\x11\x5f\x4d\xb1",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -11999,8 +12055,8 @@ static struct cipher_testvec serpent_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -12182,8 +12238,8 @@ static struct cipher_testvec cast6_enc_tv_template[] = {
                          "\x11\x74\x93\x57\xB4\x7E\xC6\x00",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12353,8 +12409,8 @@ static struct cipher_testvec cast6_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12494,8 +12550,8 @@ static struct cipher_testvec cast6_cbc_enc_tv_template[] = {
                          "\x22\x46\x89\x2D\x0F\x2B\x08\x24",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12635,8 +12691,8 @@ static struct cipher_testvec cast6_cbc_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12792,8 +12848,8 @@ static struct cipher_testvec cast6_ctr_enc_tv_template[] = {
                          "\xF9\xC5\xDD\x27\xB3\x39\xCB\xCB",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -12949,8 +13005,8 @@ static struct cipher_testvec cast6_ctr_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -13096,8 +13152,8 @@ static struct cipher_testvec cast6_lrw_enc_tv_template[] = {
                          "\xC4\xF5\x99\x61\xBC\xBB\x5B\x46",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -13243,8 +13299,8 @@ static struct cipher_testvec cast6_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -13392,8 +13448,8 @@ static struct cipher_testvec cast6_xts_enc_tv_template[] = {
                          "\x22\x60\x4E\xE8\xA4\x5D\x85\xB9",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -13541,8 +13597,8 @@ static struct cipher_testvec cast6_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -13749,8 +13805,8 @@ static struct cipher_testvec aes_enc_tv_template[] = {
                          "\x17\xBB\xC0\x6B\x62\x3F\x56\xE9",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -13921,8 +13977,8 @@ static struct cipher_testvec aes_dec_tv_template[] = {
                          "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -14140,8 +14196,8 @@ static struct cipher_testvec aes_cbc_enc_tv_template[] = {
                          "\xA3\xAA\x13\xCC\x50\xFF\x7B\x02",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -14359,8 +14415,8 @@ static struct cipher_testvec aes_cbc_dec_tv_template[] = {
                          "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -16265,8 +16321,8 @@ static struct cipher_testvec aes_lrw_enc_tv_template[] = {
                          "\x74\x3f\x7d\x58\x88\x75\xde\x3e",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        }
 };
 
@@ -16519,8 +16575,8 @@ static struct cipher_testvec aes_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        }
 };
 
@@ -16861,8 +16917,8 @@ static struct cipher_testvec aes_xts_enc_tv_template[] = {
                          "\xb9\xc6\xe6\x93\xe1\x48\xc1\x51",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        }
 };
 
@@ -17203,8 +17259,8 @@ static struct cipher_testvec aes_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        }
 };
 
@@ -17420,8 +17476,8 @@ static struct cipher_testvec aes_ctr_enc_tv_template[] = {
                          "\xF1\x4C\xE5\xB2\x91\x64\x0C\x51",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        }, { /* Generated with Crypto++ */
                .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
                          "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
@@ -17775,8 +17831,8 @@ static struct cipher_testvec aes_ctr_dec_tv_template[] = {
                          "\xED\x56\xBF\x28\xB4\x1D\x86\x12",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        }, { /* Generated with Crypto++ */
                .key    = "\xC9\x83\xA6\xC9\xEC\x0F\x32\x55"
                          "\x0F\x32\x55\x78\x9B\xBE\x78\x9B"
@@ -20743,6 +20799,834 @@ static struct cprng_testvec ansi_cprng_aes_tv_template[] = {
        },
 };
 
+/*
+ * SP800-90A DRBG Test vectors from
+ * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
+ *
+ * Test vectors for DRBG with prediction resistance. All types of DRBGs
+ * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
+ * w/o personalization string, w/ and w/o additional input string).
+ */
+static struct drbg_testvec drbg_pr_sha256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\x72\x88\x4c\xcd\x6c\x85\x57\x70\xf7\x0b\x8b\x86"
+                       "\xc1\xeb\xd2\x4e\x36\x14\xab\x18\xc4\x9c\xc9\xcf"
+                       "\x1a\xe8\xf7\x7b\x02\x49\x73\xd7\xf1\x42\x7d\xc6"
+                       "\x3f\x29\x2d\xec\xd3\x66\x51\x3f\x1d\x8d\x5b\x4e",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\x38\x9c\x91\xfa\xc2\xa3\x46\x89\x56\x08\x3f\x62"
+                       "\x73\xd5\x22\xa9\x29\x63\x3a\x1d\xe5\x5d\x5e\x4f"
+                       "\x67\xb0\x67\x7a\x5e\x9e\x0c\x62",
+               .entprb = (unsigned char *)
+                       "\xb2\x8f\x36\xb2\xf6\x8d\x39\x13\xfa\x6c\x66\xcf"
+                       "\x62\x8a\x7e\x8c\x12\x33\x71\x9c\x69\xe4\xa5\xf0"
+                       "\x8c\xee\xeb\x9c\xf5\x31\x98\x31",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x52\x7b\xa3\xad\x71\x77\xa4\x49\x42\x04\x61\xc7"
+                       "\xf0\xaf\xa5\xfd\xd3\xb3\x0d\x6a\x61\xba\x35\x49"
+                       "\xbb\xaa\xaf\xe4\x25\x7d\xb5\x48\xaf\x5c\x18\x3d"
+                       "\x33\x8d\x9d\x45\xdf\x98\xd5\x94\xa8\xda\x92\xfe"
+                       "\xc4\x3c\x94\x2a\xcf\x7f\x7b\xf2\xeb\x28\xa9\xf1"
+                       "\xe0\x86\x30\xa8\xfe\xf2\x48\x90\x91\x0c\x75\xb5"
+                       "\x3c\x00\xf0\x4d\x09\x4f\x40\xa7\xa2\x8c\x52\xdf"
+                       "\x52\xef\x17\xbf\x3d\xd1\xa2\x31\xb4\xb8\xdc\xe6"
+                       "\x5b\x0d\x1f\x78\x36\xb4\xe6\x4b\xa7\x11\x25\xd5"
+                       "\x94\xc6\x97\x36\xab\xf0\xe5\x31\x28\x6a\xbb\xce"
+                       "\x30\x81\xa6\x8f\x27\x14\xf8\x1c",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x5d\xf2\x14\xbc\xf6\xb5\x4e\x0b\xf0\x0d\x6f\x2d"
+                       "\xe2\x01\x66\x7b\xd0\xa4\x73\xa4\x21\xdd\xb0\xc0"
+                       "\x51\x79\x09\xf4\xea\xa9\x08\xfa\xa6\x67\xe0\xe1"
+                       "\xd1\x88\xa8\xad\xee\x69\x74\xb3\x55\x06\x9b\xf6",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xef\x48\x06\xa2\xc2\x45\xf1\x44\xfa\x34\x2c\xeb"
+                       "\x8d\x78\x3c\x09\x8f\x34\x72\x20\xf2\xe7\xfd\x13"
+                       "\x76\x0a\xf6\xdc\x3c\xf5\xc0\x15",
+               .entprb = (unsigned char *)
+                       "\x4b\xbe\xe5\x24\xed\x6a\x2d\x0c\xdb\x73\x5e\x09"
+                       "\xf9\xad\x67\x7c\x51\x47\x8b\x6b\x30\x2a\xc6\xde"
+                       "\x76\xaa\x55\x04\x8b\x0a\x72\x95",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x3b\x14\x71\x99\xa1\xda\xa0\x42\xe6\xc8\x85\x32"
+                       "\x70\x20\x32\x53\x9a\xbe\xd1\x1e\x15\xef\xfb\x4c"
+                       "\x25\x6e\x19\x3a\xf0\xb9\xcb\xde\xf0\x3b\xc6\x18"
+                       "\x4d\x85\x5a\x9b\xf1\xe3\xc2\x23\x03\x93\x08\xdb"
+                       "\xa7\x07\x4b\x33\x78\x40\x4d\xeb\x24\xf5\x6e\x81"
+                       "\x4a\x1b\x6e\xa3\x94\x52\x43\xb0\xaf\x2e\x21\xf4"
+                       "\x42\x46\x8e\x90\xed\x34\x21\x75\xea\xda\x67\xb6"
+                       "\xe4\xf6\xff\xc6\x31\x6c\x9a\x5a\xdb\xb3\x97\x13"
+                       "\x09\xd3\x20\x98\x33\x2d\x6d\xd7\xb5\x6a\xa8\xa9"
+                       "\x9a\x5b\xd6\x87\x52\xa1\x89\x2b\x4b\x9c\x64\x60"
+                       "\x50\x47\xa3\x63\x81\x16\xaf\x19",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\xbe\x13\xdb\x2a\xe9\xa8\xfe\x09\x97\xe1\xce\x5d"
+                       "\xe8\xbb\xc0\x7c\x4f\xcb\x62\x19\x3f\x0f\xd2\xad"
+                       "\xa9\xd0\x1d\x59\x02\xc4\xff\x70",
+               .addtlb = (unsigned char *)
+                       "\x6f\x96\x13\xe2\xa7\xf5\x6c\xfe\xdf\x66\xe3\x31"
+                       "\x63\x76\xbf\x20\x27\x06\x49\xf1\xf3\x01\x77\x41"
+                       "\x9f\xeb\xe4\x38\xfe\x67\x00\xcd",
+               .addtllen = 32,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xc6\x1c\xaf\x83\xa2\x56\x38\xf9\xb0\xbc\xd9\x85"
+                       "\xf5\x2e\xc4\x46\x9c\xe1\xb9\x40\x98\x70\x10\x72"
+                       "\xd7\x7d\x15\x85\xa1\x83\x5a\x97\xdf\xc8\xa8\xe8"
+                       "\x03\x4c\xcb\x70\x35\x8b\x90\x94\x46\x8a\x6e\xa1",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xc9\x05\xa4\xcf\x28\x80\x4b\x93\x0f\x8b\xc6\xf9"
+                       "\x09\x41\x58\x74\xe9\xec\x28\xc7\x53\x0a\x73\x60"
+                       "\xba\x0a\xde\x57\x5b\x4b\x9f\x29",
+               .entprb = (unsigned char *)
+                       "\x4f\x31\xd2\xeb\xac\xfa\xa8\xe2\x01\x7d\xf3\xbd"
+                       "\x42\xbd\x20\xa0\x30\x65\x74\xd5\x5d\xd2\xad\xa4"
+                       "\xa9\xeb\x1f\x4d\xf6\xfd\xb8\x26",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\xf6\x13\x05\xcb\x83\x60\x16\x42\x49\x1d\xc6\x25"
+                       "\x3b\x8c\x31\xa3\xbe\x8b\xbd\x1c\xe2\xec\x1d\xde"
+                       "\xbb\xbf\xa1\xac\xa8\x9f\x50\xce\x69\xce\xef\xd5"
+                       "\xd6\xf2\xef\x6a\xf7\x81\x38\xdf\xbc\xa7\x5a\xb9"
+                       "\xb2\x42\x65\xab\xe4\x86\x8d\x2d\x9d\x59\x99\x2c"
+                       "\x5a\x0d\x71\x55\x98\xa4\x45\xc2\x8d\xdb\x05\x5e"
+                       "\x50\x21\xf7\xcd\xe8\x98\x43\xce\x57\x74\x63\x4c"
+                       "\xf3\xb1\xa5\x14\x1e\x9e\x01\xeb\x54\xd9\x56\xae"
+                       "\xbd\xb6\x6f\x1a\x47\x6b\x3b\x44\xe4\xa2\xe9\x3c"
+                       "\x6c\x83\x12\x30\xb8\x78\x7f\x8e\x54\x82\xd4\xfe"
+                       "\x90\x35\x0d\x4c\x4d\x85\xe7\x13",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xa5\xbf\xac\x4f\x71\xa1\xbb\x67\x94\xc6\x50\xc7"
+                       "\x2a\x45\x9e\x10\xa8\xed\xf7\x52\x4f\xfe\x21\x90"
+                       "\xa4\x1b\xe1\xe2\x53\xcc\x61\x47",
+               .perslen = 32,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xb6\xc1\x8d\xdf\x99\x54\xbe\x95\x10\x48\xd9\xf6"
+                       "\xd7\x48\xa8\x73\x2d\x74\xde\x1e\xde\x57\x7e\xf4"
+                       "\x7b\x7b\x64\xef\x88\x7a\xa8\x10\x4b\xe1\xc1\x87"
+                       "\xbb\x0b\xe1\x39\x39\x50\xaf\x68\x9c\xa2\xbf\x5e",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xdc\x81\x0a\x01\x58\xa7\x2e\xce\xee\x48\x8c\x7c"
+                       "\x77\x9e\x3c\xf1\x17\x24\x7a\xbb\xab\x9f\xca\x12"
+                       "\x19\xaf\x97\x2d\x5f\xf9\xff\xfc",
+               .entprb = (unsigned char *)
+                       "\xaf\xfc\x4f\x98\x8b\x93\x95\xc1\xb5\x8b\x7f\x73"
+                       "\x6d\xa6\xbe\x6d\x33\xeb\x2c\x82\xb1\xaf\xc1\xb6"
+                       "\xb6\x05\xe2\x44\xaa\xfd\xe7\xdb",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x51\x79\xde\x1c\x0f\x58\xf3\xf4\xc9\x57\x2e\x31"
+                       "\xa7\x09\xa1\x53\x64\x63\xa2\xc5\x1d\x84\x88\x65"
+                       "\x01\x1b\xc6\x16\x3c\x49\x5b\x42\x8e\x53\xf5\x18"
+                       "\xad\x94\x12\x0d\x4f\x55\xcc\x45\x5c\x98\x0f\x42"
+                       "\x28\x2f\x47\x11\xf9\xc4\x01\x97\x6b\xa0\x94\x50"
+                       "\xa9\xd1\x5e\x06\x54\x3f\xdf\xbb\xc4\x98\xee\x8b"
+                       "\xba\xa9\xfa\x49\xee\x1d\xdc\xfb\x50\xf6\x51\x9f"
+                       "\x6c\x4a\x9a\x6f\x63\xa2\x7d\xad\xaf\x3a\x24\xa0"
+                       "\xd9\x9f\x07\xeb\x15\xee\x26\xe0\xd5\x63\x39\xda"
+                       "\x3c\x59\xd6\x33\x6c\x02\xe8\x05\x71\x46\x68\x44"
+                       "\x63\x4a\x68\x72\xe9\xf5\x55\xfe",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x15\x20\x2f\xf6\x98\x28\x63\xa2\xc4\x4e\xbb\x6c"
+                       "\xb2\x25\x92\x61\x79\xc9\x22\xc4\x61\x54\x96\xff"
+                       "\x4a\x85\xca\x80\xfe\x0d\x1c\xd0",
+               .addtlb = (unsigned char *)
+                       "\xde\x29\x8e\x03\x42\x61\xa3\x28\x5e\xc8\x80\xc2"
+                       "\x6d\xbf\xad\x13\xe1\x8d\x2a\xc7\xe8\xc7\x18\x89"
+                       "\x42\x58\x9e\xd6\xcc\xad\x7b\x1e",
+               .addtllen = 32,
+               .pers = (unsigned char *)
+                       "\x84\xc3\x73\x9e\xce\xb3\xbc\x89\xf7\x62\xb3\xe1"
+                       "\xd7\x48\x45\x8a\xa9\xcc\xe9\xed\xd5\x81\x84\x52"
+                       "\x82\x4c\xdc\x19\xb8\xf8\x92\x5c",
+               .perslen = 32,
+       },
+};
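
For the *_pr_* templates the per-request fields come into play: entpra and entprb hold the fresh entropy a prediction-resistant DRBG must pull in before each generate request. A hedged sketch of that sequencing, in the same stubbed style as the drbg_testvec note above; whether the additional input feeds the reseed or the generate step is internal to the DRBG core:

#include <stddef.h>

/* Only the fields used here; the full struct is declared above. */
struct drbg_pr_testvec {
	unsigned char *entpra, *entprb;  size_t entprlen;
	unsigned char *addtla, *addtlb;  size_t addtllen;
	size_t expectedlen;
};

static void drbg_reseed(const unsigned char *entropy, size_t len)
{
	(void)entropy; (void)len;	/* stub */
}

static void drbg_generate(unsigned char *out, size_t outlen,
			  const unsigned char *addtl, size_t alen)
{
	(void)out; (void)outlen; (void)addtl; (void)alen;	/* stub */
}

static void drbg_pr_generate_twice(const struct drbg_pr_testvec *tv,
				   unsigned char *out)
{
	/* prediction resistance: fresh entropy before every generate */
	drbg_reseed(tv->entpra, tv->entprlen);
	drbg_generate(out, tv->expectedlen, tv->addtla, tv->addtllen);

	drbg_reseed(tv->entprb, tv->entprlen);
	drbg_generate(out, tv->expectedlen, tv->addtlb, tv->addtllen);
	/* as before, only this second output is compared to .expected */
}

int main(void)
{
	struct drbg_pr_testvec tv = { 0 };
	unsigned char out[128];

	drbg_pr_generate_twice(&tv, out);
	return 0;
}
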
+
+static struct drbg_testvec drbg_pr_hmac_sha256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\x99\x69\xe5\x4b\x47\x03\xff\x31\x78\x5b\x87\x9a"
+                       "\x7e\x5c\x0e\xae\x0d\x3e\x30\x95\x59\xe9\xfe\x96"
+                       "\xb0\x67\x6d\x49\xd5\x91\xea\x4d\x07\xd2\x0d\x46"
+                       "\xd0\x64\x75\x7d\x30\x23\xca\xc2\x37\x61\x27\xab",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xc6\x0f\x29\x99\x10\x0f\x73\x8c\x10\xf7\x47\x92"
+                       "\x67\x6a\x3f\xc4\xa2\x62\xd1\x37\x21\x79\x80\x46"
+                       "\xe2\x9a\x29\x51\x81\x56\x9f\x54",
+               .entprb = (unsigned char *)
+                       "\xc1\x1d\x45\x24\xc9\x07\x1b\xd3\x09\x60\x15\xfc"
+                       "\xf7\xbc\x24\xa6\x07\xf2\x2f\xa0\x65\xc9\x37\x65"
+                       "\x8a\x2a\x77\xa8\x69\x90\x89\xf4",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\xab\xc0\x15\x85\x60\x94\x80\x3a\x93\x8d\xff\xd2"
+                       "\x0d\xa9\x48\x43\x87\x0e\xf9\x35\xb8\x2c\xfe\xc1"
+                       "\x77\x06\xb8\xf5\x51\xb8\x38\x50\x44\x23\x5d\xd4"
+                       "\x4b\x59\x9f\x94\xb3\x9b\xe7\x8d\xd4\x76\xe0\xcf"
+                       "\x11\x30\x9c\x99\x5a\x73\x34\xe0\xa7\x8b\x37\xbc"
+                       "\x95\x86\x23\x50\x86\xfa\x3b\x63\x7b\xa9\x1c\xf8"
+                       "\xfb\x65\xef\xa2\x2a\x58\x9c\x13\x75\x31\xaa\x7b"
+                       "\x2d\x4e\x26\x07\xaa\xc2\x72\x92\xb0\x1c\x69\x8e"
+                       "\x6e\x01\xae\x67\x9e\xb8\x7c\x01\xa8\x9c\x74\x22"
+                       "\xd4\x37\x2d\x6d\x75\x4a\xba\xbb\x4b\xf8\x96\xfc"
+                       "\xb1\xcd\x09\xd6\x92\xd0\x28\x3f",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xb9\x1f\xe9\xef\xdd\x9b\x7d\x20\xb6\xec\xe0\x2f"
+                       "\xdb\x76\x24\xce\x41\xc8\x3a\x4a\x12\x7f\x3e\x2f"
+                       "\xae\x05\x99\xea\xb5\x06\x71\x0d\x0c\x4c\xb4\x05"
+                       "\x26\xc6\xbd\xf5\x7f\x2a\x3d\xf2\xb5\x49\x7b\xda",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xef\x67\x50\x9c\xa7\x7d\xdf\xb7\x2d\x81\x01\xa4"
+                       "\x62\x81\x6a\x69\x5b\xb3\x37\x45\xa7\x34\x8e\x26"
+                       "\x46\xd9\x26\xa2\x19\xd4\x94\x43",
+               .entprb = (unsigned char *)
+                       "\x97\x75\x53\x53\xba\xb4\xa6\xb2\x91\x60\x71\x79"
+                       "\xd1\x6b\x4a\x24\x9a\x34\x66\xcc\x33\xab\x07\x98"
+                       "\x51\x78\x72\xb2\x79\xfd\x2c\xff",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x9c\xdc\x63\x8a\x19\x23\x22\x66\x0c\xc5\xb9\xd7"
+                       "\xfb\x2a\xb0\x31\xe3\x8a\x36\xa8\x5a\xa8\x14\xda"
+                       "\x1e\xa9\xcc\xfe\xb8\x26\x44\x83\x9f\xf6\xff\xaa"
+                       "\xc8\x98\xb8\x30\x35\x3b\x3d\x36\xd2\x49\xd4\x40"
+                       "\x62\x0a\x65\x10\x76\x55\xef\xc0\x95\x9c\xa7\xda"
+                       "\x3f\xcf\xb7\x7b\xc6\xe1\x28\x52\xfc\x0c\xe2\x37"
+                       "\x0d\x83\xa7\x51\x4b\x31\x47\x3c\xe1\x3c\xae\x70"
+                       "\x01\xc8\xa3\xd3\xc2\xac\x77\x9c\xd1\x68\x77\x9b"
+                       "\x58\x27\x3b\xa5\x0f\xc2\x7a\x8b\x04\x65\x62\xd5"
+                       "\xe8\xd6\xfe\x2a\xaf\xd3\xd3\xfe\xbd\x18\xfb\xcd"
+                       "\xcd\x66\xb5\x01\x69\x66\xa0\x3c",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x17\xc1\x56\xcb\xcc\x50\xd6\x03\x7d\x45\x76\xa3"
+                       "\x75\x76\xc1\x4a\x66\x1b\x2e\xdf\xb0\x2e\x7d\x56"
+                       "\x6d\x99\x3b\xc6\x58\xda\x03\xf6",
+               .addtlb = (unsigned char *)
+                       "\x7c\x7b\x4a\x4b\x32\x5e\x6f\x67\x34\xf5\x21\x4c"
+                       "\xf9\x96\xf9\xbf\x1c\x8c\x81\xd3\x9b\x60\x6a\x44"
+                       "\xc6\x03\xa2\xfb\x13\x20\x19\xb7",
+               .addtllen = 32,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x13\x54\x96\xfc\x1b\x7d\x28\xf3\x18\xc9\xa7\x89"
+                       "\xb6\xb3\xc8\x72\xac\x00\xd4\x59\x36\x25\x05\xaf"
+                       "\xa5\xdb\x96\xcb\x3c\x58\x46\x87\xa5\xaa\xbf\x20"
+                       "\x3b\xfe\x23\x0e\xd1\xc7\x41\x0f\x3f\xc9\xb3\x67",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\xe2\xbd\xb7\x48\x08\x06\xf3\xe1\x93\x3c\xac\x79"
+                       "\xa7\x2b\x11\xda\xe3\x2e\xe1\x91\xa5\x02\x19\x57"
+                       "\x20\x28\xad\xf2\x60\xd7\xcd\x45",
+               .entprb = (unsigned char *)
+                       "\x8b\xd4\x69\xfc\xff\x59\x95\x95\xc6\x51\xde\x71"
+                       "\x68\x5f\xfc\xf9\x4a\xab\xec\x5a\xcb\xbe\xd3\x66"
+                       "\x1f\xfa\x74\xd3\xac\xa6\x74\x60",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x1f\x9e\xaf\xe4\xd2\x46\xb7\x47\x41\x4c\x65\x99"
+                       "\x01\xe9\x3b\xbb\x83\x0c\x0a\xb0\xc1\x3a\xe2\xb3"
+                       "\x31\x4e\xeb\x93\x73\xee\x0b\x26\xc2\x63\xa5\x75"
+                       "\x45\x99\xd4\x5c\x9f\xa1\xd4\x45\x87\x6b\x20\x61"
+                       "\x40\xea\x78\xa5\x32\xdf\x9e\x66\x17\xaf\xb1\x88"
+                       "\x9e\x2e\x23\xdd\xc1\xda\x13\x97\x88\xa5\xb6\x5e"
+                       "\x90\x14\x4e\xef\x13\xab\x5c\xd9\x2c\x97\x9e\x7c"
+                       "\xd7\xf8\xce\xea\x81\xf5\xcd\x71\x15\x49\x44\xce"
+                       "\x83\xb6\x05\xfb\x7d\x30\xb5\x57\x2c\x31\x4f\xfc"
+                       "\xfe\x80\xb6\xc0\x13\x0c\x5b\x9b\x2e\x8f\x3d\xfc"
+                       "\xc2\xa3\x0c\x11\x1b\x80\x5f\xf3",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\x64\xb6\xfc\x60\xbc\x61\x76\x23\x6d\x3f\x4a\x0f"
+                       "\xe1\xb4\xd5\x20\x9e\x70\xdd\x03\x53\x6d\xbf\xce"
+                       "\xcd\x56\x80\xbc\xb8\x15\xc8\xaa",
+               .perslen = 32,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xc7\xcc\xbc\x67\x7e\x21\x66\x1e\x27\x2b\x63\xdd"
+                       "\x3a\x78\xdc\xdf\x66\x6d\x3f\x24\xae\xcf\x37\x01"
+                       "\xa9\x0d\x89\x8a\xa7\xdc\x81\x58\xae\xb2\x10\x15"
+                       "\x7e\x18\x44\x6d\x13\xea\xdf\x37\x85\xfe\x81\xfb",
+               .entropylen = 48,
+               .entpra = (unsigned char *)
+                       "\x7b\xa1\x91\x5b\x3c\x04\xc4\x1b\x1d\x19\x2f\x1a"
+                       "\x18\x81\x60\x3c\x6c\x62\x91\xb7\xe9\xf5\xcb\x96"
+                       "\xbb\x81\x6a\xcc\xb5\xae\x55\xb6",
+               .entprb = (unsigned char *)
+                       "\x99\x2c\xc7\x78\x7e\x3b\x88\x12\xef\xbe\xd3\xd2"
+                       "\x7d\x2a\xa5\x86\xda\x8d\x58\x73\x4a\x0a\xb2\x2e"
+                       "\xbb\x4c\x7e\xe3\x9a\xb6\x81\xc1",
+               .entprlen = 32,
+               .expected = (unsigned char *)
+                       "\x95\x6f\x95\xfc\x3b\xb7\xfe\x3e\xd0\x4e\x1a\x14"
+                       "\x6c\x34\x7f\x7b\x1d\x0d\x63\x5e\x48\x9c\x69\xe6"
+                       "\x46\x07\xd2\x87\xf3\x86\x52\x3d\x98\x27\x5e\xd7"
+                       "\x54\xe7\x75\x50\x4f\xfb\x4d\xfd\xac\x2f\x4b\x77"
+                       "\xcf\x9e\x8e\xcc\x16\xa2\x24\xcd\x53\xde\x3e\xc5"
+                       "\x55\x5d\xd5\x26\x3f\x89\xdf\xca\x8b\x4e\x1e\xb6"
+                       "\x88\x78\x63\x5c\xa2\x63\x98\x4e\x6f\x25\x59\xb1"
+                       "\x5f\x2b\x23\xb0\x4b\xa5\x18\x5d\xc2\x15\x74\x40"
+                       "\x59\x4c\xb4\x1e\xcf\x9a\x36\xfd\x43\xe2\x03\xb8"
+                       "\x59\x91\x30\x89\x2a\xc8\x5a\x43\x23\x7c\x73\x72"
+                       "\xda\x3f\xad\x2b\xba\x00\x6b\xd1",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x18\xe8\x17\xff\xef\x39\xc7\x41\x5c\x73\x03\x03"
+                       "\xf6\x3d\xe8\x5f\xc8\xab\xe4\xab\x0f\xad\xe8\xd6"
+                       "\x86\x88\x55\x28\xc1\x69\xdd\x76",
+               .addtlb = (unsigned char *)
+                       "\xac\x07\xfc\xbe\x87\x0e\xd3\xea\x1f\x7e\xb8\xe7"
+                       "\x9d\xec\xe8\xe7\xbc\xf3\x18\x25\x77\x35\x4a\xaa"
+                       "\x00\x99\x2a\xdd\x0a\x00\x50\x82",
+               .addtllen = 32,
+               .pers = (unsigned char *)
+                       "\xbc\x55\xab\x3c\xf6\x52\xb0\x11\x3d\x7b\x90\xb8"
+                       "\x24\xc9\x26\x4e\x5a\x1e\x77\x0d\x3d\x58\x4a\xda"
+                       "\xd1\x81\xe9\xf8\xeb\x30\x8f\x6f",
+               .perslen = 32,
+       },
+};
+
+static struct drbg_testvec drbg_pr_ctr_aes128_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\xd1\x44\xc6\x61\x81\x6d\xca\x9d\x15\x28\x8a\x42"
+                       "\x94\xd7\x28\x9c\x43\x77\x19\x29\x1a\x6d\xc3\xa2",
+               .entropylen = 24,
+               .entpra = (unsigned char *)
+                       "\x96\xd8\x9e\x45\x32\xc9\xd2\x08\x7a\x6d\x97\x15"
+                       "\xb4\xec\x80\xb1",
+               .entprb = (unsigned char *)
+                       "\x8b\xb6\x72\xb5\x24\x0b\x98\x65\x95\x95\xe9\xc9"
+                       "\x28\x07\xeb\xc2",
+               .entprlen = 16,
+               .expected = (unsigned char *)
+                       "\x70\x19\xd0\x4c\x45\x78\xd6\x68\xa9\x9a\xaa\xfe"
+                       "\xc1\xdf\x27\x9a\x1c\x0d\x0d\xf7\x24\x75\x46\xcc"
+                       "\x77\x6b\xdf\x89\xc6\x94\xdc\x74\x50\x10\x70\x18"
+                       "\x9b\xdc\x96\xb4\x89\x23\x40\x1a\xce\x09\x87\xce"
+                       "\xd2\xf3\xd5\xe4\x51\x67\x74\x11\x5a\xcc\x8b\x3b"
+                       "\x8a\xf1\x23\xa8",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x8e\x83\xe0\xeb\x37\xea\x3e\x53\x5e\x17\x6e\x77"
+                       "\xbd\xb1\x53\x90\xfc\xdc\xc1\x3c\x9a\x88\x22\x94",
+               .entropylen = 24,
+               .entpra = (unsigned char *)
+                       "\x6a\x85\xe7\x37\xc8\xf1\x04\x31\x98\x4f\xc8\x73"
+                       "\x67\xd1\x08\xf8",
+               .entprb = (unsigned char *)
+                       "\xd7\xa4\x68\xe2\x12\x74\xc3\xd9\xf1\xb7\x05\xbc"
+                       "\xd4\xba\x04\x58",
+               .entprlen = 16,
+               .expected = (unsigned char *)
+                       "\x78\xd6\xa6\x70\xff\xd1\x82\xf5\xa2\x88\x7f\x6d"
+                       "\x3d\x8c\x39\xb1\xa8\xcb\x2c\x91\xab\x14\x7e\xbc"
+                       "\x95\x45\x9f\x24\xb8\x20\xac\x21\x23\xdb\x72\xd7"
+                       "\x12\x8d\x48\x95\xf3\x19\x0c\x43\xc6\x19\x45\xfc"
+                       "\x8b\xac\x40\x29\x73\x00\x03\x45\x5e\x12\xff\x0c"
+                       "\xc1\x02\x41\x82",
+               .expectedlen = 64,
+               .addtla = (unsigned char *)
+                       "\xa2\xd9\x38\xcf\x8b\x29\x67\x5b\x65\x62\x6f\xe8"
+                       "\xeb\xb3\x01\x76",
+               .addtlb = (unsigned char *)
+                       "\x59\x63\x1e\x81\x8a\x14\xa8\xbb\xa1\xb8\x41\x25"
+                       "\xd0\x7f\xcc\x43",
+               .addtllen = 16,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x04\xd9\x49\xa6\xdc\xe8\x6e\xbb\xf1\x08\x77\x2b"
+                       "\x9e\x08\xca\x92\x65\x16\xda\x99\xa2\x59\xf3\xe8",
+               .entropylen = 24,
+               .entpra = (unsigned char *)
+                       "\x38\x7e\x3f\x6b\x51\x70\x7b\x20\xec\x53\xd0\x66"
+                       "\xc3\x0f\xe3\xb0",
+               .entprb = (unsigned char *)
+                       "\xe0\x86\xa6\xaa\x5f\x72\x2f\xad\xf7\xef\x06\xb8"
+                       "\xd6\x9c\x9d\xe8",
+               .entprlen = 16,
+               .expected = (unsigned char *)
+                       "\xc9\x0a\xaf\x85\x89\x71\x44\x66\x4f\x25\x0b\x2b"
+                       "\xde\xd8\xfa\xff\x52\x5a\x1b\x32\x5e\x41\x7a\x10"
+                       "\x1f\xef\x1e\x62\x23\xe9\x20\x30\xc9\x0d\xad\x69"
+                       "\xb4\x9c\x5b\xf4\x87\x42\xd5\xae\x5e\x5e\x43\xcc"
+                       "\xd9\xfd\x0b\x93\x4a\xe3\xd4\x06\x37\x36\x0f\x3f"
+                       "\x72\x82\x0c\xcf",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xbf\xa4\x9a\x8f\x7b\xd8\xb1\x7a\x9d\xfa\x45\xed"
+                       "\x21\x52\xb3\xad",
+               .perslen = 16,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x92\x89\x8f\x31\xfa\x1c\xff\x6d\x18\x2f\x26\x06"
+                       "\x43\xdf\xf8\x18\xc2\xa4\xd9\x72\xc3\xb9\xb6\x97",
+               .entropylen = 24,
+               .entpra = (unsigned char *)
+                       "\x20\x72\x8a\x06\xf8\x6f\x8d\xd4\x41\xe2\x72\xb7"
+                       "\xc4\x2c\xe8\x10",
+               .entprb = (unsigned char *)
+                       "\x3d\xb0\xf0\x94\xf3\x05\x50\x33\x17\x86\x3e\x22"
+                       "\x08\xf7\xa5\x01",
+               .entprlen = 16,
+               .expected = (unsigned char *)
+                       "\x5a\x35\x39\x87\x0f\x4d\x22\xa4\x09\x24\xee\x71"
+                       "\xc9\x6f\xac\x72\x0a\xd6\xf0\x88\x82\xd0\x83\x28"
+                       "\x73\xec\x3f\x93\xd8\xab\x45\x23\xf0\x7e\xac\x45"
+                       "\x14\x5e\x93\x9f\xb1\xd6\x76\x43\x3d\xb6\xe8\x08"
+                       "\x88\xf6\xda\x89\x08\x77\x42\xfe\x1a\xf4\x3f\xc4"
+                       "\x23\xc5\x1f\x68",
+               .expectedlen = 64,
+               .addtla = (unsigned char *)
+                       "\x1a\x40\xfa\xe3\xcc\x6c\x7c\xa0\xf8\xda\xba\x59"
+                       "\x23\x6d\xad\x1d",
+               .addtlb = (unsigned char *)
+                       "\x9f\x72\x76\x6c\xc7\x46\xe5\xed\x2e\x53\x20\x12"
+                       "\xbc\x59\x31\x8c",
+               .addtllen = 16,
+               .pers = (unsigned char *)
+                       "\xea\x65\xee\x60\x26\x4e\x7e\xb6\x0e\x82\x68\xc4"
+                       "\x37\x3c\x5c\x0b",
+               .perslen = 16,
+       },
+};
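
Note the entropy sizing across the templates: the SHA-256 and HMAC-SHA-256 vectors carry a 48-byte .entropy blob, while the CTR-AES128 ones carry 24 bytes. That is consistent with the CAVS convention of concatenating an entropy input of one security strength with a nonce of half a strength (32 + 16 bytes at 256-bit strength, 16 + 8 at 128-bit); treat the exact split as an assumption, since the vectors store only the combined blob. A two-assert sanity check of the arithmetic:

#include <assert.h>

/* Assumed CAVS convention: .entropy = entropy (1.0 x strength)
 * concatenated with nonce (0.5 x strength). */
static unsigned int entropy_blob_len(unsigned int strength_bytes)
{
	return strength_bytes + strength_bytes / 2;
}

int main(void)
{
	assert(entropy_blob_len(32) == 48);	/* SHA-256 / HMAC-SHA-256 */
	assert(entropy_blob_len(16) == 24);	/* CTR-AES128 */
	return 0;
}
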
+
+/*
+ * SP800-90A DRBG Test vectors from
+ * http://csrc.nist.gov/groups/STM/cavp/documents/drbg/drbgtestvectors.zip
+ *
+ * Test vectors for DRBG without prediction resistance. All types of DRBGs
+ * (Hash, HMAC, CTR) are tested with all permutations of use cases (w/ and
+ * w/o personalization string, w/ and w/o additional input string).
+ */
+static struct drbg_testvec drbg_nopr_sha256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\xa6\x5a\xd0\xf3\x45\xdb\x4e\x0e\xff\xe8\x75\xc3"
+                       "\xa2\xe7\x1f\x42\xc7\x12\x9d\x62\x0f\xf5\xc1\x19"
+                       "\xa9\xef\x55\xf0\x51\x85\xe0\xfb\x85\x81\xf9\x31"
+                       "\x75\x17\x27\x6e\x06\xe9\x60\x7d\xdb\xcb\xcc\x2e",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xd3\xe1\x60\xc3\x5b\x99\xf3\x40\xb2\x62\x82\x64"
+                       "\xd1\x75\x10\x60\xe0\x04\x5d\xa3\x83\xff\x57\xa5"
+                       "\x7d\x73\xa6\x73\xd2\xb8\xd8\x0d\xaa\xf6\xa6\xc3"
+                       "\x5a\x91\xbb\x45\x79\xd7\x3f\xd0\xc8\xfe\xd1\x11"
+                       "\xb0\x39\x13\x06\x82\x8a\xdf\xed\x52\x8f\x01\x81"
+                       "\x21\xb3\xfe\xbd\xc3\x43\xe7\x97\xb8\x7d\xbb\x63"
+                       "\xdb\x13\x33\xde\xd9\xd1\xec\xe1\x77\xcf\xa6\xb7"
+                       "\x1f\xe8\xab\x1d\xa4\x66\x24\xed\x64\x15\xe5\x1c"
+                       "\xcd\xe2\xc7\xca\x86\xe2\x83\x99\x0e\xea\xeb\x91"
+                       "\x12\x04\x15\x52\x8b\x22\x95\x91\x02\x81\xb0\x2d"
+                       "\xd4\x31\xf4\xc9\xf7\x04\x27\xdf",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x73\xd3\xfb\xa3\x94\x5f\x2b\x5f\xb9\x8f\xf6\x9c"
+                       "\x8a\x93\x17\xae\x19\xc3\x4c\xc3\xd6\xca\xa3\x2d"
+                       "\x16\xfc\x42\xd2\x2d\xd5\x6f\x56\xcc\x1d\x30\xff"
+                       "\x9e\x06\x3e\x09\xce\x58\xe6\x9a\x35\xb3\xa6\x56",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\x71\x7b\x93\x46\x1a\x40\xaa\x35\xa4\xaa\xc5\xe7"
+                       "\x6d\x5b\x5b\x8a\xa0\xdf\x39\x7d\xae\x71\x58\x5b"
+                       "\x3c\x7c\xb4\xf0\x89\xfa\x4a\x8c\xa9\x5c\x54\xc0"
+                       "\x40\xdf\xbc\xce\x26\x81\x34\xf8\xba\x7d\x1c\xe8"
+                       "\xad\x21\xe0\x74\xcf\x48\x84\x30\x1f\xa1\xd5\x4f"
+                       "\x81\x42\x2f\xf4\xdb\x0b\x23\xf8\x73\x27\xb8\x1d"
+                       "\x42\xf8\x44\x58\xd8\x5b\x29\x27\x0a\xf8\x69\x59"
+                       "\xb5\x78\x44\xeb\x9e\xe0\x68\x6f\x42\x9a\xb0\x5b"
+                       "\xe0\x4e\xcb\x6a\xaa\xe2\xd2\xd5\x33\x25\x3e\xe0"
+                       "\x6c\xc7\x6a\x07\xa5\x03\x83\x9f\xe2\x8b\xd1\x1c"
+                       "\x70\xa8\x07\x59\x97\xeb\xf6\xbe",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\xf4\xd5\x98\x3d\xa8\xfc\xfa\x37\xb7\x54\x67\x73"
+                       "\xc7\xc3\xdd\x47\x34\x71\x02\x5d\xc1\xa0\xd3\x10"
+                       "\xc1\x8b\xbd\xf5\x66\x34\x6f\xdd",
+               .addtlb = (unsigned char *)
+                       "\xf7\x9e\x6a\x56\x0e\x73\xe9\xd9\x7a\xd1\x69\xe0"
+                       "\x6f\x8c\x55\x1c\x44\xd1\xce\x6f\x28\xcc\xa4\x4d"
+                       "\xa8\xc0\x85\xd1\x5a\x0c\x59\x40",
+               .addtllen = 32,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x2a\x85\xa9\x8b\xd0\xda\x83\xd6\xad\xab\x9f\xbb"
+                       "\x54\x31\x15\x95\x1c\x4d\x49\x9f\x6a\x15\xf6\xe4"
+                       "\x15\x50\x88\x06\x29\x0d\xed\x8d\xb9\x6f\x96\xe1"
+                       "\x83\x9f\xf7\x88\xda\x84\xbf\x44\x28\xd9\x1d\xaa",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\x2d\x55\xde\xc9\xed\x05\x47\x07\x3d\x04\xfc\x28"
+                       "\x0f\x92\xf0\x4d\xd8\x00\x32\x47\x0a\x1b\x1c\x4b"
+                       "\xef\xd9\x97\xa1\x17\x67\xda\x26\x6c\xfe\x76\x46"
+                       "\x6f\xbc\x6d\x82\x4e\x83\x8a\x98\x66\x6c\x01\xb6"
+                       "\xe6\x64\xe0\x08\x10\x6f\xd3\x5d\x90\xe7\x0d\x72"
+                       "\xa6\xa7\xe3\xbb\x98\x11\x12\x56\x23\xc2\x6d\xd1"
+                       "\xc8\xa8\x7a\x39\xf3\x34\xe3\xb8\xf8\x66\x00\x77"
+                       "\x7d\xcf\x3c\x3e\xfa\xc9\x0f\xaf\xe0\x24\xfa\xe9"
+                       "\x84\xf9\x6a\x01\xf6\x35\xdb\x5c\xab\x2a\xef\x4e"
+                       "\xac\xab\x55\xb8\x9b\xef\x98\x68\xaf\x51\xd8\x16"
+                       "\xa5\x5e\xae\xf9\x1e\xd2\xdb\xe6",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xa8\x80\xec\x98\x30\x98\x15\xd2\xc6\xc4\x68\xf1"
+                       "\x3a\x1c\xbf\xce\x6a\x40\x14\xeb\x36\x99\x53\xda"
+                       "\x57\x6b\xce\xa4\x1c\x66\x3d\xbc",
+               .perslen = 32,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x69\xed\x82\xa9\xc5\x7b\xbf\xe5\x1d\x2f\xcb\x7a"
+                       "\xd3\x50\x7d\x96\xb4\xb9\x2b\x50\x77\x51\x27\x74"
+                       "\x33\x74\xba\xf1\x30\xdf\x8e\xdf\x87\x1d\x87\xbc"
+                       "\x96\xb2\xc3\xa7\xed\x60\x5e\x61\x4e\x51\x29\x1a",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xa5\x71\x24\x31\x11\xfe\x13\xe1\xa8\x24\x12\xfb"
+                       "\x37\xa1\x27\xa5\xab\x77\xa1\x9f\xae\x8f\xaf\x13"
+                       "\x93\xf7\x53\x85\x91\xb6\x1b\xab\xd4\x6b\xea\xb6"
+                       "\xef\xda\x4c\x90\x6e\xef\x5f\xde\xe1\xc7\x10\x36"
+                       "\xd5\x67\xbd\x14\xb6\x89\x21\x0c\xc9\x92\x65\x64"
+                       "\xd0\xf3\x23\xe0\x7f\xd1\xe8\x75\xc2\x85\x06\xea"
+                       "\xca\xc0\xcb\x79\x2d\x29\x82\xfc\xaa\x9a\xc6\x95"
+                       "\x7e\xdc\x88\x65\xba\xec\x0e\x16\x87\xec\xa3\x9e"
+                       "\xd8\x8c\x80\xab\x3a\x64\xe0\xcb\x0e\x45\x98\xdd"
+                       "\x7c\x6c\x6c\x26\x11\x13\xc8\xce\xa9\x47\xa6\x06"
+                       "\x57\xa2\x66\xbb\x2d\x7f\xf3\xc1",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x74\xd3\x6d\xda\xe8\xd6\x86\x5f\x63\x01\xfd\xf2"
+                       "\x7d\x06\x29\x6d\x94\xd1\x66\xf0\xd2\x72\x67\x4e"
+                       "\x77\xc5\x3d\x9e\x03\xe3\xa5\x78",
+               .addtlb = (unsigned char *)
+                       "\xf6\xb6\x3d\xf0\x7c\x26\x04\xc5\x8b\xcd\x3e\x6a"
+                       "\x9f\x9c\x3a\x2e\xdb\x47\x87\xe5\x8e\x00\x5e\x2b"
+                       "\x74\x7f\xa6\xf6\x80\xcd\x9b\x21",
+               .addtllen = 32,
+               .pers = (unsigned char *)
+                       "\x74\xa6\xe0\x08\xf9\x27\xee\x1d\x6e\x3c\x28\x20"
+                       "\x87\xdd\xd7\x54\x31\x47\x78\x4b\xe5\x6d\xa3\x73"
+                       "\xa9\x65\xb1\x10\xc1\xdc\x77\x7c",
+               .perslen = 32,
+       },
+};
+
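A note on the field layout before the next array: the drbg_testvec fields appear to follow the NIST CAVS "no prediction resistance" (nopr) convention. A hedged gloss, inferred from the vectors themselves (the struct definition sits earlier in this header and is not shown):

	/*
	 * .entropy / .entropylen   - seed material used to instantiate the DRBG
	 * .pers / .perslen         - optional personalization string (NULL/0
	 *                            when unused)
	 * .addtla / .addtlb        - optional additional input for the first
	 *                            and second generate call, respectively
	 * .expected / .expectedlen - reference output; by CAVS convention this
	 *                            is presumably the second generate call's
	 *                            output
	 */
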
+static struct drbg_testvec drbg_nopr_hmac_sha256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\xca\x85\x19\x11\x34\x93\x84\xbf\xfe\x89\xde\x1c"
+                       "\xbd\xc4\x6e\x68\x31\xe4\x4d\x34\xa4\xfb\x93\x5e"
+                       "\xe2\x85\xdd\x14\xb7\x1a\x74\x88\x65\x9b\xa9\x6c"
+                       "\x60\x1d\xc6\x9f\xc9\x02\x94\x08\x05\xec\x0c\xa8",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xe5\x28\xe9\xab\xf2\xde\xce\x54\xd4\x7c\x7e\x75"
+                       "\xe5\xfe\x30\x21\x49\xf8\x17\xea\x9f\xb4\xbe\xe6"
+                       "\xf4\x19\x96\x97\xd0\x4d\x5b\x89\xd5\x4f\xbb\x97"
+                       "\x8a\x15\xb5\xc4\x43\xc9\xec\x21\x03\x6d\x24\x60"
+                       "\xb6\xf7\x3e\xba\xd0\xdc\x2a\xba\x6e\x62\x4a\xbf"
+                       "\x07\x74\x5b\xc1\x07\x69\x4b\xb7\x54\x7b\xb0\x99"
+                       "\x5f\x70\xde\x25\xd6\xb2\x9e\x2d\x30\x11\xbb\x19"
+                       "\xd2\x76\x76\xc0\x71\x62\xc8\xb5\xcc\xde\x06\x68"
+                       "\x96\x1d\xf8\x68\x03\x48\x2c\xb3\x7e\xd6\xd5\xc0"
+                       "\xbb\x8d\x50\xcf\x1f\x50\xd4\x76\xaa\x04\x58\xbd"
+                       "\xab\xa8\x06\xf4\x8b\xe9\xdc\xb8",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xf9\x7a\x3c\xfd\x91\xfa\xa0\x46\xb9\xe6\x1b\x94"
+                       "\x93\xd4\x36\xc4\x93\x1f\x60\x4b\x22\xf1\x08\x15"
+                       "\x21\xb3\x41\x91\x51\xe8\xff\x06\x11\xf3\xa7\xd4"
+                       "\x35\x95\x35\x7d\x58\x12\x0b\xd1\xe2\xdd\x8a\xed",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xc6\x87\x1c\xff\x08\x24\xfe\x55\xea\x76\x89\xa5"
+                       "\x22\x29\x88\x67\x30\x45\x0e\x5d\x36\x2d\xa5\xbf"
+                       "\x59\x0d\xcf\x9a\xcd\x67\xfe\xd4\xcb\x32\x10\x7d"
+                       "\xf5\xd0\x39\x69\xa6\x6b\x1f\x64\x94\xfd\xf5\xd6"
+                       "\x3d\x5b\x4d\x0d\x34\xea\x73\x99\xa0\x7d\x01\x16"
+                       "\x12\x6d\x0d\x51\x8c\x7c\x55\xba\x46\xe1\x2f\x62"
+                       "\xef\xc8\xfe\x28\xa5\x1c\x9d\x42\x8e\x6d\x37\x1d"
+                       "\x73\x97\xab\x31\x9f\xc7\x3d\xed\x47\x22\xe5\xb4"
+                       "\xf3\x00\x04\x03\x2a\x61\x28\xdf\x5e\x74\x97\xec"
+                       "\xf8\x2c\xa7\xb0\xa5\x0e\x86\x7e\xf6\x72\x8a\x4f"
+                       "\x50\x9a\x8c\x85\x90\x87\x03\x9c",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x51\x72\x89\xaf\xe4\x44\xa0\xfe\x5e\xd1\xa4\x1d"
+                       "\xbb\xb5\xeb\x17\x15\x00\x79\xbd\xd3\x1e\x29\xcf"
+                       "\x2f\xf3\x00\x34\xd8\x26\x8e\x3b",
+               .addtlb = (unsigned char *)
+                       "\x88\x02\x8d\x29\xef\x80\xb4\xe6\xf0\xfe\x12\xf9"
+                       "\x1d\x74\x49\xfe\x75\x06\x26\x82\xe8\x9c\x57\x14"
+                       "\x40\xc0\xc9\xb5\x2c\x42\xa6\xe0",
+               .addtllen = 32,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x8d\xf0\x13\xb4\xd1\x03\x52\x30\x73\x91\x7d\xdf"
+                       "\x6a\x86\x97\x93\x05\x9e\x99\x43\xfc\x86\x54\x54"
+                       "\x9e\x7a\xb2\x2f\x7c\x29\xf1\x22\xda\x26\x25\xaf"
+                       "\x2d\xdd\x4a\xbc\xce\x3c\xf4\xfa\x46\x59\xd8\x4e",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xb9\x1c\xba\x4c\xc8\x4f\xa2\x5d\xf8\x61\x0b\x81"
+                       "\xb6\x41\x40\x27\x68\xa2\x09\x72\x34\x93\x2e\x37"
+                       "\xd5\x90\xb1\x15\x4c\xbd\x23\xf9\x74\x52\xe3\x10"
+                       "\xe2\x91\xc4\x51\x46\x14\x7f\x0d\xa2\xd8\x17\x61"
+                       "\xfe\x90\xfb\xa6\x4f\x94\x41\x9c\x0f\x66\x2b\x28"
+                       "\xc1\xed\x94\xda\x48\x7b\xb7\xe7\x3e\xec\x79\x8f"
+                       "\xbc\xf9\x81\xb7\x91\xd1\xbe\x4f\x17\x7a\x89\x07"
+                       "\xaa\x3c\x40\x16\x43\xa5\xb6\x2b\x87\xb8\x9d\x66"
+                       "\xb3\xa6\x0e\x40\xd4\xa8\xe4\xe9\xd8\x2a\xf6\xd2"
+                       "\x70\x0e\x6f\x53\x5c\xdb\x51\xf7\x5c\x32\x17\x29"
+                       "\x10\x37\x41\x03\x0c\xcc\x3a\x56",
+               .expectedlen = 128,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xb5\x71\xe6\x6d\x7c\x33\x8b\xc0\x7b\x76\xad\x37"
+                       "\x57\xbb\x2f\x94\x52\xbf\x7e\x07\x43\x7a\xe8\x58"
+                       "\x1c\xe7\xbc\x7c\x3a\xc6\x51\xa9",
+               .perslen = 32,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xc2\xa5\x66\xa9\xa1\x81\x7b\x15\xc5\xc3\xb7\x78"
+                       "\x17\x7a\xc8\x7c\x24\xe7\x97\xbe\x0a\x84\x5f\x11"
+                       "\xc2\xfe\x39\x9d\xd3\x77\x32\xf2\xcb\x18\x94\xeb"
+                       "\x2b\x97\xb3\xc5\x6e\x62\x83\x29\x51\x6f\x86\xec",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\xb3\xa3\x69\x8d\x77\x76\x99\xa0\xdd\x9f\xa3\xf0"
+                       "\xa9\xfa\x57\x83\x2d\x3c\xef\xac\x5d\xf2\x44\x37"
+                       "\xc6\xd7\x3a\x0f\xe4\x10\x40\xf1\x72\x90\x38\xae"
+                       "\xf1\xe9\x26\x35\x2e\xa5\x9d\xe1\x20\xbf\xb7\xb0"
+                       "\x73\x18\x3a\x34\x10\x6e\xfe\xd6\x27\x8f\xf8\xad"
+                       "\x84\x4b\xa0\x44\x81\x15\xdf\xdd\xf3\x31\x9a\x82"
+                       "\xde\x6b\xb1\x1d\x80\xbd\x87\x1a\x9a\xcd\x35\xc7"
+                       "\x36\x45\xe1\x27\x0f\xb9\xfe\x4f\xa8\x8e\xc0\xe4"
+                       "\x65\x40\x9e\xa0\xcb\xa8\x09\xfe\x2f\x45\xe0\x49"
+                       "\x43\xa2\xe3\x96\xbb\xb7\xdd\x2f\x4e\x07\x95\x30"
+                       "\x35\x24\xcc\x9c\xc5\xea\x54\xa1",
+               .expectedlen = 128,
+               .addtla = (unsigned char *)
+                       "\x41\x3d\xd8\x3f\xe5\x68\x35\xab\xd4\x78\xcb\x96"
+                       "\x93\xd6\x76\x35\x90\x1c\x40\x23\x9a\x26\x64\x62"
+                       "\xd3\x13\x3b\x83\xe4\x9c\x82\x0b",
+               .addtlb = (unsigned char *)
+                       "\xd5\xc4\xa7\x1f\x9d\x6d\x95\xa1\xbe\xdf\x0b\xd2"
+                       "\x24\x7c\x27\x7d\x1f\x84\xa4\xe5\x7a\x4a\x88\x25"
+                       "\xb8\x2a\x2d\x09\x7d\xe6\x3e\xf1",
+               .addtllen = 32,
+               .pers = (unsigned char *)
+                       "\x13\xce\x4d\x8d\xd2\xdb\x97\x96\xf9\x41\x56\xc8"
+                       "\xe8\xf0\x76\x9b\x0a\xa1\xc8\x2c\x13\x23\xb6\x15"
+                       "\x36\x60\x3b\xca\x37\xc9\xee\x29",
+               .perslen = 32,
+       },
+};
+
+static struct drbg_testvec drbg_nopr_ctr_aes192_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\xc3\x5c\x2f\xa2\xa8\x9d\x52\xa1\x1f\xa3\x2a\xa9"
+                       "\x6c\x95\xb8\xf1\xc9\xa8\xf9\xcb\x24\x5a\x8b\x40"
+                       "\xf3\xa6\xe5\xa7\xfb\xd9\xd3\xc6\x8e\x27\x7b\xa9"
+                       "\xac\x9b\xbb\x00",
+               .entropylen = 40,
+               .expected = (unsigned char *)
+                       "\x8c\x2e\x72\xab\xfd\x9b\xb8\x28\x4d\xb7\x9e\x17"
+                       "\xa4\x3a\x31\x46\xcd\x76\x94\xe3\x52\x49\xfc\x33"
+                       "\x83\x91\x4a\x71\x17\xf4\x13\x68\xe6\xd4\xf1\x48"
+                       "\xff\x49\xbf\x29\x07\x6b\x50\x15\xc5\x9f\x45\x79"
+                       "\x45\x66\x2e\x3d\x35\x03\x84\x3f\x4a\xa5\xa3\xdf"
+                       "\x9a\x9d\xf1\x0d",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       },
+};
+
+static struct drbg_testvec drbg_nopr_ctr_aes256_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\x36\x40\x19\x40\xfa\x8b\x1f\xba\x91\xa1\x66\x1f"
+                       "\x21\x1d\x78\xa0\xb9\x38\x9a\x74\xe5\xbc\xcf\xec"
+                       "\xe8\xd7\x66\xaf\x1a\x6d\x3b\x14\x49\x6f\x25\xb0"
+                       "\xf1\x30\x1b\x4f\x50\x1b\xe3\x03\x80\xa1\x37\xeb",
+               .entropylen = 48,
+               .expected = (unsigned char *)
+                       "\x58\x62\xeb\x38\xbd\x55\x8d\xd9\x78\xa6\x96\xe6"
+                       "\xdf\x16\x47\x82\xdd\xd8\x87\xe7\xe9\xa6\xc9\xf3"
+                       "\xf1\xfb\xaf\xb7\x89\x41\xb5\x35\xa6\x49\x12\xdf"
+                       "\xd2\x24\xc6\xdc\x74\x54\xe5\x25\x0b\x3d\x97\x16"
+                       "\x5e\x16\x26\x0c\x2f\xaf\x1c\xc7\x73\x5c\xb7\x5f"
+                       "\xb4\xf0\x7e\x1d",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       },
+};
+
+static struct drbg_testvec drbg_nopr_ctr_aes128_tv_template[] = {
+       {
+               .entropy = (unsigned char *)
+                       "\x87\xe1\xc5\x32\x99\x7f\x57\xa3\x5c\x28\x6d\xe8"
+                       "\x64\xbf\xf2\x64\xa3\x9e\x98\xdb\x6c\x10\x78\x7f",
+               .entropylen = 24,
+               .expected = (unsigned char *)
+                       "\x2c\x14\x7e\x24\x11\x9a\xd8\xd4\xb2\xed\x61\xc1"
+                       "\x53\xd0\x50\xc9\x24\xff\x59\x75\x15\xf1\x17\x3a"
+                       "\x3d\xf4\x4b\x2c\x84\x28\xef\x89\x0e\xb9\xde\xf3"
+                       "\xe4\x78\x04\xb2\xfd\x9b\x35\x7f\xe1\x3f\x8a\x3e"
+                       "\x10\xc8\x67\x0a\xf9\xdf\x2d\x6c\x96\xfb\xb2\xb8"
+                       "\xcb\x2d\xd6\xb0",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\x71\xbd\xce\x35\x42\x7d\x20\xbf\x58\xcf\x17\x74"
+                       "\xce\x72\xd8\x33\x34\x50\x2d\x8f\x5b\x14\xc4\xdd",
+               .entropylen = 24,
+               .expected = (unsigned char *)
+                       "\x97\x33\xe8\x20\x12\xe2\x7b\xa1\x46\x8f\xf2\x34"
+                       "\xb3\xc9\xb6\x6b\x20\xb2\x4f\xee\x27\xd8\x0b\x21"
+                       "\x8c\xff\x63\x73\x69\x29\xfb\xf3\x85\xcd\x88\x8e"
+                       "\x43\x2c\x71\x8b\xa2\x55\xd2\x0f\x1d\x7f\xe3\xe1"
+                       "\x2a\xa3\xe9\x2c\x25\x89\xc7\x14\x52\x99\x56\xcc"
+                       "\xc3\xdf\xb3\x81",
+               .expectedlen = 64,
+               .addtla = (unsigned char *)
+                       "\x66\xef\x42\xd6\x9a\x8c\x3d\x6d\x4a\x9e\x95\xa6"
+                       "\x91\x4d\x81\x56",
+               .addtlb = (unsigned char *)
+                       "\xe3\x18\x83\xd9\x4b\x5e\xc4\xcc\xaa\x61\x2f\xbb"
+                       "\x4a\x55\xd1\xc6",
+               .addtllen = 16,
+               .pers = NULL,
+               .perslen = 0,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xca\x4b\x1e\xfa\x75\xbd\x69\x36\x38\x73\xb8\xf9"
+                       "\xdb\x4d\x35\x0e\x47\xbf\x6c\x37\x72\xfd\xf7\xa9",
+               .entropylen = 24,
+               .expected = (unsigned char *)
+                       "\x59\xc3\x19\x79\x1b\xb1\xf3\x0e\xe9\x34\xae\x6e"
+                       "\x8b\x1f\xad\x1f\x74\xca\x25\x45\x68\xb8\x7f\x75"
+                       "\x12\xf8\xf2\xab\x4c\x23\x01\x03\x05\xe1\x70\xee"
+                       "\x75\xd8\xcb\xeb\x23\x4c\x7a\x23\x6e\x12\x27\xdb"
+                       "\x6f\x7a\xac\x3c\x44\xb7\x87\x4b\x65\x56\x74\x45"
+                       "\x34\x30\x0c\x3d",
+               .expectedlen = 64,
+               .addtla = NULL,
+               .addtlb = NULL,
+               .addtllen = 0,
+               .pers = (unsigned char *)
+                       "\xeb\xaa\x60\x2c\x4d\xbe\x33\xff\x1b\xef\xbf\x0a"
+                       "\x0b\xc6\x97\x54",
+               .perslen = 16,
+       }, {
+               .entropy = (unsigned char *)
+                       "\xc0\x70\x1f\x92\x50\x75\x8f\xcd\xf2\xbe\x73\x98"
+                       "\x80\xdb\x66\xeb\x14\x68\xb4\xa5\x87\x9c\x2d\xa6",
+               .entropylen = 24,
+               .expected = (unsigned char *)
+                       "\x97\xc0\xc0\xe5\xa0\xcc\xf2\x4f\x33\x63\x48\x8a"
+                       "\xdb\x13\x0a\x35\x89\xbf\x80\x65\x62\xee\x13\x95"
+                       "\x7c\x33\xd3\x7d\xf4\x07\x77\x7a\x2b\x65\x0b\x5f"
+                       "\x45\x5c\x13\xf1\x90\x77\x7f\xc5\x04\x3f\xcc\x1a"
+                       "\x38\xf8\xcd\x1b\xbb\xd5\x57\xd1\x4a\x4c\x2e\x8a"
+                       "\x2b\x49\x1e\x5c",
+               .expectedlen = 64,
+               .addtla = (unsigned char *)
+                       "\xf9\x01\xf8\x16\x7a\x1d\xff\xde\x8e\x3c\x83\xe2"
+                       "\x44\x85\xe7\xfe",
+               .addtlb = (unsigned char *)
+                       "\x17\x1c\x09\x38\xc2\x38\x9f\x97\x87\x60\x55\xb4"
+                       "\x82\x16\x62\x7f",
+               .addtllen = 16,
+               .pers = (unsigned char *)
+                       "\x80\x08\xae\xe8\xe9\x69\x40\xc5\x08\x73\xc7\x9f"
+                       "\x8e\xcf\xe0\x02",
+               .perslen = 16,
+       },
+};
+
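How a harness might consume one of these vectors, as a hedged sketch built on the kernel's generic crypto_rng API (crypto_alloc_rng(), crypto_rng_reset() and crypto_rng_get_bytes() all exist; the in-kernel harness injects .entropy deterministically and passes .addtla/.addtlb through DRBG-specific test entry points that the plain crypto_rng interface cannot express, so this version is illustrative only):

	#include <crypto/rng.h>
	#include <linux/err.h>
	#include <linux/string.h>

	/* Simplified runner for one no-PR vector; illustrative only. */
	static int drbg_run_one(const struct drbg_testvec *tv, const char *alg)
	{
		struct crypto_rng *rng = crypto_alloc_rng(alg, 0, 0);
		u8 buf[128];	/* large enough for every .expectedlen above */
		int ret;

		if (IS_ERR(rng))
			return PTR_ERR(rng);

		/* Seed from the fixed test entropy (simplified). */
		ret = crypto_rng_reset(rng, tv->entropy, tv->entropylen);
		if (ret)
			goto out;

		/* CAVS style: generate twice, compare the second output. */
		ret = crypto_rng_get_bytes(rng, buf, tv->expectedlen);
		if (ret < 0)
			goto out;
		ret = crypto_rng_get_bytes(rng, buf, tv->expectedlen);
		if (ret < 0)
			goto out;

		ret = memcmp(buf, tv->expected, tv->expectedlen) ? -EINVAL : 0;
	out:
		crypto_free_rng(rng);
		return ret;
	}
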
 /* Cast5 test vectors from RFC 2144 */
 #define CAST5_ENC_TEST_VECTORS         4
 #define CAST5_DEC_TEST_VECTORS         4
@@ -20907,8 +21791,8 @@ static struct cipher_testvec cast5_enc_tv_template[] = {
                          "\xF5\xBC\x25\xD6\x02\x56\x57\x1C",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
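
A word on the .np/.tap changes that repeat through the rest of this file: .np is the number of scatterlist segments the test buffer is split into, and .tap[] lists the segment lengths, which sum to the buffer length (.rlen, 496 here). Moving from { 496 - 16, 16 } to { 496 - 20, 4, 16 } inserts a 4-byte sliver, smaller than the block size of the ciphers below, so the cross-segment walking paths get exercised as well. A hedged sketch of how such a split scatterlist could be built (helper name hypothetical):

	#include <linux/scatterlist.h>

	/* Split 'buf' into 'np' scatterlist segments of tap[i] bytes each. */
	static void build_split_sg(struct scatterlist *sg, const void *buf,
				   const unsigned int *tap, int np)
	{
		unsigned int off = 0;
		int i;

		sg_init_table(sg, np);
		for (i = 0; i < np; i++) {
			sg_set_buf(&sg[i], buf + off, tap[i]);
			off += tap[i];
		}
	}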
 
@@ -21068,8 +21952,8 @@ static struct cipher_testvec cast5_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -21206,8 +22090,8 @@ static struct cipher_testvec cast5_cbc_enc_tv_template[] = {
                          "\x1D\x18\x66\x44\x5B\x8F\x14\xEB",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -21344,8 +22228,8 @@ static struct cipher_testvec cast5_cbc_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -21495,8 +22379,8 @@ static struct cipher_testvec cast5_ctr_enc_tv_template[] = {
                          "\xC0\x0D\x96\xAA\x23\xF8\xFE\x13",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -21646,8 +22530,8 @@ static struct cipher_testvec cast5_ctr_dec_tv_template[] = {
                          "\xDC\x50\xE7\x7E\x15\x89\x20\xB7",
                .rlen   = 496,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 496 - 16, 16 },
+               .np     = 3,
+               .tap    = { 496 - 20, 4, 16 },
        },
 };
 
@@ -22805,8 +23689,8 @@ static struct cipher_testvec camellia_enc_tv_template[] = {
                          "\x33\x1A\xBB\xD3\xA2\x7E\x97\x66",
                .rlen   = 1008,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 1008 - 16, 16 },
+               .np     = 3,
+               .tap    = { 1008 - 20, 4, 16 },
        },
 };
 
@@ -23105,8 +23989,8 @@ static struct cipher_testvec camellia_dec_tv_template[] = {
                          "\x72\x09\xA0\x14\xAB\x42\xD9\x4D",
                .rlen   = 1008,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 1008 - 16, 16 },
+               .np     = 3,
+               .tap    = { 1008 - 20, 4, 16 },
        },
 };
 
@@ -23401,8 +24285,8 @@ static struct cipher_testvec camellia_cbc_enc_tv_template[] = {
                          "\x70\xC5\xB9\x0B\x3B\x7A\x6E\x6C",
                .rlen   = 1008,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 1008 - 16, 16 },
+               .np     = 3,
+               .tap    = { 1008 - 20, 4, 16 },
        },
 };
 
@@ -23697,8 +24581,8 @@ static struct cipher_testvec camellia_cbc_dec_tv_template[] = {
                          "\x72\x09\xA0\x14\xAB\x42\xD9\x4D",
                .rlen   = 1008,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 1008 - 16, 16 },
+               .np     = 3,
+               .tap    = { 1008 - 20, 4, 16 },
        },
 };
 
@@ -25283,8 +26167,8 @@ static struct cipher_testvec camellia_lrw_enc_tv_template[] = {
                          "\x5a\xa8\x92\x7f\xba\xe6\x0c\x95",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -25536,8 +26420,8 @@ static struct cipher_testvec camellia_lrw_dec_tv_template[] = {
                          "\x21\xc4\xc2\x75\x67\x89\x37\x0a",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -25878,8 +26762,8 @@ static struct cipher_testvec camellia_xts_enc_tv_template[] = {
                          "\xd5\xc6\x99\xcc\x4e\x6c\x94\x95",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
@@ -26221,8 +27105,8 @@ static struct cipher_testvec camellia_xts_dec_tv_template[] = {
                          "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
                .rlen   = 512,
                .also_non_np = 1,
-               .np     = 2,
-               .tap    = { 512 - 16, 16 },
+               .np     = 3,
+               .tap    = { 512 - 20, 4, 16 },
        },
 };
 
index c67f6f5..36b0e61 100644 (file)
 #include <linux/types.h>
 #include <linux/dmi.h>
 #include <linux/delay.h>
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/acpi.h>
@@ -52,6 +56,7 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
 static int acpi_ac_add(struct acpi_device *device);
 static int acpi_ac_remove(struct acpi_device *device);
 static void acpi_ac_notify(struct acpi_device *device, u32 event);
@@ -67,6 +72,13 @@ static int acpi_ac_resume(struct device *dev);
 #endif
 static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_ac_dir(void);
+extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+#endif
+
 static int ac_sleep_before_get_state_ms;
 
 static struct acpi_driver acpi_ac_driver = {
@@ -91,6 +103,16 @@ struct acpi_ac {
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static const struct file_operations acpi_ac_fops = {
+       .owner = THIS_MODULE,
+       .open = acpi_ac_open_fs,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+#endif
+
 /* --------------------------------------------------------------------------
                                AC Adapter Management
    -------------------------------------------------------------------------- */
@@ -143,6 +165,83 @@ static enum power_supply_property ac_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
 };
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+/* --------------------------------------------------------------------------
+                              FS Interface (/proc)
+   -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_ac_dir;
+
+static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
+{
+       struct acpi_ac *ac = seq->private;
+
+       if (!ac)
+               return 0;
+
+       if (acpi_ac_get_state(ac)) {
+               seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
+               return 0;
+       }
+
+       seq_puts(seq, "state:                   ");
+       switch (ac->state) {
+       case ACPI_AC_STATUS_OFFLINE:
+               seq_puts(seq, "off-line\n");
+               break;
+       case ACPI_AC_STATUS_ONLINE:
+               seq_puts(seq, "on-line\n");
+               break;
+       default:
+               seq_puts(seq, "unknown\n");
+               break;
+       }
+
+       return 0;
+}
+
+static int acpi_ac_open_fs(struct inode *inode, struct file *file)
+{
+       return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
+}
+
+static int acpi_ac_add_fs(struct acpi_ac *ac)
+{
+       struct proc_dir_entry *entry = NULL;
+
+       printk(KERN_WARNING PREFIX "Deprecated procfs interface for AC is"
+                       " loaded; please rebuild with CONFIG_ACPI_PROCFS_POWER"
+                       " cleared\n");
+       if (!acpi_device_dir(ac->device)) {
+               acpi_device_dir(ac->device) =
+                       proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir);
+               if (!acpi_device_dir(ac->device))
+                       return -ENODEV;
+       }
+
+       /* 'state' [R] */
+       entry = proc_create_data(ACPI_AC_FILE_STATE,
+                                S_IRUGO, acpi_device_dir(ac->device),
+                                &acpi_ac_fops, ac);
+       if (!entry)
+               return -ENODEV;
+       return 0;
+}
+
+static int acpi_ac_remove_fs(struct acpi_ac *ac)
+{
+       if (acpi_device_dir(ac->device)) {
+               remove_proc_entry(ACPI_AC_FILE_STATE,
+                                 acpi_device_dir(ac->device));
+               remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir);
+               acpi_device_dir(ac->device) = NULL;
+       }
+
+       return 0;
+}
+#endif
+
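With CONFIG_ACPI_PROCFS_POWER set, the interface above exposes one read-only file per adapter; going by the proc_mkdir()/acpi_device_bid() usage, reading /proc/acpi/ac_adapter/<BID>/state (the <BID> directory name depends on the firmware) should yield output like:

	state:                   on-line
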
 /* --------------------------------------------------------------------------
                                    Driver Model
    -------------------------------------------------------------------------- */
@@ -243,6 +342,11 @@ static int acpi_ac_add(struct acpi_device *device)
                goto end;
 
        ac->charger.name = acpi_device_bid(device);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       result = acpi_ac_add_fs(ac);
+       if (result)
+               goto end;
+#endif
        ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
@@ -258,8 +362,12 @@ static int acpi_ac_add(struct acpi_device *device)
        ac->battery_nb.notifier_call = acpi_ac_battery_notify;
        register_acpi_notifier(&ac->battery_nb);
 end:
-       if (result)
+       if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_ac_remove_fs(ac);
+#endif
                kfree(ac);
+       }
 
        dmi_check_system(ac_dmi_table);
        return result;
@@ -303,6 +411,10 @@ static int acpi_ac_remove(struct acpi_device *device)
                power_supply_unregister(&ac->charger);
        unregister_acpi_notifier(&ac->battery_nb);
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_ac_remove_fs(ac);
+#endif
+
        kfree(ac);
 
        return 0;
@@ -315,9 +427,20 @@ static int __init acpi_ac_init(void)
        if (acpi_disabled)
                return -ENODEV;
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_ac_dir = acpi_lock_ac_dir();
+       if (!acpi_ac_dir)
+               return -ENODEV;
+#endif
+
        result = acpi_bus_register_driver(&acpi_ac_driver);
-       if (result < 0)
+       if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
                return -ENODEV;
+       }
 
        return 0;
 }
@@ -325,6 +448,9 @@ static int __init acpi_ac_init(void)
 static void __exit acpi_ac_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_ac_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
 }
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
index 63407d2..9cb65b0 100644 (file)
@@ -34,6 +34,9 @@ ACPI_MODULE_NAME("acpi_lpss");
 
 /* Offsets relative to LPSS_PRIVATE_OFFSET */
 #define LPSS_CLK_DIVIDER_DEF_MASK      (BIT(1) | BIT(16))
+#define LPSS_RESETS                    0x04
+#define LPSS_RESETS_RESET_FUNC         BIT(0)
+#define LPSS_RESETS_RESET_APB          BIT(1)
 #define LPSS_GENERAL                   0x08
 #define LPSS_GENERAL_LTR_MODE_SW       BIT(2)
 #define LPSS_GENERAL_UART_RTS_OVRD     BIT(3)
@@ -99,6 +102,17 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
        writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
 }
 
+static void lpss_i2c_setup(struct lpss_private_data *pdata)
+{
+       unsigned int offset;
+       u32 val;
+
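+       /*
+        * Writing 1 to both bits deasserts the FUNC and APB resets and
+        * takes the I2C host controller out of reset (inferred from the
+        * bit names defined above).
+        */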
+       offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
+       val = readl(pdata->mmio_base + offset);
+       val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
+       writel(val, pdata->mmio_base + offset);
+}
+
 static struct lpss_device_desc lpt_dev_desc = {
        .clk_required = true,
        .prv_offset = 0x800,
@@ -171,6 +185,7 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
        .prv_offset = 0x800,
        .save_ctx = true,
        .shared_clock = &i2c_clock,
+       .setup = lpss_i2c_setup,
 };
 
 #else
index 6703c1f..4ddb0dc 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/module.h>
 
 static const struct acpi_device_id acpi_pnp_device_ids[] = {
+       /* soc_button_array */
+       {"PNP0C40"},
        /* pata_isapnp */
        {"PNP0600"},            /* Generic ESDI/IDE/ATA compatible hard disk controller */
        /* floppy */
index e48fc98..130f513 100644 (file)
 #include <linux/jiffies.h>
 #include <linux/async.h>
 #include <linux/dmi.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
 #ifdef CONFIG_ACPI_PROCFS_POWER
@@ -70,6 +72,7 @@ MODULE_DESCRIPTION("ACPI Battery Driver");
 MODULE_LICENSE("GPL");
 
 static int battery_bix_broken_package;
+static int battery_notification_delay_ms;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -532,6 +535,20 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
                        " invalid.\n");
        }
 
+       /*
+        * When fully charged, some batteries wrongly report
+        * capacity_now = design_capacity instead of
+        * capacity_now = full_charge_capacity.
+        */
+       if (battery->capacity_now > battery->full_charge_capacity
+           && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
+               battery->capacity_now = battery->full_charge_capacity;
+               if (battery->capacity_now != battery->design_capacity)
+                       printk_once(KERN_WARNING FW_BUG
+                               "battery: reported current charge level (%d) "
+                               "is higher than reported maximum charge level (%d).\n",
+                               battery->capacity_now, battery->full_charge_capacity);
+       }
+
        if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
            && battery->capacity_now >= 0 && battery->capacity_now <= 100)
                battery->capacity_now = (battery->capacity_now *
@@ -930,7 +947,10 @@ static ssize_t acpi_battery_write_alarm(struct file *file,
                goto end;
        }
        alarm_string[count] = '\0';
-       battery->alarm = simple_strtol(alarm_string, NULL, 0);
+       if (kstrtoint(alarm_string, 0, &battery->alarm)) {
+               result = -EINVAL;
+               goto end;
+       }
        result = acpi_battery_set_alarm(battery);
       end:
        if (!result)
@@ -1062,6 +1082,14 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
        if (!battery)
                return;
        old = battery->bat.dev;
+       /*
+        * On the Acer Aspire V5-573G, notifications are sometimes triggered
+        * too early. For example, when AC is unplugged and the notification
+        * fires, the battery state is still reported as "Full"; it changes
+        * to "Discharging" only after a short delay, without any further
+        * notification.
+        */
+       if (battery_notification_delay_ms > 0)
+               msleep(battery_notification_delay_ms);
        if (event == ACPI_BATTERY_NOTIFY_INFO)
                acpi_battery_refresh(battery);
        acpi_battery_update(battery, false);
@@ -1106,17 +1134,60 @@ static int battery_notify(struct notifier_block *nb,
        return 0;
 }
 
+static int battery_bix_broken_package_quirk(const struct dmi_system_id *d)
+{
+       battery_bix_broken_package = 1;
+       return 0;
+}
+
+static int battery_notification_delay_quirk(const struct dmi_system_id *d)
+{
+       battery_notification_delay_ms = 1000;
+       return 0;
+}
+
 static struct dmi_system_id bat_dmi_table[] = {
        {
+               .callback = battery_bix_broken_package_quirk,
                .ident = "NEC LZ750/LS",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
                },
        },
+       {
+               .callback = battery_notification_delay_quirk,
+               .ident = "Acer Aspire V5-573G",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
+               },
+       },
        {},
 };
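
Both quirks hang off the same callback-driven table: dmi_check_system() invokes an entry's .callback once all of its .matches fire, and scanning continues while callbacks return 0, so one table can set several independent quirk flags. A hedged sketch of what one more entry would look like (vendor and product strings hypothetical):

	static int example_quirk(const struct dmi_system_id *d)
	{
		pr_info("battery: applying quirk for %s\n", d->ident);
		return 0;	/* keep scanning; other entries may match too */
	}

	/* ...and in bat_dmi_table:
	 *	{
	 *		.callback = example_quirk,
	 *		.ident = "Example Laptop 123",
	 *		.matches = {
	 *			DMI_MATCH(DMI_SYS_VENDOR, "Example"),
	 *			DMI_MATCH(DMI_PRODUCT_NAME, "Laptop 123"),
	 *		},
	 *	},
	 */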
 
+/*
+ * On some machines (e.g. the Lenovo Z480) the EC is not stable
+ * during boot, so reading the battery information can fail and
+ * the battery driver then fails to probe. After several retries
+ * the operation usually succeeds, so retry up to five times with
+ * a 20ms sleep between attempts.
+ */
+static int acpi_battery_update_retry(struct acpi_battery *battery)
+{
+       int retry, ret;
+
+       for (retry = 5; retry; retry--) {
+               ret = acpi_battery_update(battery, false);
+               if (!ret)
+                       break;
+
+               msleep(20);
+       }
+       return ret;
+}
+
 static int acpi_battery_add(struct acpi_device *device)
 {
        int result = 0;
@@ -1135,9 +1206,11 @@ static int acpi_battery_add(struct acpi_device *device)
        mutex_init(&battery->sysfs_lock);
        if (acpi_has_method(battery->device->handle, "_BIX"))
                set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
-       result = acpi_battery_update(battery, false);
+
+       result = acpi_battery_update_retry(battery);
        if (result)
                goto fail;
+
 #ifdef CONFIG_ACPI_PROCFS_POWER
        result = acpi_battery_add_fs(device);
 #endif
@@ -1227,8 +1300,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
        if (acpi_disabled)
                return;
 
-       if (dmi_check_system(bat_dmi_table))
-               battery_bix_broken_package = 1;
+       dmi_check_system(bat_dmi_table);
        
 #ifdef CONFIG_ACPI_PROCFS_POWER
        acpi_battery_dir = acpi_lock_battery_dir();
index ad11ba4..a66ab65 100644 (file)
@@ -1,11 +1,14 @@
 /*
- *  ec.c - ACPI Embedded Controller Driver (v2.1)
+ *  ec.c - ACPI Embedded Controller Driver (v2.2)
  *
- *  Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
- *  Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
- *  Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
- *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *  Copyright (C) 2001-2014 Intel Corporation
+ *    Author: 2014       Lv Zheng <lv.zheng@intel.com>
+ *            2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
+ *            2006       Denis Sadykov <denis.m.sadykov@intel.com>
+ *            2004       Luming Yu <luming.yu@intel.com>
+ *            2001, 2002 Andy Grover <andrew.grover@intel.com>
+ *            2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *  Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -52,6 +55,7 @@
 /* EC status register */
 #define ACPI_EC_FLAG_OBF       0x01    /* Output buffer full */
 #define ACPI_EC_FLAG_IBF       0x02    /* Input buffer full */
+#define ACPI_EC_FLAG_CMD       0x08    /* Input buffer contains a command */
 #define ACPI_EC_FLAG_BURST     0x10    /* burst mode */
 #define ACPI_EC_FLAG_SCI       0x20    /* EC-SCI occurred */
 
@@ -78,6 +82,9 @@ enum {
        EC_FLAGS_BLOCKED,               /* Transactions are blocked */
 };
 
+#define ACPI_EC_COMMAND_POLL           0x01 /* Available for command byte */
+#define ACPI_EC_COMMAND_COMPLETE       0x02 /* Completed last byte */
+
 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
 static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
 module_param(ec_delay, uint, 0644);
@@ -109,7 +116,7 @@ struct transaction {
        u8 ri;
        u8 wlen;
        u8 rlen;
-       bool done;
+       u8 flags;
 };
 
 struct acpi_ec *boot_ec, *first_ec;
@@ -127,83 +134,104 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
 {
        u8 x = inb(ec->command_addr);
-       pr_debug("---> status = 0x%2.2x\n", x);
+       pr_debug("EC_SC(R) = 0x%2.2x "
+                "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
+                x,
+                !!(x & ACPI_EC_FLAG_SCI),
+                !!(x & ACPI_EC_FLAG_BURST),
+                !!(x & ACPI_EC_FLAG_CMD),
+                !!(x & ACPI_EC_FLAG_IBF),
+                !!(x & ACPI_EC_FLAG_OBF));
        return x;
 }
 
 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
        u8 x = inb(ec->data_addr);
-       pr_debug("---> data = 0x%2.2x\n", x);
+       pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
        return x;
 }
 
 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
-       pr_debug("<--- command = 0x%2.2x\n", command);
+       pr_debug("EC_SC(W) = 0x%2.2x\n", command);
        outb(command, ec->command_addr);
 }
 
 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 {
-       pr_debug("<--- data = 0x%2.2x\n", data);
+       pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
        outb(data, ec->data_addr);
 }
 
-static int ec_transaction_done(struct acpi_ec *ec)
+static int ec_transaction_completed(struct acpi_ec *ec)
 {
        unsigned long flags;
        int ret = 0;
        spin_lock_irqsave(&ec->lock, flags);
-       if (!ec->curr || ec->curr->done)
+       if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
                ret = 1;
        spin_unlock_irqrestore(&ec->lock, flags);
        return ret;
 }
 
-static void start_transaction(struct acpi_ec *ec)
+static bool advance_transaction(struct acpi_ec *ec)
 {
-       ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
-       ec->curr->done = false;
-       acpi_ec_write_cmd(ec, ec->curr->command);
-}
-
-static void advance_transaction(struct acpi_ec *ec, u8 status)
-{
-       unsigned long flags;
        struct transaction *t;
+       u8 status;
+       bool wakeup = false;
 
-       spin_lock_irqsave(&ec->lock, flags);
+       pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
+       status = acpi_ec_read_status(ec);
        t = ec->curr;
        if (!t)
-               goto unlock;
-       if (t->wlen > t->wi) {
-               if ((status & ACPI_EC_FLAG_IBF) == 0)
-                       acpi_ec_write_data(ec,
-                               t->wdata[t->wi++]);
-               else
-                       goto err;
-       } else if (t->rlen > t->ri) {
-               if ((status & ACPI_EC_FLAG_OBF) == 1) {
-                       t->rdata[t->ri++] = acpi_ec_read_data(ec);
-                       if (t->rlen == t->ri)
-                               t->done = true;
+               goto err;
+       if (t->flags & ACPI_EC_COMMAND_POLL) {
+               if (t->wlen > t->wi) {
+                       if ((status & ACPI_EC_FLAG_IBF) == 0)
+                               acpi_ec_write_data(ec, t->wdata[t->wi++]);
+                       else
+                               goto err;
+               } else if (t->rlen > t->ri) {
+                       if ((status & ACPI_EC_FLAG_OBF) == 1) {
+                               t->rdata[t->ri++] = acpi_ec_read_data(ec);
+                               if (t->rlen == t->ri) {
+                                       t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                                       wakeup = true;
+                               }
+                       } else
+                               goto err;
+               } else if (t->wlen == t->wi &&
+                          (status & ACPI_EC_FLAG_IBF) == 0) {
+                       t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                       wakeup = true;
+               }
+               return wakeup;
+       } else {
+               if ((status & ACPI_EC_FLAG_IBF) == 0) {
+                       acpi_ec_write_cmd(ec, t->command);
+                       t->flags |= ACPI_EC_COMMAND_POLL;
                } else
                        goto err;
-       } else if (t->wlen == t->wi &&
-                  (status & ACPI_EC_FLAG_IBF) == 0)
-               t->done = true;
-       goto unlock;
+               return wakeup;
+       }
 err:
        /*
         * If the SCI bit is set, do not treat this as a false IRQ;
         * otherwise an unhandled IRQ would be counted as a false one.
         */
-       if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
-               ++t->irq_count;
+       if (!(status & ACPI_EC_FLAG_SCI)) {
+               if (in_interrupt() && t)
+                       ++t->irq_count;
+       }
+       return wakeup;
+}
 
-unlock:
-       spin_unlock_irqrestore(&ec->lock, flags);
+static void start_transaction(struct acpi_ec *ec)
+{
+       ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
+       ec->curr->flags = 0;
+       (void)advance_transaction(ec);
 }
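
The rewritten advance_transaction() above is a two-phase state machine: until ACPI_EC_COMMAND_POLL is set the command byte still has to be written; afterwards data bytes move until ACPI_EC_COMMAND_COMPLETE is reached, and the return value tells the caller (GPE handler or poll loop) whether to wake the waiter. A minimal plain-C model of that control flow, with the spurious-IRQ accounting left out:

	#include <stdbool.h>
	#include <stdint.h>

	#define CMD_POLL	0x01	/* command byte sent, moving data */
	#define CMD_COMPLETE	0x02	/* last byte transferred */

	struct txn {
		uint8_t command, flags;
		const uint8_t *wdata;
		uint8_t *rdata;
		uint8_t wi, ri, wlen, rlen;
	};

	/* 'ibf'/'obf' stand in for the EC status bits, 'rd'/'wr' for port
	 * I/O; returns true when the sleeping waiter should be woken. */
	static bool advance(struct txn *t, bool ibf, bool obf,
			    uint8_t (*rd)(void), void (*wr)(uint8_t))
	{
		if (!(t->flags & CMD_POLL)) {		/* phase 1: command */
			if (!ibf) {
				wr(t->command);
				t->flags |= CMD_POLL;
			}
			return false;
		}
		if (t->wi < t->wlen) {			/* phase 2: write data */
			if (!ibf)
				wr(t->wdata[t->wi++]);
		} else if (t->ri < t->rlen) {		/* phase 2: read data */
			if (obf) {
				t->rdata[t->ri++] = rd();
				if (t->ri == t->rlen) {
					t->flags |= CMD_COMPLETE;
					return true;
				}
			}
		} else if (!ibf) {			/* write-only: done */
			t->flags |= CMD_COMPLETE;
			return true;
		}
		return false;
	}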
 
 static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
@@ -228,15 +256,17 @@ static int ec_poll(struct acpi_ec *ec)
                        /* don't sleep with disabled interrupts */
                        if (EC_FLAGS_MSI || irqs_disabled()) {
                                udelay(ACPI_EC_MSI_UDELAY);
-                               if (ec_transaction_done(ec))
+                               if (ec_transaction_completed(ec))
                                        return 0;
                        } else {
                                if (wait_event_timeout(ec->wait,
-                                               ec_transaction_done(ec),
+                                               ec_transaction_completed(ec),
                                                msecs_to_jiffies(1)))
                                        return 0;
                        }
-                       advance_transaction(ec, acpi_ec_read_status(ec));
+                       spin_lock_irqsave(&ec->lock, flags);
+                       (void)advance_transaction(ec);
+                       spin_unlock_irqrestore(&ec->lock, flags);
                } while (time_before(jiffies, delay));
                pr_debug("controller reset, restart transaction\n");
                spin_lock_irqsave(&ec->lock, flags);
@@ -268,23 +298,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
        return ret;
 }
 
-static int ec_check_ibf0(struct acpi_ec *ec)
-{
-       u8 status = acpi_ec_read_status(ec);
-       return (status & ACPI_EC_FLAG_IBF) == 0;
-}
-
-static int ec_wait_ibf0(struct acpi_ec *ec)
-{
-       unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
-       /* interrupt wait manually if GPE mode is not active */
-       while (time_before(jiffies, delay))
-               if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
-                                       msecs_to_jiffies(1)))
-                       return 0;
-       return -ETIME;
-}
-
 static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 {
        int status;
@@ -305,12 +318,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                        goto unlock;
                }
        }
-       if (ec_wait_ibf0(ec)) {
-               pr_err("input buffer is not empty, "
-                               "aborting transaction\n");
-               status = -ETIME;
-               goto end;
-       }
        pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
                        t->command, t->wdata ? t->wdata[0] : 0);
        /* disable GPE during transaction if storm is detected */
@@ -334,7 +341,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
        }
        pr_debug("transaction end\n");
-end:
        if (ec->global_lock)
                acpi_release_global_lock(glk);
 unlock:
@@ -634,17 +640,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
        u32 gpe_number, void *data)
 {
+       unsigned long flags;
        struct acpi_ec *ec = data;
-       u8 status = acpi_ec_read_status(ec);
 
-       pr_debug("~~~> interrupt, status:0x%02x\n", status);
-
-       advance_transaction(ec, status);
-       if (ec_transaction_done(ec) &&
-           (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+       spin_lock_irqsave(&ec->lock, flags);
+       if (advance_transaction(ec))
                wake_up(&ec->wait);
-               ec_check_sci(ec, acpi_ec_read_status(ec));
-       }
+       spin_unlock_irqrestore(&ec->lock, flags);
+       ec_check_sci(ec, acpi_ec_read_status(ec));
        return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
 }
 
@@ -1066,8 +1069,10 @@ int __init acpi_ec_ecdt_probe(void)
        /* fall through */
        }
 
-       if (EC_FLAGS_SKIP_DSDT_SCAN)
+       if (EC_FLAGS_SKIP_DSDT_SCAN) {
+               kfree(saved_ec);
                return -ENODEV;
+       }
 
        /* This workaround is needed only on some broken machines,
         * which require early EC, but fail to provide ECDT */
@@ -1105,6 +1110,7 @@ install:
        }
 error:
        kfree(boot_ec);
+       kfree(saved_ec);
        boot_ec = NULL;
        return -ENODEV;
 }
index 3f2bdc8..bad25b0 100644 (file)
@@ -235,7 +235,8 @@ void acpi_os_vprintf(const char *fmt, va_list args)
 static unsigned long acpi_rsdp;
 static int __init setup_acpi_rsdp(char *arg)
 {
-       acpi_rsdp = simple_strtoul(arg, NULL, 16);
+       if (kstrtoul(arg, 16, &acpi_rsdp))
+               return -EINVAL;
        return 0;
 }
 early_param("acpi_rsdp", setup_acpi_rsdp);
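
Every simple_strto*() to kstrto*() conversion in this series has the same motivation: the kstrto* helpers reject empty input, trailing garbage and out-of-range values and report it through their return code, whereas simple_strtoul() silently parses as far as it can. A hedged sketch of the resulting pattern, using a hypothetical "example=" boot parameter:

	#include <linux/init.h>
	#include <linux/kernel.h>

	static unsigned long example_val;

	static int __init setup_example(char *arg)
	{
		/* Rejects "example=", "example=12junk" and overflow instead
		 * of silently storing a partial or zero value. */
		if (kstrtoul(arg, 0, &example_val))
			return -EINVAL;
		return 0;
	}
	early_param("example", setup_example);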
index 0bdacc5..2ba8f02 100644 (file)
@@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
        switch (ares->type) {
        case ACPI_RESOURCE_TYPE_MEMORY24:
                memory24 = &ares->data.memory24;
-               if (!memory24->address_length)
+               if (!memory24->minimum && !memory24->address_length)
                        return false;
                acpi_dev_get_memresource(res, memory24->minimum,
                                         memory24->address_length,
@@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
                break;
        case ACPI_RESOURCE_TYPE_MEMORY32:
                memory32 = &ares->data.memory32;
-               if (!memory32->address_length)
+               if (!memory32->minimum && !memory32->address_length)
                        return false;
                acpi_dev_get_memresource(res, memory32->minimum,
                                         memory32->address_length,
@@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
                break;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                fixed_memory32 = &ares->data.fixed_memory32;
-               if (!fixed_memory32->address_length)
+               if (!fixed_memory32->address && !fixed_memory32->address_length)
                        return false;
                acpi_dev_get_memresource(res, fixed_memory32->address,
                                         fixed_memory32->address_length,
@@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
        switch (ares->type) {
        case ACPI_RESOURCE_TYPE_IO:
                io = &ares->data.io;
-               if (!io->address_length)
+               if (!io->minimum && !io->address_length)
                        return false;
                acpi_dev_get_ioresource(res, io->minimum,
                                        io->address_length,
@@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
                break;
        case ACPI_RESOURCE_TYPE_FIXED_IO:
                fixed_io = &ares->data.fixed_io;
-               if (!fixed_io->address_length)
+               if (!fixed_io->address && !fixed_io->address_length)
                        return false;
                acpi_dev_get_ioresource(res, fixed_io->address,
                                        fixed_io->address_length,
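
The hunks above apply one rule uniformly: a memory or I/O descriptor is now rejected only when both its start address and its length are zero, so a descriptor that supplies a base address with a zero length is no longer discarded. Illustratively (values hypothetical):

	/* ACPI_RESOURCE_TYPE_FIXED_IO:
	 *   { .address = 0x60, .address_length = 0 }  ->  kept after this change
	 *   { .address = 0,    .address_length = 0 }  ->  still rejected
	 */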
index 05550ba..6d5a6cd 100644 (file)
@@ -360,7 +360,8 @@ static int __init acpi_parse_apic_instance(char *str)
        if (!str)
                return -EINVAL;
 
-       acpi_apic_instance = simple_strtoul(str, NULL, 0);
+       if (kstrtoint(str, 0, &acpi_apic_instance))
+               return -EINVAL;
 
        pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
 
index fb9ffe9..350d52a 100644 (file)
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
 MODULE_DESCRIPTION("ACPI Video Driver");
 MODULE_LICENSE("GPL");
 
-static bool brightness_switch_enabled;
+static bool brightness_switch_enabled = 1;
 module_param(brightness_switch_enabled, bool, 0644);
 
 /*
@@ -241,13 +241,14 @@ static bool acpi_video_use_native_backlight(void)
                return use_native_backlight_dmi;
 }
 
-static bool acpi_video_verify_backlight_support(void)
+bool acpi_video_verify_backlight_support(void)
 {
        if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
            backlight_device_registered(BACKLIGHT_RAW))
                return false;
        return acpi_video_backlight_support();
 }
+EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support);
 
 /* backlight device sysfs support */
 static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -562,6 +563,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-471G"),
                },
        },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "Acer TravelMate B113",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
+               },
+       },
        {
        .callback = video_set_use_native_backlight,
        .ident = "HP ProBook 4340s",
@@ -572,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
        },
        {
        .callback = video_set_use_native_backlight,
+       .ident = "HP ProBook 4540s",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
+               },
+       },
+       {
+       .callback = video_set_use_native_backlight,
        .ident = "HP ProBook 2013 models",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
index 33e3db5..c42feb2 100644 (file)
@@ -166,6 +166,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
                },
        },
+       {
+       .callback = video_detect_force_vendor,
+       .ident = "Dell Inspiron 5737",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
+               },
+       },
        { },
 };
 
index 7671dba..e65d400 100644 (file)
@@ -141,6 +141,15 @@ config AHCI_SUNXI
 
          If unsure, say N.
 
+config AHCI_TEGRA
+       tristate "NVIDIA Tegra124 AHCI SATA support"
+       depends on ARCH_TEGRA
+       help
+         This option enables support for the NVIDIA Tegra124 SoC's
+         onboard AHCI SATA.
+
+         If unsure, say N.
+
 config AHCI_XGENE
        tristate "APM X-Gene 6.0Gbps AHCI SATA host controller support"
        depends on PHY_XGENE
index 5a02aee..ae41107 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_AHCI_IMX)                += ahci_imx.o libahci.o libahci_platform.o
 obj-$(CONFIG_AHCI_MVEBU)       += ahci_mvebu.o libahci.o libahci_platform.o
 obj-$(CONFIG_AHCI_SUNXI)       += ahci_sunxi.o libahci.o libahci_platform.o
 obj-$(CONFIG_AHCI_ST)          += ahci_st.o libahci.o libahci_platform.o
+obj-$(CONFIG_AHCI_TEGRA)       += ahci_tegra.o libahci.o libahci_platform.o
 obj-$(CONFIG_AHCI_XGENE)       += ahci_xgene.o libahci.o libahci_platform.o
 
 # SFF w/ custom DMA
index 0cd7c7a..25d0ac3 100644 (file)
@@ -441,7 +441,7 @@ static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id
        hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
 
        /* save initial config */
-       ahci_save_initial_config(&pdev->dev, hpriv, 0, 0);
+       ahci_save_initial_config(&pdev->dev, hpriv);
 
        /* prepare host */
        if (hpriv->cap & HOST_CAP_NCQ)
index dae5607..a29f801 100644 (file)
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
        /* Promise */
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
+       { PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
        /* Asmedia */
        { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
@@ -525,8 +526,7 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                          "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
        }
 
-       ahci_save_initial_config(&pdev->dev, hpriv, force_port_map,
-                                mask_port_map);
+       ahci_save_initial_config(&pdev->dev, hpriv);
 }
 
 static int ahci_pci_reset_controller(struct ata_host *host)
index 05882e4..59ae0ee 100644 (file)
@@ -53,7 +53,7 @@
 
 enum {
        AHCI_MAX_PORTS          = 32,
-       AHCI_MAX_CLKS           = 3,
+       AHCI_MAX_CLKS           = 4,
        AHCI_MAX_SG             = 168, /* hardware max is 64K */
        AHCI_DMA_BOUNDARY       = 0xffffffff,
        AHCI_MAX_CMDS           = 32,
@@ -316,8 +316,12 @@ struct ahci_port_priv {
 };
 
 struct ahci_host_priv {
-       void __iomem *          mmio;           /* bus-independent mem map */
+       /* Input fields */
        unsigned int            flags;          /* AHCI_HFLAG_* */
+       u32                     force_port_map; /* force port map */
+       u32                     mask_port_map;  /* mask out particular bits */
+
+       void __iomem *          mmio;           /* bus-independent mem map */
        u32                     cap;            /* cap to use */
        u32                     cap2;           /* cap2 to use */
        u32                     port_map;       /* port map to use */
@@ -330,7 +334,12 @@ struct ahci_host_priv {
        bool                    got_runtime_pm; /* Did we do pm_runtime_get? */
        struct clk              *clks[AHCI_MAX_CLKS]; /* Optional */
        struct regulator        *target_pwr;    /* Optional */
-       struct phy              *phy;           /* If platform uses phy */
+       /*
+        * PHYs used by the platform, if any. There is a 1:1 relation
+        * between the port number and the PHY position in this array.
+        */
+       struct phy              **phys;
+       unsigned                nports;         /* Number of ports */
        void                    *plat_data;     /* Other platform data */
        /*
         * Optional ahci_start_engine override, if not set this gets set to the
@@ -361,9 +370,7 @@ unsigned int ahci_dev_classify(struct ata_port *ap);
 void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
                        u32 opts);
 void ahci_save_initial_config(struct device *dev,
-                             struct ahci_host_priv *hpriv,
-                             unsigned int force_port_map,
-                             unsigned int mask_port_map);
+                             struct ahci_host_priv *hpriv);
 void ahci_init_controller(struct ata_host *host);
 int ahci_reset_controller(struct ata_host *host);
 
@@ -371,7 +378,9 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
                      int pmp, unsigned long deadline,
                      int (*check_ready)(struct ata_link *link));
 
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_fis_rx(struct ata_port *ap);
 void ahci_start_engine(struct ata_port *ap);
 int ahci_check_ready(struct ata_link *link);
 int ahci_kick_engine(struct ata_port *ap);
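ahci_qc_issue() and ahci_start_fis_rx() are exported here so platform glue can wrap command issue and restart a wedged port engine without open-coding register accesses. A sketch of the intended stop/FIS-rx/start sequence, mirroring the X-Gene workaround later in this series (the wrapper function itself is illustrative):

        static void example_restart_port(struct ata_port *ap)
        {
                struct ahci_host_priv *hpriv = ap->host->private_data;

                ahci_stop_engine(ap);           /* halt the port DMA engine */
                ahci_start_fis_rx(ap);          /* re-enable FIS reception */
                hpriv->start_engine(ap);        /* restart via the per-host hook */
        }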
index 2b77d53..ad1e71e 100644 (file)
@@ -85,8 +85,7 @@ static int ahci_da850_probe(struct platform_device *pdev)
 
        da850_sata_init(dev, pwrdn_reg, hpriv->mmio);
 
-       rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info,
-                                    0, 0, 0);
+       rc = ahci_platform_init_host(pdev, hpriv, &ahci_da850_port_info);
        if (rc)
                goto disable_resources;
 
index 3a90152..f3970b4 100644 (file)
@@ -58,10 +58,13 @@ enum ahci_imx_type {
 struct imx_ahci_priv {
        struct platform_device *ahci_pdev;
        enum ahci_imx_type type;
+       struct clk *sata_clk;
+       struct clk *sata_ref_clk;
        struct clk *ahb_clk;
        struct regmap *gpr;
        bool no_device;
        bool first_time;
+       u32 phy_params;
 };
 
 static int ahci_imx_hotplug;
@@ -224,7 +227,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
                        return ret;
        }
 
-       ret = ahci_platform_enable_clks(hpriv);
+       ret = clk_prepare_enable(imxpriv->sata_ref_clk);
        if (ret < 0)
                goto disable_regulator;
 
@@ -246,14 +249,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
                                   IMX6Q_GPR13_SATA_TX_LVL_MASK |
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN |
                                   IMX6Q_GPR13_SATA_TX_EDGE_RATE,
-                                  IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB |
-                                  IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
-                                  IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
-                                  IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
-                                  IMX6Q_GPR13_SATA_MPLL_SS_EN |
-                                  IMX6Q_GPR13_SATA_TX_ATTEN_9_16 |
-                                  IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB |
-                                  IMX6Q_GPR13_SATA_TX_LVL_1_025_V);
+                                  imxpriv->phy_params);
                regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN,
                                   IMX6Q_GPR13_SATA_MPLL_CLK_EN);
@@ -263,7 +259,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
                ret = imx_sata_phy_reset(hpriv);
                if (ret) {
                        dev_err(dev, "failed to reset phy: %d\n", ret);
-                       goto disable_regulator;
+                       goto disable_clk;
                }
        }
 
@@ -271,6 +267,8 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
 
        return 0;
 
+disable_clk:
+       clk_disable_unprepare(imxpriv->sata_ref_clk);
 disable_regulator:
        if (hpriv->target_pwr)
                regulator_disable(hpriv->target_pwr);
@@ -291,7 +289,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
                                   !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
        }
 
-       ahci_platform_disable_clks(hpriv);
+       clk_disable_unprepare(imxpriv->sata_ref_clk);
 
        if (hpriv->target_pwr)
                regulator_disable(hpriv->target_pwr);
@@ -324,6 +322,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
        writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
        imx_sata_disable(hpriv);
        imxpriv->no_device = true;
+
+       dev_info(ap->dev, "no device found, disabling link.\n");
+       dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
 }
 
 static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
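The hints printed by ahci_imx_error_handler() above refer to the driver's hotplug module parameter, backed by the ahci_imx_hotplug variable declared earlier in this file. A plausible wiring, assuming the standard module_param_named() mechanism (the permissions and description text here are illustrative):

        module_param_named(hotplug, ahci_imx_hotplug, int, 0644);
        MODULE_PARM_DESC(hotplug, "AHCI IMX hotplug support (0=off, 1=on)");

module_param_named() is what makes the option appear under the MODULE_PARAM_PREFIX ".hotplug" name used in the message.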
@@ -364,6 +365,165 @@ static const struct of_device_id imx_ahci_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, imx_ahci_of_match);
 
+struct reg_value {
+       u32 of_value;
+       u32 reg_value;
+};
+
+struct reg_property {
+       const char *name;
+       const struct reg_value *values;
+       size_t num_values;
+       u32 def_value;
+       u32 set_value;
+};
+
+static const struct reg_value gpr13_tx_level[] = {
+       {  937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V },
+       {  947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V },
+       {  957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V },
+       {  966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V },
+       {  976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V },
+       {  986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V },
+       {  996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V },
+       { 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V },
+       { 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V },
+       { 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V },
+       { 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V },
+       { 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V },
+       { 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V },
+       { 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V },
+       { 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V },
+       { 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V },
+       { 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V },
+       { 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V },
+       { 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V },
+       { 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V },
+       { 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V },
+       { 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V },
+       { 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V },
+       { 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V },
+       { 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V },
+       { 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V },
+       { 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V },
+       { 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V },
+       { 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V },
+       { 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V },
+       { 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V },
+       { 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V }
+};
+
+static const struct reg_value gpr13_tx_boost[] = {
+       {    0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB },
+       {  370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB },
+       {  740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB },
+       { 1110, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB },
+       { 1480, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB },
+       { 1850, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB },
+       { 2220, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB },
+       { 2590, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB },
+       { 2960, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB },
+       { 3330, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB },
+       { 3700, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB },
+       { 4070, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB },
+       { 4440, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB },
+       { 4810, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB },
+       { 5280, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB },
+       { 5750, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB }
+};
+
+static const struct reg_value gpr13_tx_atten[] = {
+       {  8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 },
+       {  9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 },
+       { 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 },
+       { 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 },
+       { 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 },
+       { 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 },
+};
+
+static const struct reg_value gpr13_rx_eq[] = {
+       {  500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB },
+       { 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB },
+       { 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB },
+       { 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB },
+       { 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB },
+       { 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB },
+       { 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB },
+       { 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB },
+};
+
+static const struct reg_property gpr13_props[] = {
+       {
+               .name = "fsl,transmit-level-mV",
+               .values = gpr13_tx_level,
+               .num_values = ARRAY_SIZE(gpr13_tx_level),
+               .def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V,
+       }, {
+               .name = "fsl,transmit-boost-mdB",
+               .values = gpr13_tx_boost,
+               .num_values = ARRAY_SIZE(gpr13_tx_boost),
+               .def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB,
+       }, {
+               .name = "fsl,transmit-atten-16ths",
+               .values = gpr13_tx_atten,
+               .num_values = ARRAY_SIZE(gpr13_tx_atten),
+               .def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16,
+       }, {
+               .name = "fsl,receive-eq-mdB",
+               .values = gpr13_rx_eq,
+               .num_values = ARRAY_SIZE(gpr13_rx_eq),
+               .def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB,
+       }, {
+               .name = "fsl,no-spread-spectrum",
+               .def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN,
+               .set_value = 0,
+       },
+};
+
+static u32 imx_ahci_parse_props(struct device *dev,
+                               const struct reg_property *prop, size_t num)
+{
+       struct device_node *np = dev->of_node;
+       u32 reg_value = 0;
+       int i, j;
+
+       for (i = 0; i < num; i++, prop++) {
+               u32 of_val;
+
+               if (prop->num_values == 0) {
+                       if (of_property_read_bool(np, prop->name))
+                               reg_value |= prop->set_value;
+                       else
+                               reg_value |= prop->def_value;
+                       continue;
+               }
+
+               if (of_property_read_u32(np, prop->name, &of_val)) {
+                       dev_info(dev, "%s not specified, using %08x\n",
+                               prop->name, prop->def_value);
+                       reg_value |= prop->def_value;
+                       continue;
+               }
+
+               for (j = 0; j < prop->num_values; j++) {
+                       if (prop->values[j].of_value == of_val) {
+                               dev_info(dev, "%s value %u, using %08x\n",
+                                       prop->name, of_val, prop->values[j].reg_value);
+                               reg_value |= prop->values[j].reg_value;
+                               break;
+                       }
+               }
+
+               if (j == prop->num_values) {
+                       dev_err(dev, "DT property %s has an invalid value\n",
+                               prop->name);
+                       reg_value |= prop->def_value;
+               }
+       }
+
+       return reg_value;
+}
+
 static int imx_ahci_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
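imx_ahci_parse_props() folds each tunable into a register value: a boolean property ORs in set_value, a numeric property is looked up in its values[] table, and a missing or unrecognized value falls back to def_value with a log message. So a node carrying, say, fsl,transmit-level-mV = <1025> (a hypothetical DT fragment) resolves to IMX6Q_GPR13_SATA_TX_LVL_1_025_V. A sketch of the single call that collects all GPR13 tunables:

        u32 bits = imx_ahci_parse_props(dev, gpr13_props,
                                        ARRAY_SIZE(gpr13_props));

The probe hunk below combines this result with the fixed RX/DPLL/speed bits to form imxpriv->phy_params.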
@@ -385,6 +545,19 @@ static int imx_ahci_probe(struct platform_device *pdev)
        imxpriv->no_device = false;
        imxpriv->first_time = true;
        imxpriv->type = (enum ahci_imx_type)of_id->data;
+
+       imxpriv->sata_clk = devm_clk_get(dev, "sata");
+       if (IS_ERR(imxpriv->sata_clk)) {
+               dev_err(dev, "can't get sata clock.\n");
+               return PTR_ERR(imxpriv->sata_clk);
+       }
+
+       imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+       if (IS_ERR(imxpriv->sata_ref_clk)) {
+               dev_err(dev, "can't get sata_ref clock.\n");
+               return PTR_ERR(imxpriv->sata_ref_clk);
+       }
+
        imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
        if (IS_ERR(imxpriv->ahb_clk)) {
                dev_err(dev, "can't get ahb clock.\n");
@@ -392,6 +565,8 @@ static int imx_ahci_probe(struct platform_device *pdev)
        }
 
        if (imxpriv->type == AHCI_IMX6Q) {
+               u32 reg_value;
+
                imxpriv->gpr = syscon_regmap_lookup_by_compatible(
                                                        "fsl,imx6q-iomuxc-gpr");
                if (IS_ERR(imxpriv->gpr)) {
@@ -399,6 +574,15 @@ static int imx_ahci_probe(struct platform_device *pdev)
                                "failed to find fsl,imx6q-iomux-gpr regmap\n");
                        return PTR_ERR(imxpriv->gpr);
                }
+
+               reg_value = imx_ahci_parse_props(dev, gpr13_props,
+                                                ARRAY_SIZE(gpr13_props));
+
+               imxpriv->phy_params =
+                                  IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M |
+                                  IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F |
+                                  IMX6Q_GPR13_SATA_SPD_MODE_3P0G |
+                                  reg_value;
        }
 
        hpriv = ahci_platform_get_resources(pdev);
@@ -407,10 +591,14 @@ static int imx_ahci_probe(struct platform_device *pdev)
 
        hpriv->plat_data = imxpriv;
 
-       ret = imx_sata_enable(hpriv);
+       ret = clk_prepare_enable(imxpriv->sata_clk);
        if (ret)
                return ret;
 
+       ret = imx_sata_enable(hpriv);
+       if (ret)
+               goto disable_clk;
+
        /*
         * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
         * and IP vendor specific register IMX_TIMER1MS.
@@ -432,19 +620,26 @@ static int imx_ahci_probe(struct platform_device *pdev)
        reg_val = clk_get_rate(imxpriv->ahb_clk) / 1000;
        writel(reg_val, hpriv->mmio + IMX_TIMER1MS);
 
-       ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
-                                     0, 0, 0);
+       ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info);
        if (ret)
-               imx_sata_disable(hpriv);
+               goto disable_sata;
+
+       return 0;
 
+disable_sata:
+       imx_sata_disable(hpriv);
+disable_clk:
+       clk_disable_unprepare(imxpriv->sata_clk);
        return ret;
 }
 
 static void ahci_imx_host_stop(struct ata_host *host)
 {
        struct ahci_host_priv *hpriv = host->private_data;
+       struct imx_ahci_priv *imxpriv = hpriv->plat_data;
 
        imx_sata_disable(hpriv);
+       clk_disable_unprepare(imxpriv->sata_clk);
 }
 
 #ifdef CONFIG_PM_SLEEP
index fd3dfd7..68672d2 100644 (file)
@@ -88,8 +88,7 @@ static int ahci_mvebu_probe(struct platform_device *pdev)
        ahci_mvebu_mbus_config(hpriv, dram);
        ahci_mvebu_regret_option(hpriv);
 
-       rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info,
-                                    0, 0, 0);
+       rc = ahci_platform_init_host(pdev, hpriv, &ahci_mvebu_port_info);
        if (rc)
                goto disable_resources;
 
index ebe505c..f61ddb9 100644 (file)
@@ -34,7 +34,6 @@ static int ahci_probe(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct ahci_platform_data *pdata = dev_get_platdata(dev);
        struct ahci_host_priv *hpriv;
-       unsigned long hflags = 0;
        int rc;
 
        hpriv = ahci_platform_get_resources(pdev);
@@ -58,10 +57,9 @@ static int ahci_probe(struct platform_device *pdev)
        }
 
        if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
-               hflags |= AHCI_HFLAG_NO_FBS;
+               hpriv->flags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
 
-       rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
-                                    hflags, 0, 0);
+       rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info);
        if (rc)
                goto pdata_exit;
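With host flags folded into hpriv, platform glue simply ORs AHCI_HFLAG_* bits into hpriv->flags before calling ahci_platform_init_host(), which forwards them via pi.private_data (see the libahci hunks later in this series). A sketch using a made-up compatible string:

        /* Hypothetical quirk: disable NCQ for a troublesome controller. */
        if (of_device_is_compatible(dev->of_node, "vendor,quirky-ahci"))
                hpriv->flags |= AHCI_HFLAG_NO_NCQ;

        rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info);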
 
@@ -78,6 +76,8 @@ static SIMPLE_DEV_PM_OPS(ahci_pm_ops, ahci_platform_suspend,
                         ahci_platform_resume);
 
 static const struct of_device_id ahci_of_match[] = {
+       { .compatible = "generic-ahci", },
+       /* Keep the following compatibles for device tree compatibility */
        { .compatible = "snps,spear-ahci", },
        { .compatible = "snps,exynos5440-ahci", },
        { .compatible = "ibm,476gtr-ahci", },
index 2595598..835d6ee 100644 (file)
@@ -166,7 +166,7 @@ static int st_ahci_probe(struct platform_device *pdev)
        if (err)
                return err;
 
-       err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info, 0, 0, 0);
+       err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info);
        if (err) {
                ahci_platform_disable_resources(hpriv);
                return err;
@@ -221,7 +221,7 @@ static int st_ahci_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(st_ahci_pm_ops, st_ahci_suspend, st_ahci_resume);
 
-static struct of_device_id st_ahci_match[] = {
+static const struct of_device_id st_ahci_match[] = {
        { .compatible = "st,ahci", },
        {},
 };
index 02002f1..e44d675 100644 (file)
@@ -167,7 +167,6 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct ahci_host_priv *hpriv;
-       unsigned long hflags;
        int rc;
 
        hpriv = ahci_platform_get_resources(pdev);
@@ -184,11 +183,10 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
        if (rc)
                goto disable_resources;
 
-       hflags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
-                AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+       hpriv->flags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
+                      AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
 
-       rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info,
-                                    hflags, 0, 0);
+       rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info);
        if (rc)
                goto disable_resources;
 
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c
new file mode 100644 (file)
index 0000000..fc3df47
--- /dev/null
@@ -0,0 +1,376 @@
+/*
+ * drivers/ata/ahci_tegra.c
+ *
+ * Copyright (c) 2014, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Author:
+ *     Mikko Perttunen <mperttunen@nvidia.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ahci_platform.h>
+#include <linux/reset.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/tegra-powergate.h>
+#include <linux/regulator/consumer.h>
+#include "ahci.h"
+
+#define SATA_CONFIGURATION_0                           0x180
+#define SATA_CONFIGURATION_EN_FPCI                     BIT(0)
+
+#define SCFG_OFFSET                                    0x1000
+
+#define T_SATA0_CFG_1                                  0x04
+#define T_SATA0_CFG_1_IO_SPACE                         BIT(0)
+#define T_SATA0_CFG_1_MEMORY_SPACE                     BIT(1)
+#define T_SATA0_CFG_1_BUS_MASTER                       BIT(2)
+#define T_SATA0_CFG_1_SERR                             BIT(8)
+
+#define T_SATA0_CFG_9                                  0x24
+#define T_SATA0_CFG_9_BASE_ADDRESS_SHIFT               13
+
+#define SATA_FPCI_BAR5                                 0x94
+#define SATA_FPCI_BAR5_START_SHIFT                     4
+
+#define SATA_INTR_MASK                                 0x188
+#define SATA_INTR_MASK_IP_INT_MASK                     BIT(16)
+
+#define T_SATA0_AHCI_HBA_CAP_BKDR                      0x300
+
+#define T_SATA0_BKDOOR_CC                              0x4a4
+
+#define T_SATA0_CFG_SATA                               0x54c
+#define T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN           BIT(12)
+
+#define T_SATA0_CFG_MISC                               0x550
+
+#define T_SATA0_INDEX                                  0x680
+
+#define T_SATA0_CHX_PHY_CTRL1_GEN1                     0x690
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK         0xff
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT                0
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK                (0xff << 8)
+#define T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT       8
+
+#define T_SATA0_CHX_PHY_CTRL1_GEN2                     0x694
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK         0xff
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_SHIFT                0
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK                (0xff << 12)
+#define T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_SHIFT       12
+
+#define T_SATA0_CHX_PHY_CTRL2                          0x69c
+#define T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1            0x23
+
+#define T_SATA0_CHX_PHY_CTRL11                         0x6d0
+#define T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ              (0x2800 << 16)
+
+#define FUSE_SATA_CALIB                                        0x124
+#define FUSE_SATA_CALIB_MASK                           0x3
+
+struct sata_pad_calibration {
+       u8 gen1_tx_amp;
+       u8 gen1_tx_peak;
+       u8 gen2_tx_amp;
+       u8 gen2_tx_peak;
+};
+
+static const struct sata_pad_calibration tegra124_pad_calibration[] = {
+       {0x18, 0x04, 0x18, 0x0a},
+       {0x0e, 0x04, 0x14, 0x0a},
+       {0x0e, 0x07, 0x1a, 0x0e},
+       {0x14, 0x0e, 0x1a, 0x0e},
+};
+
+struct tegra_ahci_priv {
+       struct platform_device     *pdev;
+       void __iomem               *sata_regs;
+       struct reset_control       *sata_rst;
+       struct reset_control       *sata_oob_rst;
+       struct reset_control       *sata_cold_rst;
+       /* Needs special handling, cannot use ahci_platform */
+       struct clk                 *sata_clk;
+       struct regulator_bulk_data supplies[5];
+};
+
+static int tegra_ahci_power_on(struct ahci_host_priv *hpriv)
+{
+       struct tegra_ahci_priv *tegra = hpriv->plat_data;
+       int ret;
+
+       ret = regulator_bulk_enable(ARRAY_SIZE(tegra->supplies),
+                                   tegra->supplies);
+       if (ret)
+               return ret;
+
+       ret = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_SATA,
+                                               tegra->sata_clk,
+                                               tegra->sata_rst);
+       if (ret)
+               goto disable_regulators;
+
+       reset_control_assert(tegra->sata_oob_rst);
+       reset_control_assert(tegra->sata_cold_rst);
+
+       ret = ahci_platform_enable_resources(hpriv);
+       if (ret)
+               goto disable_power;
+
+       reset_control_deassert(tegra->sata_cold_rst);
+       reset_control_deassert(tegra->sata_oob_rst);
+
+       return 0;
+
+disable_power:
+       clk_disable_unprepare(tegra->sata_clk);
+
+       tegra_powergate_power_off(TEGRA_POWERGATE_SATA);
+
+disable_regulators:
+       regulator_bulk_disable(ARRAY_SIZE(tegra->supplies), tegra->supplies);
+
+       return ret;
+}
+
+static void tegra_ahci_power_off(struct ahci_host_priv *hpriv)
+{
+       struct tegra_ahci_priv *tegra = hpriv->plat_data;
+
+       ahci_platform_disable_resources(hpriv);
+
+       reset_control_assert(tegra->sata_rst);
+       reset_control_assert(tegra->sata_oob_rst);
+       reset_control_assert(tegra->sata_cold_rst);
+
+       clk_disable_unprepare(tegra->sata_clk);
+       tegra_powergate_power_off(TEGRA_POWERGATE_SATA);
+
+       regulator_bulk_disable(ARRAY_SIZE(tegra->supplies), tegra->supplies);
+}
+
+static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv)
+{
+       struct tegra_ahci_priv *tegra = hpriv->plat_data;
+       int ret;
+       unsigned int val;
+       struct sata_pad_calibration calib;
+
+       ret = tegra_ahci_power_on(hpriv);
+       if (ret) {
+               dev_err(&tegra->pdev->dev,
+                       "failed to power on AHCI controller: %d\n", ret);
+               return ret;
+       }
+
+       val = readl(tegra->sata_regs + SATA_CONFIGURATION_0);
+       val |= SATA_CONFIGURATION_EN_FPCI;
+       writel(val, tegra->sata_regs + SATA_CONFIGURATION_0);
+
+       /* Pad calibration */
+
+       /*
+        * FIXME: Always use calibration 0. Change this to read the
+        * calibration fuse once the fuse driver has landed.
+        */
+       val = 0;
+
+       calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK];
+
+       writel(BIT(0), tegra->sata_regs + SCFG_OFFSET + T_SATA0_INDEX);
+
+       val = readl(tegra->sata_regs +
+               SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN1);
+       val &= ~T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_MASK;
+       val &= ~T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_MASK;
+       val |= calib.gen1_tx_amp <<
+                       T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT;
+       val |= calib.gen1_tx_peak <<
+                       T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT;
+       writel(val, tegra->sata_regs + SCFG_OFFSET +
+               T_SATA0_CHX_PHY_CTRL1_GEN1);
+
+       val = readl(tegra->sata_regs +
+                       SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL1_GEN2);
+       val &= ~T_SATA0_CHX_PHY_CTRL1_GEN2_TX_AMP_MASK;
+       val &= ~T_SATA0_CHX_PHY_CTRL1_GEN2_TX_PEAK_MASK;
+       val |= calib.gen2_tx_amp <<
+                       T_SATA0_CHX_PHY_CTRL1_GEN1_TX_AMP_SHIFT;
+       val |= calib.gen2_tx_peak <<
+                       T_SATA0_CHX_PHY_CTRL1_GEN1_TX_PEAK_SHIFT;
+       writel(val, tegra->sata_regs + SCFG_OFFSET +
+               T_SATA0_CHX_PHY_CTRL1_GEN2);
+
+       writel(T_SATA0_CHX_PHY_CTRL11_GEN2_RX_EQ,
+               tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL11);
+       writel(T_SATA0_CHX_PHY_CTRL2_CDR_CNTL_GEN1,
+               tegra->sata_regs + SCFG_OFFSET + T_SATA0_CHX_PHY_CTRL2);
+
+       writel(0, tegra->sata_regs + SCFG_OFFSET + T_SATA0_INDEX);
+
+       /* Program controller device ID */
+
+       val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+       val |= T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN;
+       writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+
+       writel(0x01060100, tegra->sata_regs + SCFG_OFFSET + T_SATA0_BKDOOR_CC);
+
+       val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+       val &= ~T_SATA0_CFG_SATA_BACKDOOR_PROG_IF_EN;
+       writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_SATA);
+
+       /* Enable IO & memory access, bus master mode */
+
+       val = readl(tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_1);
+       val |= T_SATA0_CFG_1_IO_SPACE | T_SATA0_CFG_1_MEMORY_SPACE |
+               T_SATA0_CFG_1_BUS_MASTER | T_SATA0_CFG_1_SERR;
+       writel(val, tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_1);
+
+       /* Program SATA MMIO */
+
+       writel(0x10000 << SATA_FPCI_BAR5_START_SHIFT,
+              tegra->sata_regs + SATA_FPCI_BAR5);
+
+       writel(0x08000 << T_SATA0_CFG_9_BASE_ADDRESS_SHIFT,
+              tegra->sata_regs + SCFG_OFFSET + T_SATA0_CFG_9);
+
+       /* Unmask SATA interrupts */
+
+       val = readl(tegra->sata_regs + SATA_INTR_MASK);
+       val |= SATA_INTR_MASK_IP_INT_MASK;
+       writel(val, tegra->sata_regs + SATA_INTR_MASK);
+
+       return 0;
+}
+
+static void tegra_ahci_controller_deinit(struct ahci_host_priv *hpriv)
+{
+       tegra_ahci_power_off(hpriv);
+}
+
+static void tegra_ahci_host_stop(struct ata_host *host)
+{
+       struct ahci_host_priv *hpriv = host->private_data;
+
+       tegra_ahci_controller_deinit(hpriv);
+}
+
+static struct ata_port_operations ahci_tegra_port_ops = {
+       .inherits       = &ahci_ops,
+       .host_stop      = tegra_ahci_host_stop,
+};
+
+static const struct ata_port_info ahci_tegra_port_info = {
+       .flags          = AHCI_FLAG_COMMON,
+       .pio_mask       = ATA_PIO4,
+       .udma_mask      = ATA_UDMA6,
+       .port_ops       = &ahci_tegra_port_ops,
+};
+
+static const struct of_device_id tegra_ahci_of_match[] = {
+       { .compatible = "nvidia,tegra124-ahci" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, tegra_ahci_of_match);
+
+static int tegra_ahci_probe(struct platform_device *pdev)
+{
+       struct ahci_host_priv *hpriv;
+       struct tegra_ahci_priv *tegra;
+       struct resource *res;
+       int ret;
+
+       hpriv = ahci_platform_get_resources(pdev);
+       if (IS_ERR(hpriv))
+               return PTR_ERR(hpriv);
+
+       tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
+       if (!tegra)
+               return -ENOMEM;
+
+       hpriv->plat_data = tegra;
+
+       tegra->pdev = pdev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       tegra->sata_regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(tegra->sata_regs))
+               return PTR_ERR(tegra->sata_regs);
+
+       tegra->sata_rst = devm_reset_control_get(&pdev->dev, "sata");
+       if (IS_ERR(tegra->sata_rst)) {
+               dev_err(&pdev->dev, "Failed to get sata reset\n");
+               return PTR_ERR(tegra->sata_rst);
+       }
+
+       tegra->sata_oob_rst = devm_reset_control_get(&pdev->dev, "sata-oob");
+       if (IS_ERR(tegra->sata_oob_rst)) {
+               dev_err(&pdev->dev, "Failed to get sata-oob reset\n");
+               return PTR_ERR(tegra->sata_oob_rst);
+       }
+
+       tegra->sata_cold_rst = devm_reset_control_get(&pdev->dev, "sata-cold");
+       if (IS_ERR(tegra->sata_cold_rst)) {
+               dev_err(&pdev->dev, "Failed to get sata-cold reset\n");
+               return PTR_ERR(tegra->sata_cold_rst);
+       }
+
+       tegra->sata_clk = devm_clk_get(&pdev->dev, "sata");
+       if (IS_ERR(tegra->sata_clk)) {
+               dev_err(&pdev->dev, "Failed to get sata clock\n");
+               return PTR_ERR(tegra->sata_clk);
+       }
+
+       tegra->supplies[0].supply = "avdd";
+       tegra->supplies[1].supply = "hvdd";
+       tegra->supplies[2].supply = "vddio";
+       tegra->supplies[3].supply = "target-5v";
+       tegra->supplies[4].supply = "target-12v";
+
+       ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(tegra->supplies),
+                                     tegra->supplies);
+       if (ret) {
+               dev_err(&pdev->dev, "Failed to get regulators\n");
+               return ret;
+       }
+
+       ret = tegra_ahci_controller_init(hpriv);
+       if (ret)
+               return ret;
+
+       ret = ahci_platform_init_host(pdev, hpriv, &ahci_tegra_port_info);
+       if (ret)
+               goto deinit_controller;
+
+       return 0;
+
+deinit_controller:
+       tegra_ahci_controller_deinit(hpriv);
+
+       return ret;
+}
+
+static struct platform_driver tegra_ahci_driver = {
+       .probe = tegra_ahci_probe,
+       .remove = ata_platform_remove_one,
+       .driver = {
+               .name = "tegra-ahci",
+               .of_match_table = tegra_ahci_of_match,
+       },
+       /* LP0 suspend support not implemented */
+};
+module_platform_driver(tegra_ahci_driver);
+
+MODULE_AUTHOR("Mikko Perttunen <mperttunen@nvidia.com>");
+MODULE_DESCRIPTION("Tegra124 AHCI SATA driver");
+MODULE_LICENSE("GPL v2");
index 042a9bb..bc28111 100644 (file)
@@ -67,6 +67,9 @@
 #define PORTAXICFG                     0x000000bc
 #define PORTAXICFG_OUTTRANS_SET(dst, src) \
                (((dst) & ~0x00f00000) | (((u32)(src) << 0x14) & 0x00f00000))
+#define PORTRANSCFG                    0x000000c8
+#define PORTRANSCFG_RXWM_SET(dst, src)         \
+               (((dst) & ~0x0000007f) | (((u32)(src)) & 0x0000007f))
 
 /* SATA host controller AXI CSR */
 #define INT_SLV_TMOMASK                        0x00000010
@@ -78,6 +81,7 @@
 struct xgene_ahci_context {
        struct ahci_host_priv *hpriv;
        struct device *dev;
+       u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracks the last command issued */
        void __iomem *csr_core;         /* Core CSR address of IP */
        void __iomem *csr_diag;         /* Diag CSR address of IP */
        void __iomem *csr_axi;          /* AXI CSR address of IP */
@@ -97,6 +101,50 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
        return 0;
 }
 
+/**
+ * xgene_ahci_restart_engine - Restart the DMA engine.
+ * @ap: ATA port of interest
+ *
+ * Restarts the DMA engine inside the controller.
+ */
+static int xgene_ahci_restart_engine(struct ata_port *ap)
+{
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+
+       ahci_stop_engine(ap);
+       ahci_start_fis_rx(ap);
+       hpriv->start_engine(ap);
+
+       return 0;
+}
+
+/**
+ * xgene_ahci_qc_issue - Issue commands to the device
+ * @qc: Command to issue
+ *
+ * Due to a hardware erratum affecting the IDENTIFY DEVICE command, the
+ * controller cannot clear the BSY bit after receiving the PIO setup FIS,
+ * which sends the DMA state machine into the CMFatalErrorUpdate state and
+ * locks it up. Restarting the DMA engine brings the controller back out of
+ * the locked-up state.
+ */
+static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+       struct xgene_ahci_context *ctx = hpriv->plat_data;
+       int rc = 0;
+
+       if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+               xgene_ahci_restart_engine(ap);
+
+       rc = ahci_qc_issue(qc);
+
+       /* Save the last command issued */
+       ctx->last_cmd[ap->port_no] = qc->tf.command;
+
+       return rc;
+}
+
 /**
  * xgene_ahci_read_id - Read ID data from the specified device
  * @dev: device
@@ -104,14 +152,12 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
  * @id: data buffer
  *
  * This custom read ID function is required due to the fact that the HW
- * does not support DEVSLP and the controller state machine may get stuck
- * after processing the ID query command.
+ * does not support DEVSLP.
  */
 static unsigned int xgene_ahci_read_id(struct ata_device *dev,
                                       struct ata_taskfile *tf, u16 *id)
 {
        u32 err_mask;
-       void __iomem *port_mmio = ahci_port_base(dev->link->ap);
 
        err_mask = ata_do_dev_read_id(dev, tf, id);
        if (err_mask)
@@ -133,16 +179,6 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
         */
        id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
 
-       /*
-        * Due to HW errata, restart the port if no other command active.
-        * Otherwise the controller may get stuck.
-        */
-       if (!readl(port_mmio + PORT_CMD_ISSUE)) {
-               writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
-               readl(port_mmio + PORT_CMD);    /* Force a barrier */
-               writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
-               readl(port_mmio + PORT_CMD);    /* Force a barrier */
-       }
        return 0;
 }
 
@@ -160,11 +196,11 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
        /* Disable fix rate */
        writel(0x0001fffe, mmio + PORTPHY1CFG);
        readl(mmio + PORTPHY1CFG); /* Force a barrier */
-       writel(0x5018461c, mmio + PORTPHY2CFG);
+       writel(0x28183219, mmio + PORTPHY2CFG);
        readl(mmio + PORTPHY2CFG); /* Force a barrier */
-       writel(0x1c081907, mmio + PORTPHY3CFG);
+       writel(0x13081008, mmio + PORTPHY3CFG);
        readl(mmio + PORTPHY3CFG); /* Force a barrier */
-       writel(0x1c080815, mmio + PORTPHY4CFG);
+       writel(0x00480815, mmio + PORTPHY4CFG);
        readl(mmio + PORTPHY4CFG); /* Force a barrier */
        /* Set window negotiation */
        val = readl(mmio + PORTPHY5CFG);
@@ -176,6 +212,10 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel)
        val = PORTAXICFG_OUTTRANS_SET(val, 0xe); /* Set outstanding */
        writel(val, mmio + PORTAXICFG);
        readl(mmio + PORTAXICFG); /* Force a barrier */
+       /* Set the watermark threshold of the receive FIFO */
+       val = readl(mmio + PORTRANSCFG);
+       val = PORTRANSCFG_RXWM_SET(val, 0x30);
+       writel(val, mmio + PORTRANSCFG);
 }
 
 /**
@@ -300,6 +340,7 @@ static struct ata_port_operations xgene_ahci_ops = {
        .host_stop = xgene_ahci_host_stop,
        .hardreset = xgene_ahci_hardreset,
        .read_id = xgene_ahci_read_id,
+       .qc_issue = xgene_ahci_qc_issue,
 };
 
 static const struct ata_port_info xgene_ahci_port_info = {
@@ -381,7 +422,6 @@ static int xgene_ahci_probe(struct platform_device *pdev)
        struct ahci_host_priv *hpriv;
        struct xgene_ahci_context *ctx;
        struct resource *res;
-       unsigned long hflags;
        int rc;
 
        hpriv = ahci_platform_get_resources(pdev);
@@ -440,20 +480,9 @@ static int xgene_ahci_probe(struct platform_device *pdev)
        /* Configure the host controller */
        xgene_ahci_hw_init(hpriv);
 
-       /*
-        * Setup DMA mask. This is preliminary until the DMA range is sorted
-        * out.
-        */
-       rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
-       if (rc) {
-               dev_err(dev, "Unable to set dma mask\n");
-               goto disable_resources;
-       }
-
-       hflags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
+       hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ;
 
-       rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info,
-                                    hflags, 0, 0);
+       rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info);
        if (rc)
                goto disable_resources;
 
index 40ea583..b784e9d 100644 (file)
@@ -68,7 +68,6 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
 
 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
@@ -383,8 +382,6 @@ static ssize_t ahci_show_em_supported(struct device *dev,
  *     ahci_save_initial_config - Save and fixup initial config values
  *     @dev: target AHCI device
  *     @hpriv: host private area to store config values
- *     @force_port_map: force port map to a specified value
- *     @mask_port_map: mask out particular bits from port map
  *
  *     Some registers containing configuration info might be setup by
  *     BIOS and might be cleared on reset.  This function saves the
@@ -399,10 +396,7 @@ static ssize_t ahci_show_em_supported(struct device *dev,
  *     LOCKING:
  *     None.
  */
-void ahci_save_initial_config(struct device *dev,
-                             struct ahci_host_priv *hpriv,
-                             unsigned int force_port_map,
-                             unsigned int mask_port_map)
+void ahci_save_initial_config(struct device *dev, struct ahci_host_priv *hpriv)
 {
        void __iomem *mmio = hpriv->mmio;
        u32 cap, cap2, vers, port_map;
@@ -469,17 +463,17 @@ void ahci_save_initial_config(struct device *dev,
                cap &= ~HOST_CAP_FBS;
        }
 
-       if (force_port_map && port_map != force_port_map) {
+       if (hpriv->force_port_map && port_map != hpriv->force_port_map) {
                dev_info(dev, "forcing port_map 0x%x -> 0x%x\n",
-                        port_map, force_port_map);
-               port_map = force_port_map;
+                        port_map, hpriv->force_port_map);
+               port_map = hpriv->force_port_map;
        }
 
-       if (mask_port_map) {
+       if (hpriv->mask_port_map) {
                dev_warn(dev, "masking port_map 0x%x -> 0x%x\n",
                        port_map,
-                       port_map & mask_port_map);
-               port_map &= mask_port_map;
+                       port_map & hpriv->mask_port_map);
+               port_map &= hpriv->mask_port_map;
        }
 
        /* cross check port_map and cap.n_ports */
@@ -620,7 +614,7 @@ int ahci_stop_engine(struct ata_port *ap)
 }
 EXPORT_SYMBOL_GPL(ahci_stop_engine);
 
-static void ahci_start_fis_rx(struct ata_port *ap)
+void ahci_start_fis_rx(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -646,6 +640,7 @@ static void ahci_start_fis_rx(struct ata_port *ap)
        /* flush */
        readl(port_mmio + PORT_CMD);
 }
+EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
 
 static int ahci_stop_fis_rx(struct ata_port *ap)
 {
@@ -1945,7 +1940,7 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 }
 EXPORT_SYMBOL_GPL(ahci_interrupt);
 
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = ahci_port_base(ap);
@@ -1974,6 +1969,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ahci_qc_issue);
 
 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
index 3a5b4ed..5b92c29 100644 (file)
@@ -38,6 +38,67 @@ static struct scsi_host_template ahci_platform_sht = {
        AHCI_SHT("ahci_platform"),
 };
 
+/**
+ * ahci_platform_enable_phys - Enable PHYs
+ * @hpriv: host private area to store config values
+ *
+ * This function enables all the PHYs found in hpriv->phys, if any.
+ * If a PHY fails to be enabled, it disables all the PHYs already
+ * enabled in reverse order and returns an error.
+ *
+ * RETURNS:
+ * 0 on success, otherwise a negative error code
+ */
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
+{
+       int rc, i;
+
+       for (i = 0; i < hpriv->nports; i++) {
+               if (!hpriv->phys[i])
+                       continue;
+
+               rc = phy_init(hpriv->phys[i]);
+               if (rc)
+                       goto disable_phys;
+
+               rc = phy_power_on(hpriv->phys[i]);
+               if (rc) {
+                       phy_exit(hpriv->phys[i]);
+                       goto disable_phys;
+               }
+       }
+
+       return 0;
+
+disable_phys:
+       while (--i >= 0) {
+               phy_power_off(hpriv->phys[i]);
+               phy_exit(hpriv->phys[i]);
+       }
+       return rc;
+}
+EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);
+
+/**
+ * ahci_platform_disable_phys - Disable PHYs
+ * @hpriv: host private area to store config values
+ *
+ * This function disables all PHYs found in hpriv->phys.
+ */
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
+{
+       int i;
+
+       for (i = 0; i < hpriv->nports; i++) {
+               if (!hpriv->phys[i])
+                       continue;
+
+               phy_power_off(hpriv->phys[i]);
+               phy_exit(hpriv->phys[i]);
+       }
+}
+EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);
+
 /**
  * ahci_platform_enable_clks - Enable platform clocks
  * @hpriv: host private area to store config values
@@ -92,7 +153,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_disable_clks);
  * following order:
  * 1) Regulator
  * 2) Clocks (through ahci_platform_enable_clks)
- * 3) Phy
+ * 3) Phys
  *
  * If resource enabling fails at any point the previous enabled resources
  * are disabled in reverse order.
@@ -114,17 +175,9 @@ int ahci_platform_enable_resources(struct ahci_host_priv *hpriv)
        if (rc)
                goto disable_regulator;
 
-       if (hpriv->phy) {
-               rc = phy_init(hpriv->phy);
-               if (rc)
-                       goto disable_clks;
-
-               rc = phy_power_on(hpriv->phy);
-               if (rc) {
-                       phy_exit(hpriv->phy);
-                       goto disable_clks;
-               }
-       }
+       rc = ahci_platform_enable_phys(hpriv);
+       if (rc)
+               goto disable_clks;
 
        return 0;
 
@@ -144,16 +197,13 @@ EXPORT_SYMBOL_GPL(ahci_platform_enable_resources);
  *
  * This function disables all ahci_platform managed resources in the
  * following order:
- * 1) Phy
+ * 1) Phys
  * 2) Clocks (through ahci_platform_disable_clks)
  * 3) Regulator
  */
 void ahci_platform_disable_resources(struct ahci_host_priv *hpriv)
 {
-       if (hpriv->phy) {
-               phy_power_off(hpriv->phy);
-               phy_exit(hpriv->phy);
-       }
+       ahci_platform_disable_phys(hpriv);
 
        ahci_platform_disable_clks(hpriv);
 
@@ -187,7 +237,7 @@ static void ahci_platform_put_resources(struct device *dev, void *res)
  * 2) regulator for controlling the targets power (optional)
  * 3) 0 - AHCI_MAX_CLKS clocks, as specified in the devs devicetree node,
  *    or for non devicetree enabled platforms a single clock
- *     4) phy (optional)
+ *     4) phys (optional)
  *
  * RETURNS:
  * The allocated ahci_host_priv on success, otherwise an ERR_PTR value
@@ -197,7 +247,9 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
        struct device *dev = &pdev->dev;
        struct ahci_host_priv *hpriv;
        struct clk *clk;
-       int i, rc = -ENOMEM;
+       struct device_node *child;
+       int i, enabled_ports = 0, rc = -ENOMEM;
+       u32 mask_port_map = 0;
 
        if (!devres_open_group(dev, NULL, GFP_KERNEL))
                return ERR_PTR(-ENOMEM);
@@ -246,23 +298,89 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
                hpriv->clks[i] = clk;
        }
 
-       hpriv->phy = devm_phy_get(dev, "sata-phy");
-       if (IS_ERR(hpriv->phy)) {
-               rc = PTR_ERR(hpriv->phy);
-               switch (rc) {
-               case -ENODEV:
-               case -ENOSYS:
-                       /* continue normally */
-                       hpriv->phy = NULL;
-                       break;
+       hpriv->nports = of_get_child_count(dev->of_node);
 
-               case -EPROBE_DEFER:
+       if (hpriv->nports) {
+               hpriv->phys = devm_kzalloc(dev,
+                                          hpriv->nports * sizeof(*hpriv->phys),
+                                          GFP_KERNEL);
+               if (!hpriv->phys) {
+                       rc = -ENOMEM;
                        goto err_out;
+               }
+
+               for_each_child_of_node(dev->of_node, child) {
+                       u32 port;
+
+                       if (!of_device_is_available(child))
+                               continue;
+
+                       if (of_property_read_u32(child, "reg", &port)) {
+                               rc = -EINVAL;
+                               goto err_out;
+                       }
 
-               default:
-                       dev_err(dev, "couldn't get sata-phy\n");
+                       if (port >= hpriv->nports) {
+                               dev_warn(dev, "invalid port number %u\n", port);
+                               continue;
+                       }
+
+                       mask_port_map |= BIT(port);
+
+                       hpriv->phys[port] = devm_of_phy_get(dev, child, NULL);
+                       if (IS_ERR(hpriv->phys[port])) {
+                               rc = PTR_ERR(hpriv->phys[port]);
+                               dev_err(dev,
+                                       "couldn't get PHY in node %s: %d\n",
+                                       child->name, rc);
+                               goto err_out;
+                       }
+
+                       enabled_ports++;
+               }
+               if (!enabled_ports) {
+                       dev_warn(dev, "No port enabled\n");
+                       rc = -ENODEV;
                        goto err_out;
                }
+
+               if (!hpriv->mask_port_map)
+                       hpriv->mask_port_map = mask_port_map;
+       } else {
+               /*
+                * If no sub-node was found, fall back to the old single
+                * "sata-phy" lookup for device tree compatibility.
+                */
+               struct phy *phy = devm_phy_get(dev, "sata-phy");
+               if (!IS_ERR(phy)) {
+                       hpriv->phys = devm_kzalloc(dev, sizeof(*hpriv->phys),
+                                                  GFP_KERNEL);
+                       if (!hpriv->phys) {
+                               rc = -ENOMEM;
+                               goto err_out;
+                       }
+
+                       hpriv->phys[0] = phy;
+                       hpriv->nports = 1;
+               } else {
+                       rc = PTR_ERR(phy);
+                       switch (rc) {
+                       case -ENOSYS:
+                               /* No PHY support. Check if PHY is required. */
+                               if (of_find_property(dev->of_node, "phys", NULL)) {
+                                       dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
+                                       goto err_out;
+                               }
+                               /* fall through */
+                       case -ENODEV:
+                               /* continue normally */
+                               hpriv->phys = NULL;
+                               break;
+
+                       default:
+                               goto err_out;
+                       }
+               }
        }
 
        pm_runtime_enable(dev);
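Each enabled child node contributes its "reg" value to mask_port_map and, through devm_of_phy_get(), a PHY at the matching index of hpriv->phys[], so a port number indexes its PHY directly. A sketch of a per-port consumer of that 1:1 convention (ahci_platform_enable_phys() earlier in this file is the real instance):

        for (i = 0; i < hpriv->nports; i++) {
                if (!hpriv->phys[i])
                        continue;       /* port without a PHY, or skipped */
                /* e.g. phy_init() then phy_power_on() on hpriv->phys[i] */
        }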
@@ -283,12 +401,9 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
  * @pdev: platform device pointer for the host
  * @hpriv: ahci-host private data for the host
  * @pi_template: template for the ata_port_info to use
- * @host_flags: ahci host flags used in ahci_host_priv
- * @force_port_map: param passed to ahci_save_initial_config
- * @mask_port_map: param passed to ahci_save_initial_config
  *
  * This function does all the usual steps needed to bring up an
- * ahci-platform host, note any necessary resources (ie clks, phy, etc.)
+ * ahci-platform host; note that any necessary resources (i.e. clks, phys, etc.)
  * must be initialized / enabled before calling this.
  *
  * RETURNS:
@@ -296,10 +411,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_get_resources);
  */
 int ahci_platform_init_host(struct platform_device *pdev,
                            struct ahci_host_priv *hpriv,
-                           const struct ata_port_info *pi_template,
-                           unsigned long host_flags,
-                           unsigned int force_port_map,
-                           unsigned int mask_port_map)
+                           const struct ata_port_info *pi_template)
 {
        struct device *dev = &pdev->dev;
        struct ata_port_info pi = *pi_template;
@@ -314,10 +426,9 @@ int ahci_platform_init_host(struct platform_device *pdev,
        }
 
        /* prepare host */
-       pi.private_data = (void *)host_flags;
-       hpriv->flags |= host_flags;
+       pi.private_data = (void *)(unsigned long)hpriv->flags;
 
-       ahci_save_initial_config(dev, hpriv, force_port_map, mask_port_map);
+       ahci_save_initial_config(dev, hpriv);
 
        if (hpriv->cap & HOST_CAP_NCQ)
                pi.flags |= ATA_FLAG_NCQ;
@@ -364,6 +475,19 @@ int ahci_platform_init_host(struct platform_device *pdev,
                        ap->ops = &ata_dummy_port_ops;
        }
 
+       if (hpriv->cap & HOST_CAP_64) {
+               rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+               if (rc) {
+                       rc = dma_coerce_mask_and_coherent(dev,
+                                                         DMA_BIT_MASK(32));
+                       if (rc) {
+                               dev_err(dev, "Failed to set DMA mask.\n");
+                               return rc;
+                       }
+                       dev_warn(dev, "Using 32-bit DMA instead of 64-bit.\n");
+               }
+       }
+
        rc = ahci_reset_controller(host);
        if (rc)
                return rc;
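The 64-bit-then-32-bit fallback above is a reusable pattern: dma_coerce_mask_and_coherent() sets both the streaming and coherent masks, even when bus code never set up dev->dma_mask, and the probe only fails if neither width can be set. A condensed sketch of the same logic for a generic device (the helper name example_set_dma() is made up):

        static int example_set_dma(struct device *dev, bool has_64bit)
        {
                int rc = -EINVAL;

                if (has_64bit)
                        rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
                if (rc) /* no 64-bit support, or setting it failed */
                        rc = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));
                return rc;
        }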
@@ -394,7 +518,7 @@ static void ahci_host_stop(struct ata_host *host)
  * @dev: device pointer for the host
  *
  * This function does all the usual steps needed to suspend an
- * ahci-platform host, note any necessary resources (ie clks, phy, etc.)
+ * ahci-platform host; note that any necessary resources (i.e. clks, phys, etc.)
  * must be disabled after calling this.
  *
  * RETURNS:
@@ -431,7 +555,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_suspend_host);
  * @dev: device pointer for the host
  *
  * This function does all the usual steps needed to resume an ahci-platform
- * host, note any necessary resources (ie clks, phy, etc.)  must be
+ * host; note that any necessary resources (i.e. clks, phys, etc.) must be
  * initialized / enabled before calling this.
  *
  * RETURNS:
index 18d97d5..677c0c1 100644 (file)
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  *     ata_qc_new - Request an available ATA command, for queueing
  *     @ap: target port
  *
+ *     Some ATA host controllers may implement a queue depth less than
+ *     ATA_MAX_QUEUE, so we should not allocate a tag beyond the hardware
+ *     limit.
+ *
  *     LOCKING:
  *     None.
  */
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc = NULL;
+       unsigned int max_queue = ap->host->n_tags;
        unsigned int i, tag;
 
        /* no command while frozen */
        if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
                return NULL;
 
-       for (i = 0; i < ATA_MAX_QUEUE; i++) {
-               tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+       for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+               tag = tag < max_queue ? tag : 0;
 
                /* the last tag is reserved for internal command. */
                if (tag == ATA_TAG_INTERNAL)
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
 {
        spin_lock_init(&host->lock);
        mutex_init(&host->eh_mutex);
+       host->n_tags = ATA_MAX_QUEUE - 1;
        host->dev = dev;
        host->ops = ops;
 }
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
        int i, rc;
 
+       host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
        /* host must have been started */
        if (!(host->flags & ATA_HOST_STARTED)) {
                dev_err(host->dev, "BUG: trying to register unstarted host\n");
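Together, these hunks cap the tag allocator at the host's real queue depth: ata_host_register() clamps n_tags to the template's can_queue, and ata_qc_new() walks at most n_tags slots, wrapping to zero. A self-contained sketch of the resulting search order (values chosen for illustration only):

        unsigned int i, tag;
        unsigned int max_queue = 31, last_tag = 30;     /* example values */

        for (i = 0, tag = last_tag + 1; i < max_queue; i++, tag++) {
                tag = tag < max_queue ? tag : 0;        /* 31 wraps to 0 */
                /* probe sequence here: 0, 1, 2, ..., 30 */
        }

The real loop additionally skips ATA_TAG_INTERNAL, which stays reserved for internal commands.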
index 6760fc4..dad83df 100644 (file)
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
        case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
-               if (err & ATA_UNC)
+               if (err & (ATA_UNC | ATA_AMNF))
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
                }
 
                if (cmd->command != ATA_CMD_PACKET &&
-                   (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
-                                    ATA_ABORTED)))
-                       ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
+                   (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+                                    ATA_IDNF | ATA_ABORTED)))
+                       ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
                          res->feature & ATA_ICRC ? "ICRC " : "",
                          res->feature & ATA_UNC ? "UNC " : "",
+                         res->feature & ATA_AMNF ? "AMNF " : "",
                          res->feature & ATA_IDNF ? "IDNF " : "",
                          res->feature & ATA_ABORTED ? "ABRT " : "");
 #endif
index 6ad5c07..4d37c54 100644 (file)
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
        struct ep93xx_pata_data *drv_data;
        struct ata_host *host;
        struct ata_port *ap;
-       unsigned int irq;
+       int irq;
        struct resource *mem_res;
        void __iomem *ide_base;
        int err;
index fb52883..2578fc1 100644 (file)
@@ -54,7 +54,6 @@
 
 enum s3c_cpu_type {
        TYPE_S3C64XX,
-       TYPE_S5PC100,
        TYPE_S5PV210,
 };
 
@@ -476,10 +475,6 @@ static void pata_s3c_hwinit(struct s3c_ide_info *info,
                writel(0x1b, info->ide_addr + S3C_ATA_IRQ_MSK);
                break;
 
-       case TYPE_S5PC100:
-               pata_s3c_cfg_mode(info->sfr_addr);
-               /* FALLTHROUGH */
-
        case TYPE_S5PV210:
                /* Configure as little endian */
                pata_s3c_set_endian(info->ide_addr, 0);
@@ -549,11 +544,6 @@ static int __init pata_s3c_probe(struct platform_device *pdev)
                info->sfr_addr = info->ide_addr + 0x1800;
                info->ide_addr += 0x1900;
                info->fifo_status_reg = 0x94;
-       } else if (cpu_type == TYPE_S5PC100) {
-               ap->ops = &pata_s5p_port_ops;
-               info->sfr_addr = info->ide_addr + 0x1800;
-               info->ide_addr += 0x1900;
-               info->fifo_status_reg = 0x84;
        } else {
                ap->ops = &pata_s5p_port_ops;
                info->fifo_status_reg = 0x84;
@@ -652,9 +642,6 @@ static struct platform_device_id pata_s3c_driver_ids[] = {
        {
                .name           = "s3c64xx-pata",
                .driver_data    = TYPE_S3C64XX,
-       }, {
-               .name           = "s5pc100-pata",
-               .driver_data    = TYPE_S5PC100,
        }, {
                .name           = "s5pv210-pata",
                .driver_data    = TYPE_S5PV210,
index 616a6d2..07bc7e4 100644 (file)
@@ -734,13 +734,12 @@ static int sata_fsl_port_start(struct ata_port *ap)
        if (!pp)
                return -ENOMEM;
 
-       mem = dma_alloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
-                                GFP_KERNEL);
+       mem = dma_zalloc_coherent(dev, SATA_FSL_PORT_PRIV_DMA_SZ, &mem_dma,
+                                 GFP_KERNEL);
        if (!mem) {
                kfree(pp);
                return -ENOMEM;
        }
-       memset(mem, 0, SATA_FSL_PORT_PRIV_DMA_SZ);
 
        pp->cmdslot = mem;
        pp->cmdslot_paddr = mem_dma;
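dma_zalloc_coherent() folds the zeroing into the allocation (in the kernel it simply adds __GFP_ZERO to the dma_alloc_coherent() call), which is why the separate memset() can go. A user-space sketch of the pattern with stand-in stubs:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* user-space stand-ins; the real API lives in <linux/dma-mapping.h> */
typedef uint64_t dma_addr_t;

static void *dma_alloc_coherent_stub(size_t size, dma_addr_t *handle)
{
	void *p = malloc(size);

	if (p)
		*handle = (dma_addr_t)(uintptr_t)p;	/* fake bus address */
	return p;
}

static void *dma_zalloc_coherent_stub(size_t size, dma_addr_t *handle)
{
	void *p = dma_alloc_coherent_stub(size, handle);

	if (p)
		memset(p, 0, size);	/* zeroing folded into the allocator */
	return p;
}

int main(void)
{
	dma_addr_t handle;
	void *mem = dma_zalloc_coherent_stub(4096, &handle);

	free(mem);
	return mem ? 0 : 1;
}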
index 65965cf..da3bc27 100644 (file)
@@ -512,7 +512,7 @@ static int ahci_highbank_probe(struct platform_device *pdev)
                return rc;
 
 
-       ahci_save_initial_config(dev, hpriv, 0, 0);
+       ahci_save_initial_config(dev, hpriv);
 
        /* prepare host */
        if (hpriv->cap & HOST_CAP_NCQ)
index 0534890..d81b20d 100644 (file)
@@ -1154,8 +1154,8 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
        status = readl(host_base + HOST_IRQ_STAT);
 
        if (status == 0xffffffff) {
-               printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
-                      "PCI fault or device removal?\n");
+               dev_err(host->dev, "IRQ status == 0xffffffff, "
+                       "PCI fault or device removal?\n");
                goto out;
        }
 
index 83969f8..6467c91 100644 (file)
@@ -176,14 +176,24 @@ static int __init cma_activate_area(struct cma *cma)
                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
+                       /*
+                        * alloc_contig_range requires the pfn range
+                        * specified to be in the same zone. Make this
+                        * simple by forcing the entire CMA resv range
+                        * to be in the same zone.
+                        */
                        if (page_zone(pfn_to_page(pfn)) != zone)
-                               return -EINVAL;
+                               goto err;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
 
        mutex_init(&cma->lock);
        return 0;
+
+err:
+       kfree(cma->bitmap);
+       return -EINVAL;
 }
 
 static struct cma cma_areas[MAX_CMA_AREAS];
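Returning early out of the zone check leaked the CMA bitmap allocated earlier in the function; the fix routes all post-allocation failures through an err label that frees it. A minimal sketch of that goto-unwind idiom (names and sizes are made up):

#include <stdlib.h>

#define ENOMEM 12
#define EINVAL 22

struct area {
	unsigned long *bitmap;
};

/* every failure after the bitmap allocation must release it */
static int activate_area(struct area *a, int pages_span_zones)
{
	a->bitmap = calloc(4, sizeof(*a->bitmap));
	if (!a->bitmap)
		return -ENOMEM;

	if (pages_span_zones)
		goto err;	/* previously returned here, leaking bitmap */

	return 0;

err:
	free(a->bitmap);
	a->bitmap = NULL;
	return -EINVAL;
}

int main(void)
{
	struct area a;

	return activate_area(&a, 1) == -EINVAL ? 0 : 1;
}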
index 9e9227e..00f2208 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/idr.h>
 #include <linux/acpi.h>
+#include <linux/clk/clk-conf.h>
 
 #include "base.h"
 #include "power/power.h"
@@ -89,8 +90,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
        return dev->archdata.irqs[num];
 #else
        struct resource *r;
-       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-               return of_irq_get(dev->dev.of_node, num);
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+               int ret;
+
+               ret = of_irq_get(dev->dev.of_node, num);
+               if (ret >= 0 || ret == -EPROBE_DEFER)
+                       return ret;
+       }
 
        r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
@@ -133,8 +139,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
 {
        struct resource *r;
 
-       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-               return of_irq_get_byname(dev->dev.of_node, name);
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+               int ret;
+
+               ret = of_irq_get_byname(dev->dev.of_node, name);
+               if (ret >= 0 || ret == -EPROBE_DEFER)
+                       return ret;
+       }
 
        r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
        return r ? r->start : -ENXIO;
@@ -489,6 +500,10 @@ static int platform_drv_probe(struct device *_dev)
        struct platform_device *dev = to_platform_device(_dev);
        int ret;
 
+       ret = of_clk_set_defaults(_dev->of_node, false);
+       if (ret < 0)
+               return ret;
+
        acpi_dev_pm_attach(_dev, true);
 
        ret = drv->probe(dev);
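The irq lookups now return an of_irq_get() result only when it is a valid virq or -EPROBE_DEFER, falling back to the static resource for any other error, and probe additionally applies DT clock defaults first. On the consumer side, -EPROBE_DEFER must be propagated rather than treated as a hard failure; a user-space sketch of that handling (the stub and values are hypothetical):

#include <stdio.h>

#define ENXIO		6	/* illustrative errno values */
#define EPROBE_DEFER	517

/* hypothetical stub standing in for platform_get_irq() */
static int platform_get_irq_stub(int scenario)
{
	switch (scenario) {
	case 0: return 42;		/* valid virq number */
	case 1: return -EPROBE_DEFER;	/* irqchip not probed yet */
	default: return -ENXIO;		/* no irq mapping at all */
	}
}

static int driver_probe(int scenario)
{
	int irq = platform_get_irq_stub(scenario);

	if (irq == -EPROBE_DEFER)
		return irq;	/* not an error: core retries probe later */
	if (irq < 0)
		return irq;	/* hard failure */

	printf("using irq %d\n", irq);
	return 0;
}

int main(void)
{
	for (int s = 0; s < 3; s++)
		printf("scenario %d -> %d\n", s, driver_probe(s));
	return 0;
}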
index 1b35c45..3f2e167 100644 (file)
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
        struct task_struct *opa;
 
        kref_get(&connection->kref);
+       /* We may just have force_sig()'ed this thread
+        * to get it out of some blocking network function.
+        * Clear signals; otherwise kthread_run(), which internally uses
+        * wait_on_completion_killable(), will mistake our pending signal
+        * for a new fatal signal and fail. */
+       flush_signals(current);
        opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
        if (IS_ERR(opa)) {
                drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
index b6c8aaf..5b17ec8 100644 (file)
@@ -1337,8 +1337,11 @@ int drbd_submit_peer_request(struct drbd_device *device,
                return 0;
        }
 
+       /* Discards don't have any payload.
+        * But the scsi layer still expects a bio_vec it can use internally,
+        * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
        if (peer_req->flags & EE_IS_TRIM)
-               nr_pages = 0; /* discards don't have any payload. */
+               nr_pages = 1;
 
        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
index 677db04..56d46ff 100644 (file)
@@ -3777,7 +3777,7 @@ static void floppy_rb0_cb(struct bio *bio, int err)
        int drive = cbdata->drive;
 
        if (err) {
-               pr_info("floppy: error %d while reading block 0", err);
+               pr_info("floppy: error %d while reading block 0\n", err);
                set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
        }
        complete(&cbdata->complete);
index 77087a2..a3b042c 100644 (file)
@@ -79,7 +79,7 @@ MODULE_PARM_DESC(home_node, "Home node for the device");
 
 static int queue_mode = NULL_Q_MQ;
 module_param(queue_mode, int, S_IRUGO);
-MODULE_PARM_DESC(use_mq, "Use blk-mq interface (0=bio,1=rq,2=multiqueue)");
+MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
 
 static int gb = 250;
 module_param(gb, int, S_IRUGO);
@@ -227,7 +227,10 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
 
 static void null_softirq_done_fn(struct request *rq)
 {
-       end_cmd(blk_mq_rq_to_pdu(rq));
+       if (queue_mode == NULL_Q_MQ)
+               end_cmd(blk_mq_rq_to_pdu(rq));
+       else
+               end_cmd(rq->special);
 }
 
 static inline void null_handle_cmd(struct nullb_cmd *cmd)
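In blk-mq mode the driver's per-command struct is the request pdu, carved out right behind struct request (which is what blk_mq_rq_to_pdu() resolves), while the legacy request path keeps a driver pointer in rq->special; the completion handler now picks the right one for the configured queue_mode. A toy sketch of the pdu layout (struct fields are stand-ins):

#include <stdio.h>
#include <stdlib.h>

struct request {
	int tag;
	void *special;		/* legacy rq mode: driver data pointer */
};

struct nullb_cmd {
	int done;
};

/* blk-mq places the driver pdu directly after struct request */
static void *rq_to_pdu(struct request *rq)
{
	return rq + 1;
}

int main(void)
{
	/* blk-mq allocates request + pdu as one object */
	struct request *rq = malloc(sizeof(*rq) + sizeof(struct nullb_cmd));
	struct nullb_cmd *cmd;

	if (!rq)
		return 1;
	cmd = rq_to_pdu(rq);
	printf("pdu lives %zu bytes after the request\n",
	       (size_t)((char *)cmd - (char *)rq));
	free(rq);
	return 0;
}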
index bbeb404..b2c98c1 100644 (file)
@@ -1431,6 +1431,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
 }
 
+static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
+{
+       struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
+
+       return obj_request->img_offset <
+           round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
+}
+
 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
 {
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
@@ -2748,7 +2756,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
         */
        if (!img_request_write_test(img_request) ||
                !img_request_layered_test(img_request) ||
-               rbd_dev->parent_overlap <= obj_request->img_offset ||
+               !obj_request_overlaps_parent(obj_request) ||
                ((known = obj_request_known_test(obj_request)) &&
                        obj_request_exists_test(obj_request))) {
 
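An object that only partially overlaps the parent still needs its copy-up, so the test now rounds parent_overlap up to a whole object before comparing against the object's image offset. A worked sketch of the arithmetic, assuming power-of-two object sizes (which is what this round_up() form relies on):

#include <stdio.h>

/* round up to a power-of-two boundary, as in <linux/kernel.h> */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long long obj_bytes = 1ULL << 22;	/* 4 MiB objects */
	unsigned long long parent_overlap = 5ULL << 20;	/* 5 MiB overlap */

	/* objects 0 and 1 overlap the parent; object 1 only partially */
	for (unsigned long long off = 0; off < 3 * obj_bytes; off += obj_bytes)
		printf("img_offset %llu overlaps parent: %s\n", off,
		       off < round_up(parent_overlap, obj_bytes) ? "yes" : "no");
	return 0;
}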
index 48eccb3..36e54be 100644 (file)
@@ -624,7 +624,16 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);
+
        up_write(&zram->init_lock);
+
+       /*
+        * Revalidate disk out of the init_lock to avoid lockdep splat.
+        * It's okay because disk's capacity is protected by init_lock
+        * so that revalidate_disk always sees up-to-date capacity.
+        */
+       if (reset_capacity)
+               revalidate_disk(zram->disk);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -665,6 +674,14 @@ static ssize_t disksize_store(struct device *dev,
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);
+
+       /*
+        * Revalidate disk out of the init_lock to avoid lockdep splat.
+        * It's okay because disk's capacity is protected by init_lock
+        * so that revalidate_disk always sees up-to-date capacity.
+        */
+       revalidate_disk(zram->disk);
+
        return len;
 
 out_destroy_comp:
index f983806..f50dffc 100644 (file)
@@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0b05, 0x17d0) },
        { USB_DEVICE(0x0CF3, 0x0036) },
        { USB_DEVICE(0x0CF3, 0x3004) },
-       { USB_DEVICE(0x0CF3, 0x3005) },
        { USB_DEVICE(0x0CF3, 0x3008) },
        { USB_DEVICE(0x0CF3, 0x311D) },
        { USB_DEVICE(0x0CF3, 0x311E) },
@@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
index a1c80b0..6250fc2 100644 (file)
@@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
-       { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
index 04680ea..fede8ca 100644 (file)
@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
            H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
                BT_ERR("Non-link packet received in non-active state");
                h5_reset_rx(h5);
+               return 0;
        }
 
        h5->rx_func = h5_rx_payload;
index a118ec1..1f37d98 100644 (file)
@@ -45,7 +45,7 @@ config OMAP_INTERCONNECT
 
 config ARM_CCI
        bool "ARM CCI driver support"
-       depends on ARM
+       depends on ARM && OF && CPU_V7
        help
          Driver supporting the CCI cache coherent interconnect for ARM
          platforms.
index 334601c..c4419ea 100644 (file)
@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
 static u8 *rng_buffer;
 
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+                              int wait);
+
 static size_t rng_buffer_size(void)
 {
        return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
 }
 
+static void add_early_randomness(struct hwrng *rng)
+{
+       unsigned char bytes[16];
+       int bytes_read;
+
+       /*
+        * Currently only virtio-rng cannot return data during device
+        * probe, and that's handled in virtio-rng.c itself.  If there
+        * are more such devices, this call to rng_get_data can be
+        * made conditional here instead of doing it per-device.
+        */
+       bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+       if (bytes_read > 0)
+               add_device_randomness(bytes, bytes_read);
+}
+
 static inline int hwrng_init(struct hwrng *rng)
 {
-       if (!rng->init)
-               return 0;
-       return rng->init(rng);
+       if (rng->init) {
+               int ret;
+
+               ret = rng->init(rng);
+               if (ret)
+                       return ret;
+       }
+       add_early_randomness(rng);
+       return 0;
 }
 
 static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
 {
        int err = -EINVAL;
        struct hwrng *old_rng, *tmp;
-       unsigned char bytes[16];
-       int bytes_read;
 
        if (rng->name == NULL ||
            (rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
        INIT_LIST_HEAD(&rng->list);
        list_add_tail(&rng->list, &rng_list);
 
-       bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
-       if (bytes_read > 0)
-               add_device_randomness(bytes, bytes_read);
+       if (old_rng && !rng->init) {
+               /*
+                * Use a new device's input to add some randomness to
+                * the system.  If this rng device isn't going to be
+                * used right away, its init function hasn't been
+                * called yet; so only use the randomness from devices
+                * that don't need an init callback.
+                */
+               add_early_randomness(rng);
+       }
+
 out_unlock:
        mutex_unlock(&rng_mutex);
 out:
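The core now seeds the input pool from hwrng_init(), after a successful init callback, and at registration time samples only devices that need no init; the virtio-rng hunk below adds the matching guard so a read issued during registration can't reach the host before probing finishes. A user-space sketch of the ordering (all names are stand-ins):

#include <stdio.h>
#include <string.h>

struct rng {
	int (*init)(struct rng *r);
	int (*read)(struct rng *r, unsigned char *buf, int len);
	int ready;
};

static void add_early_randomness(struct rng *r)
{
	unsigned char bytes[16];
	int n = r->read(r, bytes, sizeof(bytes));

	if (n > 0)
		printf("seeded input pool with %d bytes\n", n);
}

static int rng_init(struct rng *r)
{
	if (r->init) {
		int ret = r->init(r);
		if (ret)
			return ret;
	}
	add_early_randomness(r);	/* device is certainly usable here */
	return 0;
}

static int my_init(struct rng *r)
{
	r->ready = 1;
	return 0;
}

static int my_read(struct rng *r, unsigned char *buf, int len)
{
	if (!r->ready)
		return 0;	/* mirrors virtio-rng's !probe_done guard */
	memset(buf, 0xa5, len);
	return len;
}

int main(void)
{
	struct rng r = { .init = my_init, .read = my_read, .ready = 0 };

	return rng_init(&r);
}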
index f3e7150..e9b15bc 100644 (file)
@@ -38,6 +38,8 @@ struct virtrng_info {
        int index;
 };
 
+static bool probe_done;
+
 static void random_recv_done(struct virtqueue *vq)
 {
        struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
        int ret;
        struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
 
+       /*
+        * Don't ask the host for data until we're set up.  This call can
+        * happen during hwrng_register(), after commit d9e7972619.
+        */
+       if (unlikely(!probe_done))
+               return 0;
+
        if (!vi->busy) {
                vi->busy = true;
                init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
                return err;
        }
 
+       probe_done = true;
        return 0;
 }
 
index d915707..93dcad0 100644 (file)
@@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs)
        if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(old_mask, &current->cpus_allowed);
-       set_cpus_allowed_ptr(current, cpumask_of(0));
+       rc = set_cpus_allowed_ptr(current, cpumask_of(0));
+       if (rc)
+               goto out;
        if (smp_processor_id() != 0) {
                rc = -EBUSY;
                goto out;
index 4ad71ef..71529e1 100644 (file)
@@ -641,7 +641,7 @@ retry:
                } while (unlikely(entropy_count < pool_size-2 && pnfrac));
        }
 
-       if (entropy_count < 0) {
+       if (unlikely(entropy_count < 0)) {
                pr_warn("random: negative entropy/overflow: pool %s count %d\n",
                        r->name, entropy_count);
                WARN_ON(1);
@@ -980,26 +980,37 @@ static void push_to_pool(struct work_struct *work)
 static size_t account(struct entropy_store *r, size_t nbytes, int min,
                      int reserved)
 {
-       int have_bytes;
        int entropy_count, orig;
-       size_t ibytes;
+       size_t ibytes, nfrac;
 
        BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
 
        /* Can we pull enough? */
 retry:
        entropy_count = orig = ACCESS_ONCE(r->entropy_count);
-       have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
        ibytes = nbytes;
        /* If limited, never pull more than available */
-       if (r->limit)
-               ibytes = min_t(size_t, ibytes, have_bytes - reserved);
+       if (r->limit) {
+               int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+
+               if ((have_bytes -= reserved) < 0)
+                       have_bytes = 0;
+               ibytes = min_t(size_t, ibytes, have_bytes);
+       }
        if (ibytes < min)
                ibytes = 0;
-       if (have_bytes >= ibytes + reserved)
-               entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+
+       if (unlikely(entropy_count < 0)) {
+               pr_warn("random: negative entropy count: pool %s count %d\n",
+                       r->name, entropy_count);
+               WARN_ON(1);
+               entropy_count = 0;
+       }
+       nfrac = ibytes << (ENTROPY_SHIFT + 3);
+       if ((size_t) entropy_count > nfrac)
+               entropy_count -= nfrac;
        else
-               entropy_count = reserved << (ENTROPY_SHIFT + 3);
+               entropy_count = 0;
 
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;
@@ -1375,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
                            "with %d bits of entropy available\n",
                            current->comm, nonblocking_pool.entropy_total);
 
+       nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
        ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
 
        trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
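account() works in fractional bits: entropy_count is kept in units of 1/8th of a bit (ENTROPY_SHIFT == 3), so converting it to whole bytes is a right shift by ENTROPY_SHIFT + 3, and the rework clamps each intermediate value so a concurrent drain can't push the count negative. A worked sketch of the conversion, assuming the same constants:

#include <stdio.h>

#define ENTROPY_SHIFT 3		/* pool counts in 1/8ths of a bit */

int main(void)
{
	int entropy_count = 512 << ENTROPY_SHIFT;	/* 512 bits stored */
	int reserved = 16;				/* bytes held back */

	/* >> 3 converts bits to bytes, >> ENTROPY_SHIFT strips fractions */
	int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;		/* clamp instead of going negative */

	printf("at most %d bytes may be extracted\n", have_bytes);
	return 0;
}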
index 9f9c5ae..cfd3af7 100644 (file)
@@ -102,6 +102,13 @@ config COMMON_CLK_KEYSTONE
            Supports clock drivers for Keystone based SOCs. These SOCs have a
           local power sleep control module that gates the clock to the IPs
           and PLLs.
 
+config COMMON_CLK_PALMAS
+       tristate "Clock driver for TI Palmas devices"
+       depends on MFD_PALMAS
+       ---help---
+         This driver supports the 32KHz clock outputs KG and KG_AUDIO of TI
+         Palmas devices using the common clock framework.
+
 source "drivers/clk/qcom/Kconfig"
 
 endmenu
index 567f102..f537a0b 100644 (file)
@@ -9,12 +9,16 @@ obj-$(CONFIG_COMMON_CLK)      += clk-gate.o
 obj-$(CONFIG_COMMON_CLK)       += clk-mux.o
 obj-$(CONFIG_COMMON_CLK)       += clk-composite.o
 obj-$(CONFIG_COMMON_CLK)       += clk-fractional-divider.o
+ifeq ($(CONFIG_OF), y)
+obj-$(CONFIG_COMMON_CLK)       += clk-conf.o
+endif
 
 # hardware specific clock types
 # please keep this section sorted lexicographically by file/directory path name
 obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)    += clk-axi-clkgen.o
 obj-$(CONFIG_ARCH_AXXIA)               += clk-axm5516.o
 obj-$(CONFIG_ARCH_BCM2835)             += clk-bcm2835.o
+obj-$(CONFIG_ARCH_CLPS711X)            += clk-clps711x.o
 obj-$(CONFIG_ARCH_EFM32)               += clk-efm32gg.o
 obj-$(CONFIG_ARCH_HIGHBANK)            += clk-highbank.o
 obj-$(CONFIG_MACH_LOONGSON1)           += clk-ls1x.o
@@ -22,6 +26,7 @@ obj-$(CONFIG_COMMON_CLK_MAX77686)     += clk-max77686.o
 obj-$(CONFIG_ARCH_MOXART)              += clk-moxart.o
 obj-$(CONFIG_ARCH_NOMADIK)             += clk-nomadik.o
 obj-$(CONFIG_ARCH_NSPIRE)              += clk-nspire.o
+obj-$(CONFIG_COMMON_CLK_PALMAS)                += clk-palmas.o
 obj-$(CONFIG_CLK_PPC_CORENET)          += clk-ppc-corenet.o
 obj-$(CONFIG_COMMON_CLK_S2MPS11)       += clk-s2mps11.o
 obj-$(CONFIG_COMMON_CLK_SI5351)                += clk-si5351.o
index 7333061..59fa3cc 100644 (file)
@@ -388,6 +388,7 @@ static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc,
        if (parent_rate)
                return parent_rate;
 
+       pr_warn("Main crystal frequency not set, using approximate value\n");
        tmp = pmc_read(pmc, AT91_CKGR_MCFR);
        if (!(tmp & AT91_PMC_MAINRDY))
                return 0;
diff --git a/drivers/clk/clk-clps711x.c b/drivers/clk/clk-clps711x.c
new file mode 100644 (file)
index 0000000..715eec1
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+ *  Cirrus Logic CLPS711X CLK driver
+ *
+ *  Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mfd/syscon/clps711x.h>
+
+#include <dt-bindings/clock/clps711x-clock.h>
+
+#define CLPS711X_SYSCON1       (0x0100)
+#define CLPS711X_SYSCON2       (0x1100)
+#define CLPS711X_SYSFLG2       (CLPS711X_SYSCON2 + SYSFLG_OFFSET)
+#define CLPS711X_PLLR          (0xa5a8)
+
+#define CLPS711X_EXT_FREQ      (13000000)
+#define CLPS711X_OSC_FREQ      (3686400)
+
+static const struct clk_div_table spi_div_table[] = {
+       { .val = 0, .div = 32, },
+       { .val = 1, .div = 8, },
+       { .val = 2, .div = 2, },
+       { .val = 3, .div = 1, },
+};
+
+static const struct clk_div_table timer_div_table[] = {
+       { .val = 0, .div = 256, },
+       { .val = 1, .div = 1, },
+};
+
+struct clps711x_clk {
+       struct clk_onecell_data clk_data;
+       spinlock_t              lock;
+       struct clk              *clks[CLPS711X_CLK_MAX];
+};
+
+static struct clps711x_clk * __init _clps711x_clk_init(void __iomem *base,
+                                                      u32 fref)
+{
+       u32 tmp, f_cpu, f_pll, f_bus, f_tim, f_pwm, f_spi;
+       struct clps711x_clk *clps711x_clk;
+       unsigned i;
+
+       if (!base)
+               return ERR_PTR(-ENOMEM);
+
+       clps711x_clk = kzalloc(sizeof(*clps711x_clk), GFP_KERNEL);
+       if (!clps711x_clk)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock_init(&clps711x_clk->lock);
+
+       /* Read PLL multiplier value and sanity check */
+       tmp = readl(base + CLPS711X_PLLR) >> 24;
+       if (((tmp >= 10) && (tmp <= 50)) || !fref)
+               f_pll = DIV_ROUND_UP(CLPS711X_OSC_FREQ * tmp, 2);
+       else
+               f_pll = fref;
+
+       tmp = readl(base + CLPS711X_SYSFLG2);
+       if (tmp & SYSFLG2_CKMODE) {
+               f_cpu = CLPS711X_EXT_FREQ;
+               f_bus = CLPS711X_EXT_FREQ;
+               f_spi = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 96);
+               f_pll = 0;
+               f_pwm = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 128);
+       } else {
+               f_cpu = f_pll;
+               if (f_cpu > 36864000)
+                       f_bus = DIV_ROUND_UP(f_cpu, 2);
+               else
+                       f_bus = 36864000 / 2;
+               f_spi = DIV_ROUND_CLOSEST(f_cpu, 576);
+               f_pwm = DIV_ROUND_CLOSEST(f_cpu, 768);
+       }
+
+       if (tmp & SYSFLG2_CKMODE) {
+               if (readl(base + CLPS711X_SYSCON2) & SYSCON2_OSTB)
+                       f_tim = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 26);
+               else
+                       f_tim = DIV_ROUND_CLOSEST(CLPS711X_EXT_FREQ, 24);
+       } else
+               f_tim = DIV_ROUND_CLOSEST(f_cpu, 144);
+
+       tmp = readl(base + CLPS711X_SYSCON1);
+       /* Timer1 in free running mode.
+        * Counter will wrap around to 0xffff when it underflows
+        * and will continue to count down.
+        */
+       tmp &= ~(SYSCON1_TC1M | SYSCON1_TC1S);
+       /* Timer2 in prescale mode.
+        * Value written is automatically re-loaded when
+        * the counter underflows.
+        */
+       tmp |= SYSCON1_TC2M | SYSCON1_TC2S;
+       writel(tmp, base + CLPS711X_SYSCON1);
+
+       clps711x_clk->clks[CLPS711X_CLK_DUMMY] =
+               clk_register_fixed_rate(NULL, "dummy", NULL, CLK_IS_ROOT, 0);
+       clps711x_clk->clks[CLPS711X_CLK_CPU] =
+               clk_register_fixed_rate(NULL, "cpu", NULL, CLK_IS_ROOT, f_cpu);
+       clps711x_clk->clks[CLPS711X_CLK_BUS] =
+               clk_register_fixed_rate(NULL, "bus", NULL, CLK_IS_ROOT, f_bus);
+       clps711x_clk->clks[CLPS711X_CLK_PLL] =
+               clk_register_fixed_rate(NULL, "pll", NULL, CLK_IS_ROOT, f_pll);
+       clps711x_clk->clks[CLPS711X_CLK_TIMERREF] =
+               clk_register_fixed_rate(NULL, "timer_ref", NULL, CLK_IS_ROOT,
+                                       f_tim);
+       clps711x_clk->clks[CLPS711X_CLK_TIMER1] =
+               clk_register_divider_table(NULL, "timer1", "timer_ref", 0,
+                                          base + CLPS711X_SYSCON1, 5, 1, 0,
+                                          timer_div_table, &clps711x_clk->lock);
+       clps711x_clk->clks[CLPS711X_CLK_TIMER2] =
+               clk_register_divider_table(NULL, "timer2", "timer_ref", 0,
+                                          base + CLPS711X_SYSCON1, 7, 1, 0,
+                                          timer_div_table, &clps711x_clk->lock);
+       clps711x_clk->clks[CLPS711X_CLK_PWM] =
+               clk_register_fixed_rate(NULL, "pwm", NULL, CLK_IS_ROOT, f_pwm);
+       clps711x_clk->clks[CLPS711X_CLK_SPIREF] =
+               clk_register_fixed_rate(NULL, "spi_ref", NULL, CLK_IS_ROOT,
+                                       f_spi);
+       clps711x_clk->clks[CLPS711X_CLK_SPI] =
+               clk_register_divider_table(NULL, "spi", "spi_ref", 0,
+                                          base + CLPS711X_SYSCON1, 16, 2, 0,
+                                          spi_div_table, &clps711x_clk->lock);
+       clps711x_clk->clks[CLPS711X_CLK_UART] =
+               clk_register_fixed_factor(NULL, "uart", "bus", 0, 1, 10);
+       clps711x_clk->clks[CLPS711X_CLK_TICK] =
+               clk_register_fixed_rate(NULL, "tick", NULL, CLK_IS_ROOT, 64);
+
+       for (i = 0; i < CLPS711X_CLK_MAX; i++)
+               if (IS_ERR(clps711x_clk->clks[i]))
+                       pr_err("clk %i: register failed with %ld\n",
+                              i, PTR_ERR(clps711x_clk->clks[i]));
+
+       return clps711x_clk;
+}
+
+void __init clps711x_clk_init(void __iomem *base)
+{
+       struct clps711x_clk *clps711x_clk;
+
+       clps711x_clk = _clps711x_clk_init(base, 73728000);
+
+       BUG_ON(IS_ERR(clps711x_clk));
+
+       /* Clocksource */
+       clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_TIMER1],
+                           NULL, "clps711x-timer.0");
+       clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_TIMER2],
+                           NULL, "clps711x-timer.1");
+
+       /* Drivers */
+       clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_PWM],
+                           NULL, "clps711x-pwm");
+       clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_UART],
+                           NULL, "clps711x-uart.0");
+       clk_register_clkdev(clps711x_clk->clks[CLPS711X_CLK_UART],
+                           NULL, "clps711x-uart.1");
+}
+
+#ifdef CONFIG_OF
+static void __init clps711x_clk_init_dt(struct device_node *np)
+{
+       void __iomem *base = of_iomap(np, 0);
+       struct clps711x_clk *clps711x_clk;
+       u32 fref = 0;
+
+       WARN_ON(of_property_read_u32(np, "startup-frequency", &fref));
+
+       clps711x_clk = _clps711x_clk_init(base, fref);
+       BUG_ON(IS_ERR(clps711x_clk));
+
+       clps711x_clk->clk_data.clks = clps711x_clk->clks;
+       clps711x_clk->clk_data.clk_num = CLPS711X_CLK_MAX;
+       of_clk_add_provider(np, of_clk_src_onecell_get,
+                           &clps711x_clk->clk_data);
+}
+CLK_OF_DECLARE(clps711x, "cirrus,clps711x-clk", clps711x_clk_init_dt);
+#endif
index 57a078e..b9355da 100644 (file)
@@ -64,11 +64,56 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *mux_ops = composite->mux_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
        struct clk_hw *mux_hw = composite->mux_hw;
+       struct clk *parent;
+       unsigned long parent_rate;
+       long tmp_rate, best_rate = 0;
+       unsigned long rate_diff;
+       unsigned long best_rate_diff = ULONG_MAX;
+       int i;
 
        if (rate_hw && rate_ops && rate_ops->determine_rate) {
                rate_hw->clk = hw->clk;
                return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
                                                best_parent_p);
+       } else if (rate_hw && rate_ops && rate_ops->round_rate &&
+                  mux_hw && mux_ops && mux_ops->set_parent) {
+               *best_parent_p = NULL;
+
+               if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
+                       *best_parent_p = clk_get_parent(mux_hw->clk);
+                       *best_parent_rate = __clk_get_rate(*best_parent_p);
+
+                       return rate_ops->round_rate(rate_hw, rate,
+                                                   best_parent_rate);
+               }
+
+               for (i = 0; i < __clk_get_num_parents(mux_hw->clk); i++) {
+                       parent = clk_get_parent_by_index(mux_hw->clk, i);
+                       if (!parent)
+                               continue;
+
+                       parent_rate = __clk_get_rate(parent);
+
+                       tmp_rate = rate_ops->round_rate(rate_hw, rate,
+                                                       &parent_rate);
+                       if (tmp_rate < 0)
+                               continue;
+
+                       rate_diff = abs(rate - tmp_rate);
+
+                       if (!rate_diff || !*best_parent_p
+                                      || best_rate_diff > rate_diff) {
+                               *best_parent_p = parent;
+                               *best_parent_rate = parent_rate;
+                               best_rate_diff = rate_diff;
+                               best_rate = tmp_rate;
+                       }
+
+                       if (!rate_diff)
+                               return rate;
+               }
+
+               return best_rate;
        } else if (mux_hw && mux_ops && mux_ops->determine_rate) {
                mux_hw->clk = hw->clk;
                return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
@@ -162,7 +207,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
        clk_composite_ops = &composite->ops;
 
        if (mux_hw && mux_ops) {
-               if (!mux_ops->get_parent || !mux_ops->set_parent) {
+               if (!mux_ops->get_parent) {
                        clk = ERR_PTR(-EINVAL);
                        goto err;
                }
@@ -170,7 +215,8 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
                composite->mux_hw = mux_hw;
                composite->mux_ops = mux_ops;
                clk_composite_ops->get_parent = clk_composite_get_parent;
-               clk_composite_ops->set_parent = clk_composite_set_parent;
+               if (mux_ops->set_parent)
+                       clk_composite_ops->set_parent = clk_composite_set_parent;
                if (mux_ops->determine_rate)
                        clk_composite_ops->determine_rate = clk_composite_determine_rate;
        }
@@ -180,24 +226,27 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
                        clk = ERR_PTR(-EINVAL);
                        goto err;
                }
+               clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
 
-               /* .round_rate is a prerequisite for .set_rate */
-               if (rate_ops->round_rate) {
-                       clk_composite_ops->round_rate = clk_composite_round_rate;
-                       if (rate_ops->set_rate) {
-                               clk_composite_ops->set_rate = clk_composite_set_rate;
-                       }
-               } else {
-                       WARN(rate_ops->set_rate,
-                               "%s: missing round_rate op is required\n",
-                               __func__);
+               if (rate_ops->determine_rate)
+                       clk_composite_ops->determine_rate =
+                               clk_composite_determine_rate;
+               else if (rate_ops->round_rate)
+                       clk_composite_ops->round_rate =
+                               clk_composite_round_rate;
+
+               /* .set_rate requires either .round_rate or .determine_rate */
+               if (rate_ops->set_rate) {
+                       if (rate_ops->determine_rate || rate_ops->round_rate)
+                               clk_composite_ops->set_rate =
+                                               clk_composite_set_rate;
+                       else
+                               WARN(1, "%s: missing round_rate op is required\n",
+                                               __func__);
                }
 
                composite->rate_hw = rate_hw;
                composite->rate_ops = rate_ops;
-               clk_composite_ops->recalc_rate = clk_composite_recalc_rate;
-               if (rate_ops->determine_rate)
-                       clk_composite_ops->determine_rate = clk_composite_determine_rate;
        }
 
        if (gate_hw && gate_ops) {
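The new determine_rate branch lets a composite clock with a round_rate-only rate component and a settable mux pick its parent: it rounds the requested rate against every candidate parent and keeps the one with the smallest deviation, returning early on an exact hit. A user-space sketch of that search with made-up parent rates and a toy divider:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

/* toy divider that can only divide by 1, 2 or 4 */
static long round_div_rate(long rate, long parent)
{
	long best = parent, divs[] = { 1, 2, 4 };

	for (int i = 0; i < 3; i++)
		if (labs(parent / divs[i] - rate) < labs(best - rate))
			best = parent / divs[i];
	return best;
}

int main(void)
{
	long parents[] = { 48000000, 100000000, 27000000 };
	long target = 24000000, best_rate = 0, best_diff = LONG_MAX;
	int best_parent = -1;

	for (int i = 0; i < 3; i++) {
		long r = round_div_rate(target, parents[i]);
		long diff = labs(target - r);

		if (diff < best_diff) {
			best_diff = diff;
			best_rate = r;
			best_parent = i;
		}
		if (!diff)
			break;	/* exact match, stop early */
	}
	printf("parent %d gives %ld Hz for a %ld Hz request\n",
	       best_parent, best_rate, target);
	return 0;
}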
diff --git a/drivers/clk/clk-conf.c b/drivers/clk/clk-conf.c
new file mode 100644 (file)
index 0000000..aad4796
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/printk.h>
+#include "clk.h"
+
+static int __set_clk_parents(struct device_node *node, bool clk_supplier)
+{
+       struct of_phandle_args clkspec;
+       int index, rc, num_parents;
+       struct clk *clk, *pclk;
+
+       num_parents = of_count_phandle_with_args(node, "assigned-clock-parents",
+                                                "#clock-cells");
+       if (num_parents == -EINVAL)
+               pr_err("clk: invalid value of assigned-clock-parents property at %s\n",
+                      node->full_name);
+
+       for (index = 0; index < num_parents; index++) {
+               rc = of_parse_phandle_with_args(node, "assigned-clock-parents",
+                                       "#clock-cells", index, &clkspec);
+               if (rc < 0) {
+                       /* skip empty (null) phandles */
+                       if (rc == -ENOENT)
+                               continue;
+                       else
+                               return rc;
+               }
+               if (clkspec.np == node && !clk_supplier)
+                       return 0;
+               pclk = of_clk_get_by_clkspec(&clkspec);
+               if (IS_ERR(pclk)) {
+                       pr_warn("clk: couldn't get parent clock %d for %s\n",
+                               index, node->full_name);
+                       return PTR_ERR(pclk);
+               }
+
+               rc = of_parse_phandle_with_args(node, "assigned-clocks",
+                                       "#clock-cells", index, &clkspec);
+               if (rc < 0)
+                       goto err;
+               if (clkspec.np == node && !clk_supplier) {
+                       rc = 0;
+                       goto err;
+               }
+               clk = of_clk_get_by_clkspec(&clkspec);
+               if (IS_ERR(clk)) {
+                       pr_warn("clk: couldn't get assigned clock %d for %s\n",
+                               index, node->full_name);
+                       rc = PTR_ERR(clk);
+                       goto err;
+               }
+
+               rc = clk_set_parent(clk, pclk);
+               if (rc < 0)
+                       pr_err("clk: failed to reparent %s to %s: %d\n",
+                              __clk_get_name(clk), __clk_get_name(pclk), rc);
+               clk_put(clk);
+               clk_put(pclk);
+       }
+       return 0;
+err:
+       clk_put(pclk);
+       return rc;
+}
+
+static int __set_clk_rates(struct device_node *node, bool clk_supplier)
+{
+       struct of_phandle_args clkspec;
+       struct property *prop;
+       const __be32 *cur;
+       int rc, index = 0;
+       struct clk *clk;
+       u32 rate;
+
+       of_property_for_each_u32(node, "assigned-clock-rates", prop, cur, rate) {
+               if (rate) {
+                       rc = of_parse_phandle_with_args(node, "assigned-clocks",
+                                       "#clock-cells", index, &clkspec);
+                       if (rc < 0) {
+                               /* skip empty (null) phandles */
+                               if (rc == -ENOENT)
+                                       continue;
+                               else
+                                       return rc;
+                       }
+                       if (clkspec.np == node && !clk_supplier)
+                               return 0;
+
+                       clk = of_clk_get_by_clkspec(&clkspec);
+                       if (IS_ERR(clk)) {
+                               pr_warn("clk: couldn't get clock %d for %s\n",
+                                       index, node->full_name);
+                               return PTR_ERR(clk);
+                       }
+
+                       rc = clk_set_rate(clk, rate);
+                       if (rc < 0)
+                               pr_err("clk: couldn't set %s clock rate: %d\n",
+                                      __clk_get_name(clk), rc);
+                       clk_put(clk);
+               }
+               index++;
+       }
+       return 0;
+}
+
+/**
+ * of_clk_set_defaults() - parse and set assigned clocks configuration
+ * @node: device node to apply clock settings for
+ * @clk_supplier: true if clocks supplied by @node should also be considered
+ *
+ * This function parses 'assigned-{clocks/clock-parents/clock-rates}' properties
+ * and sets any specified clock parents and rates. The @clk_supplier argument
+ * should be set to true if @node may be also a clock supplier of any clock
+ * listed in its 'assigned-clocks' or 'assigned-clock-parents' properties.
+ * If @clk_supplier is false, the function exits returning 0 as soon as it
+ * determines that @node is also a supplier of any of the clocks.
+ */
+int of_clk_set_defaults(struct device_node *node, bool clk_supplier)
+{
+       int rc;
+
+       if (!node)
+               return 0;
+
+       rc = __set_clk_parents(node, clk_supplier);
+       if (rc < 0)
+               return rc;
+
+       return __set_clk_rates(node, clk_supplier);
+}
+EXPORT_SYMBOL_GPL(of_clk_set_defaults);
diff --git a/drivers/clk/clk-palmas.c b/drivers/clk/clk-palmas.c
new file mode 100644 (file)
index 0000000..781630e
--- /dev/null
@@ -0,0 +1,307 @@
+/*
+ * Clock driver for Palmas device.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ * Copyright (c) 2013-2014 Texas Instruments, Inc.
+ *
+ * Author:     Laxman Dewangan <ldewangan@nvidia.com>
+ *             Peter Ujfalusi <peter.ujfalusi@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind,
+ * whether express or implied; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/palmas.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE1    1
+#define PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE2    2
+#define PALMAS_CLOCK_DT_EXT_CONTROL_NSLEEP     3
+
+struct palmas_clk32k_desc {
+       const char *clk_name;
+       unsigned int control_reg;
+       unsigned int enable_mask;
+       unsigned int sleep_mask;
+       unsigned int sleep_reqstr_id;
+       int delay;
+};
+
+struct palmas_clock_info {
+       struct device *dev;
+       struct clk *clk;
+       struct clk_hw hw;
+       struct palmas *palmas;
+       struct palmas_clk32k_desc *clk_desc;
+       int ext_control_pin;
+};
+
+static inline struct palmas_clock_info *to_palmas_clks_info(struct clk_hw *hw)
+{
+       return container_of(hw, struct palmas_clock_info, hw);
+}
+
+static unsigned long palmas_clks_recalc_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       return 32768;
+}
+
+static int palmas_clks_prepare(struct clk_hw *hw)
+{
+       struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
+       int ret;
+
+       ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
+                                cinfo->clk_desc->control_reg,
+                                cinfo->clk_desc->enable_mask,
+                                cinfo->clk_desc->enable_mask);
+       if (ret < 0)
+               dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
+                       cinfo->clk_desc->control_reg, ret);
+       else if (cinfo->clk_desc->delay)
+               udelay(cinfo->clk_desc->delay);
+
+       return ret;
+}
+
+static void palmas_clks_unprepare(struct clk_hw *hw)
+{
+       struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
+       int ret;
+
+       /*
+        * Clock can be disabled through external pin if it is externally
+        * controlled.
+        */
+       if (cinfo->ext_control_pin)
+               return;
+
+       ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
+                                cinfo->clk_desc->control_reg,
+                                cinfo->clk_desc->enable_mask, 0);
+       if (ret < 0)
+               dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
+                       cinfo->clk_desc->control_reg, ret);
+}
+
+static int palmas_clks_is_prepared(struct clk_hw *hw)
+{
+       struct palmas_clock_info *cinfo = to_palmas_clks_info(hw);
+       int ret;
+       u32 val;
+
+       if (cinfo->ext_control_pin)
+               return 1;
+
+       ret = palmas_read(cinfo->palmas, PALMAS_RESOURCE_BASE,
+                         cinfo->clk_desc->control_reg, &val);
+       if (ret < 0) {
+               dev_err(cinfo->dev, "Reg 0x%02x read failed, %d\n",
+                       cinfo->clk_desc->control_reg, ret);
+               return ret;
+       }
+       return !!(val & cinfo->clk_desc->enable_mask);
+}
+
+static struct clk_ops palmas_clks_ops = {
+       .prepare        = palmas_clks_prepare,
+       .unprepare      = palmas_clks_unprepare,
+       .is_prepared    = palmas_clks_is_prepared,
+       .recalc_rate    = palmas_clks_recalc_rate,
+};
+
+struct palmas_clks_of_match_data {
+       struct clk_init_data init;
+       struct palmas_clk32k_desc desc;
+};
+
+static struct palmas_clks_of_match_data palmas_of_clk32kg = {
+       .init = {
+               .name = "clk32kg",
+               .ops = &palmas_clks_ops,
+               .flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED,
+       },
+       .desc = {
+               .clk_name = "clk32kg",
+               .control_reg = PALMAS_CLK32KG_CTRL,
+               .enable_mask = PALMAS_CLK32KG_CTRL_MODE_ACTIVE,
+               .sleep_mask = PALMAS_CLK32KG_CTRL_MODE_SLEEP,
+               .sleep_reqstr_id = PALMAS_EXTERNAL_REQSTR_ID_CLK32KG,
+               .delay = 200,
+       },
+};
+
+static struct palmas_clks_of_match_data palmas_of_clk32kgaudio = {
+       .init = {
+               .name = "clk32kgaudio",
+               .ops = &palmas_clks_ops,
+               .flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED,
+       },
+       .desc = {
+               .clk_name = "clk32kgaudio",
+               .control_reg = PALMAS_CLK32KGAUDIO_CTRL,
+               .enable_mask = PALMAS_CLK32KG_CTRL_MODE_ACTIVE,
+               .sleep_mask = PALMAS_CLK32KG_CTRL_MODE_SLEEP,
+               .sleep_reqstr_id = PALMAS_EXTERNAL_REQSTR_ID_CLK32KGAUDIO,
+               .delay = 200,
+       },
+};
+
+static struct of_device_id palmas_clks_of_match[] = {
+       {
+               .compatible = "ti,palmas-clk32kg",
+               .data = &palmas_of_clk32kg,
+       },
+       {
+               .compatible = "ti,palmas-clk32kgaudio",
+               .data = &palmas_of_clk32kgaudio,
+       },
+       { },
+};
+MODULE_DEVICE_TABLE(of, palmas_clks_of_match);
+
+static void palmas_clks_get_clk_data(struct platform_device *pdev,
+                                    struct palmas_clock_info *cinfo)
+{
+       struct device_node *node = pdev->dev.of_node;
+       unsigned int prop;
+       int ret;
+
+       ret = of_property_read_u32(node, "ti,external-sleep-control",
+                                  &prop);
+       if (ret)
+               return;
+
+       switch (prop) {
+       case PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE1:
+               prop = PALMAS_EXT_CONTROL_ENABLE1;
+               break;
+       case PALMAS_CLOCK_DT_EXT_CONTROL_ENABLE2:
+               prop = PALMAS_EXT_CONTROL_ENABLE2;
+               break;
+       case PALMAS_CLOCK_DT_EXT_CONTROL_NSLEEP:
+               prop = PALMAS_EXT_CONTROL_NSLEEP;
+               break;
+       default:
+               dev_warn(&pdev->dev, "%s: Invalid ext control option: %u\n",
+                        node->name, prop);
+               prop = 0;
+               break;
+       }
+       cinfo->ext_control_pin = prop;
+}
+
+static int palmas_clks_init_configure(struct palmas_clock_info *cinfo)
+{
+       int ret;
+
+       ret = palmas_update_bits(cinfo->palmas, PALMAS_RESOURCE_BASE,
+                                cinfo->clk_desc->control_reg,
+                                cinfo->clk_desc->sleep_mask, 0);
+       if (ret < 0) {
+               dev_err(cinfo->dev, "Reg 0x%02x update failed, %d\n",
+                       cinfo->clk_desc->control_reg, ret);
+               return ret;
+       }
+
+       if (cinfo->ext_control_pin) {
+               ret = clk_prepare(cinfo->clk);
+               if (ret < 0) {
+                       dev_err(cinfo->dev, "Clock prep failed, %d\n", ret);
+                       return ret;
+               }
+
+               ret = palmas_ext_control_req_config(cinfo->palmas,
+                                       cinfo->clk_desc->sleep_reqstr_id,
+                                       cinfo->ext_control_pin, true);
+               if (ret < 0) {
+                       dev_err(cinfo->dev, "Ext config for %s failed, %d\n",
+                               cinfo->clk_desc->clk_name, ret);
+                       return ret;
+               }
+       }
+
+       return ret;
+}
+
+static int palmas_clks_probe(struct platform_device *pdev)
+{
+       struct palmas *palmas = dev_get_drvdata(pdev->dev.parent);
+       struct device_node *node = pdev->dev.of_node;
+       struct palmas_clks_of_match_data *match_data;
+       const struct of_device_id *match;
+       struct palmas_clock_info *cinfo;
+       struct clk *clk;
+       int ret;
+
+       match = of_match_device(palmas_clks_of_match, &pdev->dev);
+       match_data = (struct palmas_clks_of_match_data *)match->data;
+
+       cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
+       if (!cinfo)
+               return -ENOMEM;
+
+       palmas_clks_get_clk_data(pdev, cinfo);
+       platform_set_drvdata(pdev, cinfo);
+
+       cinfo->dev = &pdev->dev;
+       cinfo->palmas = palmas;
+
+       cinfo->clk_desc = &match_data->desc;
+       cinfo->hw.init = &match_data->init;
+       clk = devm_clk_register(&pdev->dev, &cinfo->hw);
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               dev_err(&pdev->dev, "Failed to register clock %s, %d\n",
+                       match_data->desc.clk_name, ret);
+               return ret;
+       }
+
+       cinfo->clk = clk;
+       ret = palmas_clks_init_configure(cinfo);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Clock config failed, %d\n", ret);
+               return ret;
+       }
+
+       ret = of_clk_add_provider(node, of_clk_src_simple_get, cinfo->clk);
+       if (ret < 0)
+               dev_err(&pdev->dev, "Failed to add clock provider, %d\n", ret);
+       return ret;
+}
+
+static int palmas_clks_remove(struct platform_device *pdev)
+{
+       of_clk_del_provider(pdev->dev.of_node);
+       return 0;
+}
+
+static struct platform_driver palmas_clks_driver = {
+       .driver = {
+               .name = "palmas-clk",
+               .owner = THIS_MODULE,
+               .of_match_table = palmas_clks_of_match,
+       },
+       .probe = palmas_clks_probe,
+       .remove = palmas_clks_remove,
+};
+
+module_platform_driver(palmas_clks_driver);
+
+MODULE_DESCRIPTION("Clock driver for Palmas Series Devices");
+MODULE_ALIAS("platform:palmas-clk");
+MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
+MODULE_LICENSE("GPL v2");
index 8b284be..8e58edf 100644 (file)
@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
        {}
 };
 
-static struct platform_driver ppc_corenet_clk_driver = {
+static struct platform_driver ppc_corenet_clk_driver __initdata = {
        .driver = {
                .name = "ppc_corenet_clock",
                .owner = THIS_MODULE,
index 9b7b585..b7797fb 100644 (file)
@@ -46,7 +46,6 @@ struct s2mps11_clk {
        struct clk *clk;
        struct clk_lookup *lookup;
        u32 mask;
-       bool enabled;
        unsigned int reg;
 };
 
@@ -63,8 +62,6 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
        ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
                                 s2mps11->reg,
                                 s2mps11->mask, s2mps11->mask);
-       if (!ret)
-               s2mps11->enabled = true;
 
        return ret;
 }
@@ -76,32 +73,32 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
 
        ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, s2mps11->reg,
                           s2mps11->mask, ~s2mps11->mask);
-
-       if (!ret)
-               s2mps11->enabled = false;
 }
 
-static int s2mps11_clk_is_enabled(struct clk_hw *hw)
+static int s2mps11_clk_is_prepared(struct clk_hw *hw)
 {
+       int ret;
+       u32 val;
        struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
 
-       return s2mps11->enabled;
+       ret = regmap_read(s2mps11->iodev->regmap_pmic,
+                               s2mps11->reg, &val);
+       if (ret < 0)
+               return -EINVAL;
+
+       return val & s2mps11->mask;
 }
 
 static unsigned long s2mps11_clk_recalc_rate(struct clk_hw *hw,
                                             unsigned long parent_rate)
 {
-       struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
-       if (s2mps11->enabled)
-               return 32768;
-       else
-               return 0;
+       return 32768;
 }
 
 static struct clk_ops s2mps11_clk_ops = {
        .prepare        = s2mps11_clk_prepare,
        .unprepare      = s2mps11_clk_unprepare,
-       .is_enabled     = s2mps11_clk_is_enabled,
+       .is_prepared    = s2mps11_clk_is_prepared,
        .recalc_rate    = s2mps11_clk_recalc_rate,
 };
 
@@ -169,7 +166,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
        unsigned int s2mps11_reg;
        struct clk_init_data *clks_init;
        int i, ret = 0;
-       u32 val;
 
        s2mps11_clks = devm_kzalloc(&pdev->dev, sizeof(*s2mps11_clk) *
                                        S2MPS11_CLKS_NUM, GFP_KERNEL);
@@ -214,13 +210,6 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
                s2mps11_clk->mask = 1 << i;
                s2mps11_clk->reg = s2mps11_reg;
 
-               ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
-                                 s2mps11_clk->reg, &val);
-               if (ret < 0)
-                       goto err_reg;
-
-               s2mps11_clk->enabled = val & s2mps11_clk->mask;
-
                s2mps11_clk->clk = devm_clk_register(&pdev->dev,
                                                        &s2mps11_clk->hw);
                if (IS_ERR(s2mps11_clk->clk)) {
@@ -230,16 +219,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
                        goto err_reg;
                }
 
-               s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
-                                       sizeof(struct clk_lookup), GFP_KERNEL);
+               s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
+                                       s2mps11_name(s2mps11_clk), NULL);
                if (!s2mps11_clk->lookup) {
                        ret = -ENOMEM;
                        goto err_lup;
                }
 
-               s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
-               s2mps11_clk->lookup->clk = s2mps11_clk->clk;
-
                clkdev_add(s2mps11_clk->lookup);
        }
 
index 8b73ede..b76fa69 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <linux/clk-private.h>
+#include <linux/clk/clk-conf.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -98,9 +99,19 @@ static void clk_enable_unlock(unsigned long flags)
 #include <linux/debugfs.h>
 
 static struct dentry *rootdir;
-static struct dentry *orphandir;
 static int inited = 0;
 
+static struct hlist_head *all_lists[] = {
+       &clk_root_list,
+       &clk_orphan_list,
+       NULL,
+};
+
+static struct hlist_head *orphan_list[] = {
+       &clk_orphan_list,
+       NULL,
+};
+
 static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
 {
        if (!c)
@@ -130,17 +141,16 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
 static int clk_summary_show(struct seq_file *s, void *data)
 {
        struct clk *c;
+       struct hlist_head **lists = (struct hlist_head **)s->private;
 
        seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy\n");
        seq_puts(s, "--------------------------------------------------------------------------------\n");
 
        clk_prepare_lock();
 
-       hlist_for_each_entry(c, &clk_root_list, child_node)
-               clk_summary_show_subtree(s, c, 0);
-
-       hlist_for_each_entry(c, &clk_orphan_list, child_node)
-               clk_summary_show_subtree(s, c, 0);
+       for (; *lists; lists++)
+               hlist_for_each_entry(c, *lists, child_node)
+                       clk_summary_show_subtree(s, c, 0);
 
        clk_prepare_unlock();
 
@@ -193,21 +203,19 @@ static int clk_dump(struct seq_file *s, void *data)
 {
        struct clk *c;
        bool first_node = true;
+       struct hlist_head **lists = (struct hlist_head **)s->private;
 
        seq_printf(s, "{");
 
        clk_prepare_lock();
 
-       hlist_for_each_entry(c, &clk_root_list, child_node) {
-               if (!first_node)
-                       seq_printf(s, ",");
-               first_node = false;
-               clk_dump_subtree(s, c, 0);
-       }
-
-       hlist_for_each_entry(c, &clk_orphan_list, child_node) {
-               seq_printf(s, ",");
-               clk_dump_subtree(s, c, 0);
+       for (; *lists; lists++) {
+               hlist_for_each_entry(c, *lists, child_node) {
+                       if (!first_node)
+                               seq_puts(s, ",");
+                       first_node = false;
+                       clk_dump_subtree(s, c, 0);
+               }
        }
 
        clk_prepare_unlock();
@@ -276,9 +284,11 @@ static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
        if (!d)
                goto err_out;
 
-       if (clk->ops->debug_init)
-               if (clk->ops->debug_init(clk->hw, clk->dentry))
+       if (clk->ops->debug_init) {
+               ret = clk->ops->debug_init(clk->hw, clk->dentry);
+               if (ret)
                        goto err_out;
+       }
 
        ret = 0;
        goto out;
@@ -305,7 +315,7 @@ static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
                goto out;
 
        hlist_for_each_entry(child, &clk->children, child_node)
-               clk_debug_create_subtree(child, clk->dentry);
+               clk_debug_create_subtree(child, pdentry);
 
        ret = 0;
 out:
@@ -325,31 +335,12 @@ out:
  */
 static int clk_debug_register(struct clk *clk)
 {
-       struct clk *parent;
-       struct dentry *pdentry;
        int ret = 0;
 
        if (!inited)
                goto out;
 
-       parent = clk->parent;
-
-       /*
-        * Check to see if a clk is a root clk.  Also check that it is
-        * safe to add this clk to debugfs
-        */
-       if (!parent)
-               if (clk->flags & CLK_IS_ROOT)
-                       pdentry = rootdir;
-               else
-                       pdentry = orphandir;
-       else
-               if (parent->dentry)
-                       pdentry = parent->dentry;
-               else
-                       goto out;
-
-       ret = clk_debug_create_subtree(clk, pdentry);
+       ret = clk_debug_create_subtree(clk, rootdir);
 
 out:
        return ret;
@@ -370,38 +361,17 @@ static void clk_debug_unregister(struct clk *clk)
        debugfs_remove_recursive(clk->dentry);
 }
 
-/**
- * clk_debug_reparent - reparent clk node in the debugfs clk tree
- * @clk: the clk being reparented
- * @new_parent: the new clk parent, may be NULL
- *
- * Rename clk entry in the debugfs clk tree if debugfs has been
- * initialized.  Otherwise it bails out early since the debugfs clk tree
- * will be created lazily by clk_debug_init as part of a late_initcall.
- *
- * Caller must hold prepare_lock.
- */
-static void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+                               void *data, const struct file_operations *fops)
 {
-       struct dentry *d;
-       struct dentry *new_parent_d;
-
-       if (!inited)
-               return;
+       struct dentry *d = NULL;
 
-       if (new_parent)
-               new_parent_d = new_parent->dentry;
-       else
-               new_parent_d = orphandir;
+       if (clk->dentry)
+               d = debugfs_create_file(name, mode, clk->dentry, data, fops);
 
-       d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
-                       new_parent_d, clk->name);
-       if (d)
-               clk->dentry = d;
-       else
-               pr_debug("%s: failed to rename debugfs entry for %s\n",
-                               __func__, clk->name);
+       return d;
 }
+EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
 
 /**
  * clk_debug_init - lazily create the debugfs clk tree visualization
@@ -425,19 +395,24 @@ static int __init clk_debug_init(void)
        if (!rootdir)
                return -ENOMEM;
 
-       d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
+       d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, &all_lists,
                                &clk_summary_fops);
        if (!d)
                return -ENOMEM;
 
-       d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
+       d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, &all_lists,
                                &clk_dump_fops);
        if (!d)
                return -ENOMEM;
 
-       orphandir = debugfs_create_dir("orphans", rootdir);
+       d = debugfs_create_file("clk_orphan_summary", S_IRUGO, rootdir,
+                               &orphan_list, &clk_summary_fops);
+       if (!d)
+               return -ENOMEM;
 
-       if (!orphandir)
+       d = debugfs_create_file("clk_orphan_dump", S_IRUGO, rootdir,
+                               &orphan_list, &clk_dump_fops);
+       if (!d)
                return -ENOMEM;
 
        clk_prepare_lock();
@@ -446,7 +421,7 @@ static int __init clk_debug_init(void)
                clk_debug_create_subtree(clk, rootdir);
 
        hlist_for_each_entry(clk, &clk_orphan_list, child_node)
-               clk_debug_create_subtree(clk, orphandir);
+               clk_debug_create_subtree(clk, rootdir);
 
        inited = 1;
 
@@ -1284,9 +1259,6 @@ static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
                clk_disable(old_parent);
                __clk_unprepare(old_parent);
        }
-
-       /* update debugfs with new clk tree topology */
-       clk_debug_reparent(clk, parent);
 }
 
 static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
@@ -1683,7 +1655,6 @@ out:
 void __clk_reparent(struct clk *clk, struct clk *new_parent)
 {
        clk_reparent(clk, new_parent);
-       clk_debug_reparent(clk, new_parent);
        __clk_recalc_accuracies(clk);
        __clk_recalc_rates(clk, POST_RATE_CHANGE);
 }
@@ -2414,6 +2385,7 @@ int of_clk_add_provider(struct device_node *np,
                        void *data)
 {
        struct of_clk_provider *cp;
+       int ret;
 
        cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
        if (!cp)
@@ -2428,7 +2400,11 @@ int of_clk_add_provider(struct device_node *np,
        mutex_unlock(&of_clk_mutex);
        pr_debug("Added clock from %s\n", np->full_name);
 
-       return 0;
+       ret = of_clk_set_defaults(np, true);
+       if (ret < 0)
+               of_clk_del_provider(np);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(of_clk_add_provider);
 
@@ -2605,7 +2581,10 @@ void __init of_clk_init(const struct of_device_id *matches)
                list_for_each_entry_safe(clk_provider, next,
                                        &clk_provider_list, node) {
                        if (force || parent_ready(clk_provider->np)) {
+
                                clk_provider->clk_init_cb(clk_provider->np);
+                               of_clk_set_defaults(clk_provider->np, true);
+
                                list_del(&clk_provider->node);
                                kfree(clk_provider);
                                is_init_done = true;
@@ -2620,7 +2599,6 @@ void __init of_clk_init(const struct of_device_id *matches)
                 */
                if (!is_init_done)
                        force = true;
-
        }
 }
 #endif
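
Note on the debugfs rework above: the separate root/orphan walks are replaced by NULL-terminated arrays of list heads, so the same show/dump callbacks serve both the combined files and the new clk_orphan_* files, selected purely by the array passed through s->private. A minimal user-space sketch of the pattern, with a plain singly-linked list standing in for the kernel's hlist (illustrative only, not kernel code):

#include <stdio.h>

struct node { const char *name; struct node *next; };

static void show_list(const struct node *head)
{
	for (; head; head = head->next)
		printf("  %s\n", head->name);
}

int main(void)
{
	struct node c = { "clk_c", NULL }, b = { "clk_b", &c };
	struct node orphan = { "orphan_clk", NULL };

	/* NULL-terminated arrays of list heads, as in all_lists[]/orphan_list[] */
	const struct node *all[] = { &b, &orphan, NULL };
	const struct node *orphans_only[] = { &orphan, NULL };

	/* one walker serves both views, like clk_summary_show() */
	for (const struct node **lists = all; *lists; lists++)
		show_list(*lists);
	for (const struct node **lists = orphans_only; *lists; lists++)
		show_list(*lists);
	return 0;
}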
index f890b90..da4bda8 100644 (file)
@@ -101,8 +101,9 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
                if (!IS_ERR(clk))
                        break;
                else if (name && index >= 0) {
-                       pr_err("ERROR: could not get clock %s:%s(%i)\n",
-                               np->full_name, name ? name : "", index);
+                       if (PTR_ERR(clk) != -EPROBE_DEFER)
+                               pr_err("ERROR: could not get clock %s:%s(%i)\n",
+                                       np->full_name, name ? name : "", index);
                        return clk;
                }
 
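The clkdev change above keeps -EPROBE_DEFER out of the log: a deferred probe is retried later, so it is not a failure worth reporting. A self-contained sketch of the pattern (user-space C; EPROBE_DEFER's value matches the kernel's, the reporting helper is made up for illustration):

#include <stdio.h>
#include <errno.h>

#define EPROBE_DEFER 517	/* kernel's "resource not ready, retry" code */

static void report_clk_error(const char *name, int err)
{
	/* deferral is routine; only log real failures */
	if (err != -EPROBE_DEFER)
		fprintf(stderr, "ERROR: could not get clock %s (%d)\n", name, err);
}

int main(void)
{
	report_clk_error("uart_ref", -ENOENT);		/* logged */
	report_clk_error("spi_ref", -EPROBE_DEFER);	/* silent */
	return 0;
}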
index 7f696b7..1107351 100644 (file)
@@ -4,6 +4,31 @@ config COMMON_CLK_QCOM
        select REGMAP_MMIO
        select RESET_CONTROLLER
 
+config APQ_GCC_8084
+       tristate "APQ8084 Global Clock Controller"
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the global clock controller on APQ8084 devices.
+         Say Y if you want to use peripheral devices such as UART, SPI,
+         I2C, USB, SD/eMMC, SATA, PCIe, etc.

+
+config APQ_MMCC_8084
+       tristate "APQ8084 Multimedia Clock Controller"
+       select APQ_GCC_8084
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the multimedia clock controller on APQ8084 devices.
+         Say Y if you want to support multimedia devices such as display,
+         graphics, video encode/decode, camera, etc.
+
+config IPQ_GCC_806X
+       tristate "IPQ806x Global Clock Controller"
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the global clock controller on IPQ806x devices.
+         Say Y if you want to use peripheral devices such as UART, SPI,
+         I2C, USB, SD/eMMC, etc.
+
 config MSM_GCC_8660
        tristate "MSM8660 Global Clock Controller"
        depends on COMMON_CLK_QCOM
index 689e05b..783cfb2 100644 (file)
@@ -8,6 +8,9 @@ clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
 clk-qcom-y += reset.o
 
+obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
+obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
+obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
index 0f927c5..9db03d3 100644 (file)
@@ -166,7 +166,7 @@ const struct clk_ops clk_pll_vote_ops = {
 EXPORT_SYMBOL_GPL(clk_pll_vote_ops);
 
 static void
-clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap)
+clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap, u8 lock_count)
 {
        u32 val;
        u32 mask;
@@ -175,7 +175,7 @@ clk_pll_set_fsm_mode(struct clk_pll *pll, struct regmap *regmap)
        regmap_update_bits(regmap, pll->mode_reg, PLL_VOTE_FSM_RESET, 0);
 
        /* Program bias count and lock count */
-       val = 1 << PLL_BIAS_COUNT_SHIFT;
+       val = 1 << PLL_BIAS_COUNT_SHIFT | lock_count << PLL_LOCK_COUNT_SHIFT;
        mask = PLL_BIAS_COUNT_MASK << PLL_BIAS_COUNT_SHIFT;
        mask |= PLL_LOCK_COUNT_MASK << PLL_LOCK_COUNT_SHIFT;
        regmap_update_bits(regmap, pll->mode_reg, mask, val);
@@ -212,11 +212,20 @@ static void clk_pll_configure(struct clk_pll *pll, struct regmap *regmap,
        regmap_update_bits(regmap, pll->config_reg, mask, val);
 }
 
+void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
+               const struct pll_config *config, bool fsm_mode)
+{
+       clk_pll_configure(pll, regmap, config);
+       if (fsm_mode)
+               clk_pll_set_fsm_mode(pll, regmap, 8);
+}
+EXPORT_SYMBOL_GPL(clk_pll_configure_sr);
+
 void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
                const struct pll_config *config, bool fsm_mode)
 {
        clk_pll_configure(pll, regmap, config);
        if (fsm_mode)
-               clk_pll_set_fsm_mode(pll, regmap);
+               clk_pll_set_fsm_mode(pll, regmap, 0);
 }
 EXPORT_SYMBOL_GPL(clk_pll_configure_sr_hpm_lp);
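
The two configure helpers above differ only in the lock count handed to clk_pll_set_fsm_mode() (8 vs. 0); both program the bias and lock counts as fields of the mode register in a single masked write. A minimal sketch of that read-modify-write, with made-up shift/mask values standing in for the real register layout:

#include <stdio.h>
#include <stdint.h>

/* illustrative field layout, not the actual PLL mode register */
#define BIAS_COUNT_SHIFT	14
#define BIAS_COUNT_MASK		0x3f
#define LOCK_COUNT_SHIFT	8
#define LOCK_COUNT_MASK		0x3f

/* regmap_update_bits()-style helper on a plain register image */
static void update_bits(uint32_t *reg, uint32_t mask, uint32_t val)
{
	*reg = (*reg & ~mask) | (val & mask);
}

static void set_fsm_counts(uint32_t *mode_reg, uint8_t lock_count)
{
	uint32_t val = (1u << BIAS_COUNT_SHIFT) |
		       ((uint32_t)lock_count << LOCK_COUNT_SHIFT);
	uint32_t mask = (BIAS_COUNT_MASK << BIAS_COUNT_SHIFT) |
			(LOCK_COUNT_MASK << LOCK_COUNT_SHIFT);

	update_bits(mode_reg, mask, val);
}

int main(void)
{
	uint32_t mode = 0xffffffff;

	set_fsm_counts(&mode, 8);	/* clk_pll_configure_sr() passes 8 */
	printf("mode = 0x%08x\n", mode);
	return 0;
}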
index 0775a99..3003e99 100644 (file)
@@ -60,6 +60,8 @@ struct pll_config {
        u32 aux_output_mask;
 };
 
+void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
+               const struct pll_config *config, bool fsm_mode);
 void clk_pll_configure_sr_hpm_lp(struct clk_pll *pll, struct regmap *regmap,
                const struct pll_config *config, bool fsm_mode);
 
index abfc2b6..b638c58 100644 (file)
@@ -417,20 +417,25 @@ static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
        return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
 }
 
-static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
-                           unsigned long parent_rate)
+static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *p_rate, struct clk **p)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
-       const struct freq_tbl *f;
+       const struct freq_tbl *f = rcg->freq_tbl;
+
+       *p = clk_get_parent_by_index(hw->clk, f->src);
+       *p_rate = __clk_round_rate(*p, rate);
+
+       return *p_rate;
+}
+
+static int __clk_rcg_set_rate(struct clk_rcg *rcg, const struct freq_tbl *f)
+{
        u32 ns, md, ctl;
        struct mn *mn = &rcg->mn;
        u32 mask = 0;
        unsigned int reset_reg;
 
-       f = find_freq(rcg->freq_tbl, rate);
-       if (!f)
-               return -EINVAL;
-
        if (rcg->mn.reset_in_cc)
                reset_reg = rcg->clkr.enable_reg;
        else
@@ -466,6 +471,27 @@ static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
+static int clk_rcg_set_rate(struct clk_hw *hw, unsigned long rate,
+                           unsigned long parent_rate)
+{
+       struct clk_rcg *rcg = to_clk_rcg(hw);
+       const struct freq_tbl *f;
+
+       f = find_freq(rcg->freq_tbl, rate);
+       if (!f)
+               return -EINVAL;
+
+       return __clk_rcg_set_rate(rcg, f);
+}
+
+static int clk_rcg_bypass_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct clk_rcg *rcg = to_clk_rcg(hw);
+
+       return __clk_rcg_set_rate(rcg, rcg->freq_tbl);
+}
+
 static int __clk_dyn_rcg_set_rate(struct clk_hw *hw, unsigned long rate)
 {
        struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
@@ -503,6 +529,17 @@ const struct clk_ops clk_rcg_ops = {
 };
 EXPORT_SYMBOL_GPL(clk_rcg_ops);
 
+const struct clk_ops clk_rcg_bypass_ops = {
+       .enable = clk_enable_regmap,
+       .disable = clk_disable_regmap,
+       .get_parent = clk_rcg_get_parent,
+       .set_parent = clk_rcg_set_parent,
+       .recalc_rate = clk_rcg_recalc_rate,
+       .determine_rate = clk_rcg_bypass_determine_rate,
+       .set_rate = clk_rcg_bypass_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_rcg_bypass_ops);
+
 const struct clk_ops clk_dyn_rcg_ops = {
        .enable = clk_enable_regmap,
        .is_enabled = clk_is_enabled_regmap,
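
The bypass ops introduced above handle RCGs that simply follow one external source: clk_rcg_bypass_determine_rate() forwards the rate request to the single parent named by the one-entry frequency table, and clk_rcg_bypass_set_rate() programs that table entry regardless of the requested rate. A stripped-down sketch of that control flow (stand-in types; the real code works on struct clk_hw and a regmap):

#include <stdio.h>

struct freq_tbl { unsigned long freq; int src; };
struct rcg { const struct freq_tbl *freq_tbl; };	/* one entry for bypass */

/* stand-in for __clk_round_rate() on the selected parent */
static unsigned long parent_round_rate(int src, unsigned long rate)
{
	return rate;
}

/* bypass determine_rate: always defer to the single listed parent */
static long bypass_determine_rate(const struct rcg *rcg, unsigned long rate,
				  unsigned long *p_rate)
{
	*p_rate = parent_round_rate(rcg->freq_tbl->src, rate);
	return (long)*p_rate;
}

/* bypass set_rate: program the one table entry, ignoring 'rate' */
static int bypass_set_rate(const struct rcg *rcg)
{
	printf("mux -> source %d, divider bypassed\n", rcg->freq_tbl->src);
	return 0;
}

int main(void)
{
	static const struct freq_tbl tbl[] = { { 0, 2 } };
	const struct rcg rcg = { tbl };
	unsigned long p_rate;

	bypass_determine_rate(&rcg, 150000000UL, &p_rate);
	return bypass_set_rate(&rcg);
}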
index b9ec11d..ba0523c 100644 (file)
@@ -95,6 +95,7 @@ struct clk_rcg {
 };
 
 extern const struct clk_ops clk_rcg_ops;
+extern const struct clk_ops clk_rcg_bypass_ops;
 
 #define to_clk_rcg(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg, clkr)
 
index 9b5a1cf..eeb3eea 100644 (file)
@@ -27,30 +27,35 @@ struct qcom_cc {
        struct clk *clks[];
 };
 
-int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+struct regmap *
+qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 {
        void __iomem *base;
        struct resource *res;
+       struct device *dev = &pdev->dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return ERR_CAST(base);
+
+       return devm_regmap_init_mmio(dev, base, desc->config);
+}
+EXPORT_SYMBOL_GPL(qcom_cc_map);
+
+int qcom_cc_really_probe(struct platform_device *pdev,
+                        const struct qcom_cc_desc *desc, struct regmap *regmap)
+{
        int i, ret;
        struct device *dev = &pdev->dev;
        struct clk *clk;
        struct clk_onecell_data *data;
        struct clk **clks;
-       struct regmap *regmap;
        struct qcom_reset_controller *reset;
        struct qcom_cc *cc;
        size_t num_clks = desc->num_clks;
        struct clk_regmap **rclks = desc->clks;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       base = devm_ioremap_resource(dev, res);
-       if (IS_ERR(base))
-               return PTR_ERR(base);
-
-       regmap = devm_regmap_init_mmio(dev, base, desc->config);
-       if (IS_ERR(regmap))
-               return PTR_ERR(regmap);
-
        cc = devm_kzalloc(dev, sizeof(*cc) + sizeof(*clks) * num_clks,
                          GFP_KERNEL);
        if (!cc)
@@ -91,6 +96,18 @@ int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(qcom_cc_really_probe);
+
+int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc)
+{
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       return qcom_cc_really_probe(pdev, desc, regmap);
+}
 EXPORT_SYMBOL_GPL(qcom_cc_probe);
 
 void qcom_cc_remove(struct platform_device *pdev)
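
Splitting qcom_cc_probe() into qcom_cc_map() plus qcom_cc_really_probe() lets a controller driver get at the regmap before any clocks are registered, for example to drop a PLL into FSM voting mode with the clk_pll_configure_sr() helper added earlier in this series. A hypothetical probe using the split (sketch only, not compilable standalone; gcc_foo_desc, gpll_foo and gpll_foo_config are placeholders, not symbols from this patch):

static int gcc_foo_probe(struct platform_device *pdev)
{
	struct regmap *regmap;

	regmap = qcom_cc_map(pdev, &gcc_foo_desc);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* touch hardware before any clocks are registered */
	clk_pll_configure_sr(&gpll_foo, regmap, &gpll_foo_config, true);

	return qcom_cc_really_probe(pdev, &gcc_foo_desc, regmap);
}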
index 2c3cfc8..2765e9d 100644 (file)
@@ -17,6 +17,7 @@ struct platform_device;
 struct regmap_config;
 struct clk_regmap;
 struct qcom_reset_map;
+struct regmap;
 
 struct qcom_cc_desc {
        const struct regmap_config *config;
@@ -26,6 +27,11 @@ struct qcom_cc_desc {
        size_t num_resets;
 };
 
+extern struct regmap *qcom_cc_map(struct platform_device *pdev,
+                                 const struct qcom_cc_desc *desc);
+extern int qcom_cc_really_probe(struct platform_device *pdev,
+                               const struct qcom_cc_desc *desc,
+                               struct regmap *regmap);
 extern int qcom_cc_probe(struct platform_device *pdev,
                         const struct qcom_cc_desc *desc);
 
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
new file mode 100644 (file)
index 0000000..ee52eb1
--- /dev/null
@@ -0,0 +1,3611 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-apq8084.h>
+#include <dt-bindings/reset/qcom,gcc-apq8084.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_XO   0
+#define P_GPLL0        1
+#define P_GPLL1        1
+#define P_GPLL4        2
+#define P_PCIE_0_1_PIPE_CLK 1
+#define P_SATA_ASIC0_CLK 1
+#define P_SATA_RX_CLK 1
+#define P_SLEEP_CLK 1
+
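+/*
+ * Each *_map[] below translates a driver-local parent index (the P_*
+ * defines above) into the hardware source-select value programmed into
+ * the RCG mux; the matching const char *[] lists the parent clock names
+ * in the same logical order.
+ */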
+static const u8 gcc_xo_gpll0_map[] = {
+       [P_XO]          = 0,
+       [P_GPLL0]       = 1,
+};
+
+static const char *gcc_xo_gpll0[] = {
+       "xo",
+       "gpll0_vote",
+};
+
+static const u8 gcc_xo_gpll0_gpll4_map[] = {
+       [P_XO]          = 0,
+       [P_GPLL0]       = 1,
+       [P_GPLL4]       = 5,
+};
+
+static const char *gcc_xo_gpll0_gpll4[] = {
+       "xo",
+       "gpll0_vote",
+       "gpll4_vote",
+};
+
+static const u8 gcc_xo_sata_asic0_map[] = {
+       [P_XO]                  = 0,
+       [P_SATA_ASIC0_CLK]      = 2,
+};
+
+static const char *gcc_xo_sata_asic0[] = {
+       "xo",
+       "sata_asic0_clk",
+};
+
+static const u8 gcc_xo_sata_rx_map[] = {
+       [P_XO]                  = 0,
+       [P_SATA_RX_CLK]         = 2,
+};
+
+static const char *gcc_xo_sata_rx[] = {
+       "xo",
+       "sata_rx_clk",
+};
+
+static const u8 gcc_xo_pcie_map[] = {
+       [P_XO]                  = 0,
+       [P_PCIE_0_1_PIPE_CLK]   = 2,
+};
+
+static const char *gcc_xo_pcie[] = {
+       "xo",
+       "pcie_pipe",
+};
+
+static const u8 gcc_xo_pcie_sleep_map[] = {
+       [P_XO]                  = 0,
+       [P_SLEEP_CLK]           = 6,
+};
+
+static const char *gcc_xo_pcie_sleep[] = {
+       "xo",
+       "sleep_clk_src",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
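+/*
+ * The hardware pre-divider advances in half steps, which is why F()
+ * stores 2 * h - 1: integer and half-integer dividers (e.g. 2.5 -> 4,
+ * 3.5 -> 6, 12.5 -> 24) all encode exactly in the tables below.
+ */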
+
+static struct clk_pll gpll0 = {
+       .l_reg = 0x0004,
+       .m_reg = 0x0008,
+       .n_reg = 0x000c,
+       .config_reg = 0x0014,
+       .mode_reg = 0x0000,
+       .status_reg = 0x001c,
+       .status_bit = 17,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpll0",
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
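+/*
+ * gpll0, gpll1 and gpll4 below are enabled by voting: clk_pll_vote_ops
+ * sets the PLL's bit in the shared vote register at 0x1480 and the
+ * hardware enable FSM performs the actual bring-up.
+ */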
+static struct clk_regmap gpll0_vote = {
+       .enable_reg = 0x1480,
+       .enable_mask = BIT(0),
+       .hw.init = &(struct clk_init_data){
+               .name = "gpll0_vote",
+               .parent_names = (const char *[]){ "gpll0" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
+static struct clk_rcg2 config_noc_clk_src = {
+       .cmd_rcgr = 0x0150,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "config_noc_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 periph_noc_clk_src = {
+       .cmd_rcgr = 0x0190,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "periph_noc_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 system_noc_clk_src = {
+       .cmd_rcgr = 0x0120,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "system_noc_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_pll gpll1 = {
+       .l_reg = 0x0044,
+       .m_reg = 0x0048,
+       .n_reg = 0x004c,
+       .config_reg = 0x0054,
+       .mode_reg = 0x0040,
+       .status_reg = 0x005c,
+       .status_bit = 17,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpll1",
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_regmap gpll1_vote = {
+       .enable_reg = 0x1480,
+       .enable_mask = BIT(1),
+       .hw.init = &(struct clk_init_data){
+               .name = "gpll1_vote",
+               .parent_names = (const char *[]){ "gpll1" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
+static struct clk_pll gpll4 = {
+       .l_reg = 0x1dc4,
+       .m_reg = 0x1dc8,
+       .n_reg = 0x1dcc,
+       .config_reg = 0x1dd4,
+       .mode_reg = 0x1dc0,
+       .status_reg = 0x1ddc,
+       .status_bit = 17,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gpll4",
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_regmap gpll4_vote = {
+       .enable_reg = 0x1480,
+       .enable_mask = BIT(4),
+       .hw.init = &(struct clk_init_data){
+               .name = "gpll4_vote",
+               .parent_names = (const char *[]){ "gpll4" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_ufs_axi_clk[] = {
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(240000000, P_GPLL0, 2.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 ufs_axi_clk_src = {
+       .cmd_rcgr = 0x1d64,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_ufs_axi_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ufs_axi_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_master_clk[] = {
+       F(125000000, P_GPLL0, 1, 5, 24),
+       { }
+};
+
+static struct clk_rcg2 usb30_master_clk_src = {
+       .cmd_rcgr = 0x03d4,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_usb30_master_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb30_master_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_sec_master_clk[] = {
+       F(125000000, P_GPLL0, 1, 5, 24),
+       { }
+};
+
+static struct clk_rcg2 usb30_sec_master_clk_src = {
+       .cmd_rcgr = 0x1bd4,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_usb30_sec_master_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb30_sec_master_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gcc_usb30_sec_mock_utmi_clk = {
+       .halt_reg = 0x1bd0,
+       .clkr = {
+               .enable_reg = 0x1bd0,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb30_sec_mock_utmi_clk",
+                       .parent_names = (const char *[]){
+                               "usb30_sec_mock_utmi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb30_sec_sleep_clk = {
+       .halt_reg = 0x1bcc,
+       .clkr = {
+               .enable_reg = 0x1bcc,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb30_sec_sleep_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0660,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup1_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk[] = {
+       F(960000, P_XO, 10, 1, 2),
+       F(4800000, P_XO, 4, 0, 0),
+       F(9600000, P_XO, 2, 0, 0),
+       F(15000000, P_GPLL0, 10, 1, 4),
+       F(19200000, P_XO, 1, 0, 0),
+       F(25000000, P_GPLL0, 12, 1, 2),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
+       .cmd_rcgr = 0x064c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup1_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x06e0,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup2_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
+       .cmd_rcgr = 0x06cc,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup2_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0760,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup3_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
+       .cmd_rcgr = 0x074c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup3_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x07e0,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup4_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
+       .cmd_rcgr = 0x07cc,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup4_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0860,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup5_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
+       .cmd_rcgr = 0x084c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup5_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x08e0,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup6_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
+       .cmd_rcgr = 0x08cc,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_qup6_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_blsp1_2_uart1_6_apps_clk[] = {
+       F(3686400, P_GPLL0, 1, 96, 15625),
+       F(7372800, P_GPLL0, 1, 192, 15625),
+       F(14745600, P_GPLL0, 1, 384, 15625),
+       F(16000000, P_GPLL0, 5, 2, 15),
+       F(19200000, P_XO, 1, 0, 0),
+       F(24000000, P_GPLL0, 5, 1, 5),
+       F(32000000, P_GPLL0, 1, 4, 75),
+       F(40000000, P_GPLL0, 15, 0, 0),
+       F(46400000, P_GPLL0, 1, 29, 375),
+       F(48000000, P_GPLL0, 12.5, 0, 0),
+       F(51200000, P_GPLL0, 1, 32, 375),
+       F(56000000, P_GPLL0, 1, 7, 75),
+       F(58982400, P_GPLL0, 1, 1536, 15625),
+       F(60000000, P_GPLL0, 10, 0, 0),
+       F(63160000, P_GPLL0, 9.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
+       .cmd_rcgr = 0x068c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart1_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
+       .cmd_rcgr = 0x070c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart2_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
+       .cmd_rcgr = 0x078c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart3_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
+       .cmd_rcgr = 0x080c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart4_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
+       .cmd_rcgr = 0x088c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart5_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
+       .cmd_rcgr = 0x090c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp1_uart6_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x09a0,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup1_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
+       .cmd_rcgr = 0x098c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup1_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0a20,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup2_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
+       .cmd_rcgr = 0x0a0c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup2_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0aa0,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup3_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
+       .cmd_rcgr = 0x0a8c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup3_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0b20,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup4_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
+       .cmd_rcgr = 0x0b0c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup4_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0ba0,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup5_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
+       .cmd_rcgr = 0x0b8c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup5_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
+       .cmd_rcgr = 0x0c20,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup6_i2c_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
+       .cmd_rcgr = 0x0c0c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_qup6_spi_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
+       .cmd_rcgr = 0x09cc,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_uart1_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
+       .cmd_rcgr = 0x0a4c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_uart2_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
+       .cmd_rcgr = 0x0acc,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_uart3_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
+       .cmd_rcgr = 0x0b4c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_uart4_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
+       .cmd_rcgr = 0x0bcc,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_uart5_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
+       .cmd_rcgr = 0x0c4c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "blsp2_uart6_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_ce1_clk[] = {
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(85710000, P_GPLL0, 7, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(171430000, P_GPLL0, 3.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 ce1_clk_src = {
+       .cmd_rcgr = 0x1050,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_ce1_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ce1_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_ce2_clk[] = {
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(85710000, P_GPLL0, 7, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(171430000, P_GPLL0, 3.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 ce2_clk_src = {
+       .cmd_rcgr = 0x1090,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_ce2_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ce2_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_ce3_clk[] = {
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(85710000, P_GPLL0, 7, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(171430000, P_GPLL0, 3.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 ce3_clk_src = {
+       .cmd_rcgr = 0x1d10,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_ce3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ce3_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_gp_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 gp1_clk_src = {
+       .cmd_rcgr = 0x1904,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_gp_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gp1_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 gp2_clk_src = {
+       .cmd_rcgr = 0x1944,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_gp_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gp2_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 gp3_clk_src = {
+       .cmd_rcgr = 0x1984,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_gp_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gp3_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_1_aux_clk[] = {
+       F(1010000, P_XO, 1, 1, 19),
+       { }
+};
+
+static struct clk_rcg2 pcie_0_aux_clk_src = {
+       .cmd_rcgr = 0x1b2c,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_pcie_sleep_map,
+       .freq_tbl = ftbl_gcc_pcie_0_1_aux_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pcie_0_aux_clk_src",
+               .parent_names = gcc_xo_pcie_sleep,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 pcie_1_aux_clk_src = {
+       .cmd_rcgr = 0x1bac,
+       .mnd_width = 16,
+       .hid_width = 5,
+       .parent_map = gcc_xo_pcie_sleep_map,
+       .freq_tbl = ftbl_gcc_pcie_0_1_aux_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pcie_1_aux_clk_src",
+               .parent_names = gcc_xo_pcie_sleep,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_pcie_0_1_pipe_clk[] = {
+       F(125000000, P_PCIE_0_1_PIPE_CLK, 1, 0, 0),
+       F(250000000, P_PCIE_0_1_PIPE_CLK, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 pcie_0_pipe_clk_src = {
+       .cmd_rcgr = 0x1b18,
+       .hid_width = 5,
+       .parent_map = gcc_xo_pcie_map,
+       .freq_tbl = ftbl_gcc_pcie_0_1_pipe_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pcie_0_pipe_clk_src",
+               .parent_names = gcc_xo_pcie,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 pcie_1_pipe_clk_src = {
+       .cmd_rcgr = 0x1b98,
+       .hid_width = 5,
+       .parent_map = gcc_xo_pcie_map,
+       .freq_tbl = ftbl_gcc_pcie_0_1_pipe_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pcie_1_pipe_clk_src",
+               .parent_names = gcc_xo_pcie,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk[] = {
+       F(60000000, P_GPLL0, 10, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 pdm2_clk_src = {
+       .cmd_rcgr = 0x0cd0,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_pdm2_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pdm2_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_sata_asic0_clk[] = {
+       F(75000000, P_SATA_ASIC0_CLK, 1, 0, 0),
+       F(150000000, P_SATA_ASIC0_CLK, 1, 0, 0),
+       F(300000000, P_SATA_ASIC0_CLK, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 sata_asic0_clk_src = {
+       .cmd_rcgr = 0x1c94,
+       .hid_width = 5,
+       .parent_map = gcc_xo_sata_asic0_map,
+       .freq_tbl = ftbl_gcc_sata_asic0_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sata_asic0_clk_src",
+               .parent_names = gcc_xo_sata_asic0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_sata_pmalive_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 sata_pmalive_clk_src = {
+       .cmd_rcgr = 0x1c80,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_sata_pmalive_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sata_pmalive_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_sata_rx_clk[] = {
+       F(75000000, P_SATA_RX_CLK, 1, 0, 0),
+       F(150000000, P_SATA_RX_CLK, 1, 0, 0),
+       F(300000000, P_SATA_RX_CLK, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 sata_rx_clk_src = {
+       .cmd_rcgr = 0x1ca8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_sata_rx_map,
+       .freq_tbl = ftbl_gcc_sata_rx_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sata_rx_clk_src",
+               .parent_names = gcc_xo_sata_rx,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_sata_rx_oob_clk[] = {
+       F(100000000, P_GPLL0, 6, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 sata_rx_oob_clk_src = {
+       .cmd_rcgr = 0x1c5c,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_sata_rx_oob_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sata_rx_oob_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_4_apps_clk[] = {
+       F(144000, P_XO, 16, 3, 25),
+       F(400000, P_XO, 12, 1, 4),
+       F(20000000, P_GPLL0, 15, 1, 2),
+       F(25000000, P_GPLL0, 12, 1, 2),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(192000000, P_GPLL4, 4, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(384000000, P_GPLL4, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 sdcc1_apps_clk_src = {
+       .cmd_rcgr = 0x04d0,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_gpll4_map,
+       .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sdcc1_apps_clk_src",
+               .parent_names = gcc_xo_gpll0_gpll4,
+               .num_parents = 3,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 sdcc2_apps_clk_src = {
+       .cmd_rcgr = 0x0510,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sdcc2_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 sdcc3_apps_clk_src = {
+       .cmd_rcgr = 0x0550,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sdcc3_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 sdcc4_apps_clk_src = {
+       .cmd_rcgr = 0x0590,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "sdcc4_apps_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_tsif_ref_clk[] = {
+       F(105000, P_XO, 2, 1, 91),
+       { }
+};
+
+static struct clk_rcg2 tsif_ref_clk_src = {
+       .cmd_rcgr = 0x0d90,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_tsif_ref_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "tsif_ref_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_mock_utmi_clk[] = {
+       F(60000000, P_GPLL0, 10, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 usb30_mock_utmi_clk_src = {
+       .cmd_rcgr = 0x03e8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_usb30_mock_utmi_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb30_mock_utmi_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_sec_mock_utmi_clk[] = {
+       F(125000000, P_GPLL0, 1, 5, 24),
+       { }
+};
+
+static struct clk_rcg2 usb30_sec_mock_utmi_clk_src = {
+       .cmd_rcgr = 0x1be8,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_usb30_sec_mock_utmi_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb30_sec_mock_utmi_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
+       F(75000000, P_GPLL0, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 usb_hs_system_clk_src = {
+       .cmd_rcgr = 0x0490,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_usb_hs_system_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb_hs_system_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_clk[] = {
+       F(480000000, P_GPLL1, 1, 0, 0),
+       { }
+};
+
+static const u8 usb_hsic_clk_src_map[] = {
+       [P_XO]          = 0,
+       [P_GPLL1]       = 4,
+};
+
+static struct clk_rcg2 usb_hsic_clk_src = {
+       .cmd_rcgr = 0x0440,
+       .hid_width = 5,
+       .parent_map = usb_hsic_clk_src_map,
+       .freq_tbl = ftbl_gcc_usb_hsic_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb_hsic_clk_src",
+               .parent_names = (const char *[]){
+                       "xo",
+                       "gpll1_vote",
+               },
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_ahb_clk_src[] = {
+       F(60000000, P_GPLL1, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 usb_hsic_ahb_clk_src = {
+       .cmd_rcgr = 0x046c,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = usb_hsic_clk_src_map,
+       .freq_tbl = ftbl_gcc_usb_hsic_ahb_clk_src,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb_hsic_ahb_clk_src",
+               .parent_names = (const char *[]){
+                       "xo",
+                       "gpll1_vote",
+               },
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
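+
+/*
+ * The (const char *[]){ "xo", "gpll1_vote" } initializer above is a C99
+ * compound literal; at file scope it has static storage duration, so its
+ * address is valid inside a static initializer.  An equivalent
+ * spelled-out form (illustrative; the array name is hypothetical):
+ */
+static const char *usb_hsic_parents_sketch[] = { "xo", "gpll1_vote" };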
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_io_cal_clk[] = {
+       F(9600000, P_XO, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 usb_hsic_io_cal_clk_src = {
+       .cmd_rcgr = 0x0458,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_usb_hsic_io_cal_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb_hsic_io_cal_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 1,
+               .ops = &clk_rcg2_ops,
+       },
+};
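+
+/*
+ * Note the shared gcc_xo_gpll0 name array is reused here with
+ * num_parents = 1, so only "xo" (index 0) is visible to the framework;
+ * that matches the single P_XO row in the table above.  A hypothetical
+ * consistency check (not in the driver) that every table row references
+ * a visible parent:
+ */
+static bool freq_tbl_parents_ok_sketch(const struct freq_tbl *f,
+				       unsigned int num_parents)
+{
+	for (; f->freq; f++)
+		if (f->src >= num_parents)
+			return false;
+	return true;
+}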
+
+static struct clk_branch gcc_usb_hsic_mock_utmi_clk = {
+       .halt_reg = 0x1f14,
+       .clkr = {
+               .enable_reg = 0x1f14,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hsic_mock_utmi_clk",
+                       .parent_names = (const char *[]){
+                               "usb_hsic_mock_utmi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
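+
+/*
+ * A simplified sketch of what clk_branch2_ops amounts to for a branch
+ * like the one above (the real implementation lives in
+ * drivers/clk/qcom/clk-branch.c): set the enable bit, then poll halt_reg
+ * until the CLK_OFF status bit clears.  Illustrative only.
+ */
+static int branch_enable_sketch(struct regmap *map, u32 en_reg,
+				u32 en_mask, u32 halt_reg)
+{
+	int count = 200;
+	u32 val;
+
+	regmap_update_bits(map, en_reg, en_mask, en_mask);
+	do {
+		regmap_read(map, halt_reg, &val);
+		if (!(val & BIT(31)))	/* BIT(31): CLK_OFF */
+			return 0;
+		udelay(1);
+	} while (--count);
+
+	return -ETIMEDOUT;
+}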
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_mock_utmi_clk[] = {
+       F(60000000, P_GPLL0, 10, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 usb_hsic_mock_utmi_clk_src = {
+       .cmd_rcgr = 0x1f00,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_usb_hsic_mock_utmi_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb_hsic_mock_utmi_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 1,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static const struct freq_tbl ftbl_gcc_usb_hsic_system_clk[] = {
+       F(75000000, P_GPLL0, 8, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 usb_hsic_system_clk_src = {
+       .cmd_rcgr = 0x041c,
+       .hid_width = 5,
+       .parent_map = gcc_xo_gpll0_map,
+       .freq_tbl = ftbl_gcc_usb_hsic_system_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "usb_hsic_system_clk_src",
+               .parent_names = gcc_xo_gpll0,
+               .num_parents = 2,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch gcc_bam_dma_ahb_clk = {
+       .halt_reg = 0x0d44,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(12),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_bam_dma_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_ahb_clk = {
+       .halt_reg = 0x05c4,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(17),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
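+
+/*
+ * Both AHB branches above use BRANCH_HALT_VOTED: enabling sets a
+ * per-clock bit in the shared vote register 0x1484 rather than a bit in
+ * the clock's own register, and halt_reg is read only to confirm the
+ * vote took effect, since another voter may already be holding the clock
+ * on.  Illustrative sketch of the two votes:
+ */
+static void vote_ahb_clocks_sketch(struct regmap *map)
+{
+	regmap_update_bits(map, 0x1484, BIT(12), BIT(12)); /* bam_dma ahb */
+	regmap_update_bits(map, 0x1484, BIT(17), BIT(17)); /* blsp1 ahb */
+}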
+
+static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
+       .halt_reg = 0x0648,
+       .clkr = {
+               .enable_reg = 0x0648,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup1_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup1_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
+       .halt_reg = 0x0644,
+       .clkr = {
+               .enable_reg = 0x0644,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup1_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup1_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
+       .halt_reg = 0x06c8,
+       .clkr = {
+               .enable_reg = 0x06c8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup2_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup2_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
+       .halt_reg = 0x06c4,
+       .clkr = {
+               .enable_reg = 0x06c4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup2_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup2_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
+       .halt_reg = 0x0748,
+       .clkr = {
+               .enable_reg = 0x0748,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup3_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup3_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
+       .halt_reg = 0x0744,
+       .clkr = {
+               .enable_reg = 0x0744,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup3_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup3_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
+       .halt_reg = 0x07c8,
+       .clkr = {
+               .enable_reg = 0x07c8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup4_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup4_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
+       .halt_reg = 0x07c4,
+       .clkr = {
+               .enable_reg = 0x07c4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup4_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup4_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
+       .halt_reg = 0x0848,
+       .clkr = {
+               .enable_reg = 0x0848,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup5_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup5_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
+       .halt_reg = 0x0844,
+       .clkr = {
+               .enable_reg = 0x0844,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup5_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup5_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
+       .halt_reg = 0x08c8,
+       .clkr = {
+               .enable_reg = 0x08c8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup6_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup6_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
+       .halt_reg = 0x08c4,
+       .clkr = {
+               .enable_reg = 0x08c4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_qup6_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_qup6_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart1_apps_clk = {
+       .halt_reg = 0x0684,
+       .clkr = {
+               .enable_reg = 0x0684,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart1_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_uart1_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart2_apps_clk = {
+       .halt_reg = 0x0704,
+       .clkr = {
+               .enable_reg = 0x0704,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart2_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_uart2_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart3_apps_clk = {
+       .halt_reg = 0x0784,
+       .clkr = {
+               .enable_reg = 0x0784,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart3_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_uart3_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart4_apps_clk = {
+       .halt_reg = 0x0804,
+       .clkr = {
+               .enable_reg = 0x0804,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart4_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_uart4_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart5_apps_clk = {
+       .halt_reg = 0x0884,
+       .clkr = {
+               .enable_reg = 0x0884,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart5_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_uart5_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp1_uart6_apps_clk = {
+       .halt_reg = 0x0904,
+       .clkr = {
+               .enable_reg = 0x0904,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp1_uart6_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp1_uart6_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
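+
+/*
+ * The twelve QUP and six UART branches of each BLSP block differ only in
+ * name and register offset; a hypothetical generator macro (not used by
+ * this driver, which spells every branch out) could condense them:
+ */
+#define GCC_BLSP_BRANCH_SKETCH(_name, _reg)				\
+static struct clk_branch gcc_##_name = {				\
+	.halt_reg = _reg,						\
+	.clkr = {							\
+		.enable_reg = _reg,					\
+		.enable_mask = BIT(0),					\
+		.hw.init = &(struct clk_init_data){			\
+			.name = "gcc_" #_name,				\
+			.parent_names = (const char *[]){		\
+				#_name "_src",				\
+			},						\
+			.num_parents = 1,				\
+			.flags = CLK_SET_RATE_PARENT,			\
+			.ops = &clk_branch2_ops,			\
+		},							\
+	},								\
+}
+/* e.g. GCC_BLSP_BRANCH_SKETCH(blsp1_uart6_apps_clk, 0x0904); */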
+
+static struct clk_branch gcc_blsp2_ahb_clk = {
+       .halt_reg = 0x0944,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
+       .halt_reg = 0x0988,
+       .clkr = {
+               .enable_reg = 0x0988,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup1_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup1_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
+       .halt_reg = 0x0984,
+       .clkr = {
+               .enable_reg = 0x0984,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup1_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup1_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
+       .halt_reg = 0x0a08,
+       .clkr = {
+               .enable_reg = 0x0a08,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup2_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup2_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
+       .halt_reg = 0x0a04,
+       .clkr = {
+               .enable_reg = 0x0a04,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup2_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup2_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
+       .halt_reg = 0x0a88,
+       .clkr = {
+               .enable_reg = 0x0a88,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup3_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup3_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
+       .halt_reg = 0x0a84,
+       .clkr = {
+               .enable_reg = 0x0a84,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup3_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup3_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
+       .halt_reg = 0x0b08,
+       .clkr = {
+               .enable_reg = 0x0b08,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup4_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup4_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
+       .halt_reg = 0x0b04,
+       .clkr = {
+               .enable_reg = 0x0b04,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup4_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup4_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
+       .halt_reg = 0x0b88,
+       .clkr = {
+               .enable_reg = 0x0b88,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup5_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup5_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
+       .halt_reg = 0x0b84,
+       .clkr = {
+               .enable_reg = 0x0b84,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup5_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup5_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
+       .halt_reg = 0x0c08,
+       .clkr = {
+               .enable_reg = 0x0c08,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup6_i2c_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup6_i2c_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
+       .halt_reg = 0x0c04,
+       .clkr = {
+               .enable_reg = 0x0c04,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_qup6_spi_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_qup6_spi_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_uart1_apps_clk = {
+       .halt_reg = 0x09c4,
+       .clkr = {
+               .enable_reg = 0x09c4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_uart1_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_uart1_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_uart2_apps_clk = {
+       .halt_reg = 0x0a44,
+       .clkr = {
+               .enable_reg = 0x0a44,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_uart2_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_uart2_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_uart3_apps_clk = {
+       .halt_reg = 0x0ac4,
+       .clkr = {
+               .enable_reg = 0x0ac4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_uart3_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_uart3_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_uart4_apps_clk = {
+       .halt_reg = 0x0b44,
+       .clkr = {
+               .enable_reg = 0x0b44,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_uart4_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_uart4_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_uart5_apps_clk = {
+       .halt_reg = 0x0bc4,
+       .clkr = {
+               .enable_reg = 0x0bc4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_uart5_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_uart5_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_blsp2_uart6_apps_clk = {
+       .halt_reg = 0x0c44,
+       .clkr = {
+               .enable_reg = 0x0c44,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_blsp2_uart6_apps_clk",
+                       .parent_names = (const char *[]){
+                               "blsp2_uart6_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+       .halt_reg = 0x0e04,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(10),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_boot_rom_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce1_ahb_clk = {
+       .halt_reg = 0x104c,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(3),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce1_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce1_axi_clk = {
+       .halt_reg = 0x1048,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce1_axi_clk",
+                       .parent_names = (const char *[]){
+                               "system_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce1_clk = {
+       .halt_reg = 0x1050,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(5),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce1_clk",
+                       .parent_names = (const char *[]){
+                               "ce1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce2_ahb_clk = {
+       .halt_reg = 0x108c,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce2_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce2_axi_clk = {
+       .halt_reg = 0x1088,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce2_axi_clk",
+                       .parent_names = (const char *[]){
+                               "system_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce2_clk = {
+       .halt_reg = 0x1090,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(2),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce2_clk",
+                       .parent_names = (const char *[]){
+                               "ce2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce3_ahb_clk = {
+       .halt_reg = 0x1d0c,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1d0c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce3_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce3_axi_clk = {
+       .halt_reg = 0x1088,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1d08,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce3_axi_clk",
+                       .parent_names = (const char *[]){
+                               "system_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ce3_clk = {
+       .halt_reg = 0x1090,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1d04,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ce3_clk",
+                       .parent_names = (const char *[]){
+                               "ce3_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+       .halt_reg = 0x1900,
+       .clkr = {
+               .enable_reg = 0x1900,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_gp1_clk",
+                       .parent_names = (const char *[]){
+                               "gp1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+       .halt_reg = 0x1940,
+       .clkr = {
+               .enable_reg = 0x1940,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_gp2_clk",
+                       .parent_names = (const char *[]){
+                               "gp2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+       .halt_reg = 0x1980,
+       .clkr = {
+               .enable_reg = 0x1980,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_gp3_clk",
+                       .parent_names = (const char *[]){
+                               "gp3_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ocmem_noc_cfg_ahb_clk = {
+       .halt_reg = 0x0248,
+       .clkr = {
+               .enable_reg = 0x0248,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ocmem_noc_cfg_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_0_aux_clk = {
+       .halt_reg = 0x1b10,
+       .clkr = {
+               .enable_reg = 0x1b10,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_0_aux_clk",
+                       .parent_names = (const char *[]){
+                               "pcie_0_aux_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
+       .halt_reg = 0x1b0c,
+       .clkr = {
+               .enable_reg = 0x1b0c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_0_cfg_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
+       .halt_reg = 0x1b08,
+       .clkr = {
+               .enable_reg = 0x1b08,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_0_mstr_axi_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_0_pipe_clk = {
+       .halt_reg = 0x1b14,
+       .clkr = {
+               .enable_reg = 0x1b14,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_0_pipe_clk",
+                       .parent_names = (const char *[]){
+                               "pcie_0_pipe_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_0_slv_axi_clk = {
+       .halt_reg = 0x1b04,
+       .clkr = {
+               .enable_reg = 0x1b04,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_0_slv_axi_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_1_aux_clk = {
+       .halt_reg = 0x1b90,
+       .clkr = {
+               .enable_reg = 0x1b90,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_1_aux_clk",
+                       .parent_names = (const char *[]){
+                               "pcie_1_aux_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
+       .halt_reg = 0x1b8c,
+       .clkr = {
+               .enable_reg = 0x1b8c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_1_cfg_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
+       .halt_reg = 0x1b88,
+       .clkr = {
+               .enable_reg = 0x1b88,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_1_mstr_axi_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_1_pipe_clk = {
+       .halt_reg = 0x1b94,
+       .clkr = {
+               .enable_reg = 0x1b94,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_1_pipe_clk",
+                       .parent_names = (const char *[]){
+                               "pcie_1_pipe_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pcie_1_slv_axi_clk = {
+       .halt_reg = 0x1b84,
+       .clkr = {
+               .enable_reg = 0x1b84,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pcie_1_slv_axi_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+       .halt_reg = 0x0ccc,
+       .clkr = {
+               .enable_reg = 0x0ccc,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pdm2_clk",
+                       .parent_names = (const char *[]){
+                               "pdm2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+       .halt_reg = 0x0cc4,
+       .clkr = {
+               .enable_reg = 0x0cc4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_pdm_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_periph_noc_usb_hsic_ahb_clk = {
+       .halt_reg = 0x01a4,
+       .clkr = {
+               .enable_reg = 0x01a4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_periph_noc_usb_hsic_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "usb_hsic_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+       .halt_reg = 0x0d04,
+       .halt_check = BRANCH_HALT_VOTED,
+       .clkr = {
+               .enable_reg = 0x1484,
+               .enable_mask = BIT(13),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_prng_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sata_asic0_clk = {
+       .halt_reg = 0x1c54,
+       .clkr = {
+               .enable_reg = 0x1c54,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sata_asic0_clk",
+                       .parent_names = (const char *[]){
+                               "sata_asic0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sata_axi_clk = {
+       .halt_reg = 0x1c44,
+       .clkr = {
+               .enable_reg = 0x1c44,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sata_axi_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sata_cfg_ahb_clk = {
+       .halt_reg = 0x1c48,
+       .clkr = {
+               .enable_reg = 0x1c48,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sata_cfg_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sata_pmalive_clk = {
+       .halt_reg = 0x1c50,
+       .clkr = {
+               .enable_reg = 0x1c50,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sata_pmalive_clk",
+                       .parent_names = (const char *[]){
+                               "sata_pmalive_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sata_rx_clk = {
+       .halt_reg = 0x1c58,
+       .clkr = {
+               .enable_reg = 0x1c58,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sata_rx_clk",
+                       .parent_names = (const char *[]){
+                               "sata_rx_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sata_rx_oob_clk = {
+       .halt_reg = 0x1c4c,
+       .clkr = {
+               .enable_reg = 0x1c4c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sata_rx_oob_clk",
+                       .parent_names = (const char *[]){
+                               "sata_rx_oob_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+       .halt_reg = 0x04c8,
+       .clkr = {
+               .enable_reg = 0x04c8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc1_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+       .halt_reg = 0x04c4,
+       .clkr = {
+               .enable_reg = 0x04c4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc1_apps_clk",
+                       .parent_names = (const char *[]){
+                               "sdcc1_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
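+
+/*
+ * Consumer view (illustrative sketch; the "iface"/"core" names follow
+ * the usual qcom SDHCI binding and are assumed here): a host driver
+ * enables both the AHB and apps branches but sets rates only on the
+ * latter, and the CLK_SET_RATE_PARENT flag above forwards that request
+ * up to sdcc1_apps_clk_src.
+ */
+static int sdcc_clocks_on_sketch(struct device *dev)
+{
+	struct clk *iface = devm_clk_get(dev, "iface");
+	struct clk *core = devm_clk_get(dev, "core");
+
+	if (IS_ERR(iface) || IS_ERR(core))
+		return -ENODEV;
+
+	clk_prepare_enable(iface);
+	clk_set_rate(core, 100000000);	/* e.g. 100 MHz for SDR50 */
+	return clk_prepare_enable(core);
+}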
+
+static struct clk_branch gcc_sdcc1_cdccal_ff_clk = {
+       .halt_reg = 0x04e8,
+       .clkr = {
+               .enable_reg = 0x04e8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc1_cdccal_ff_clk",
+                       .parent_names = (const char *[]){
+                               "xo",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc1_cdccal_sleep_clk = {
+       .halt_reg = 0x04e4,
+       .clkr = {
+               .enable_reg = 0x04e4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc1_cdccal_sleep_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
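+
+/*
+ * "xo" and "sleep_clk_src" above are leaf names resolved by string match
+ * at registration time; they are expected to be provided elsewhere
+ * (board or DT code).  A minimal sketch of providing them as fixed-rate
+ * clocks, assuming the usual 19.2 MHz crystal and 32.768 kHz sleep
+ * clock for this family:
+ */
+static void register_root_clocks_sketch(struct device *dev)
+{
+	clk_register_fixed_rate(dev, "xo", NULL, CLK_IS_ROOT, 19200000);
+	clk_register_fixed_rate(dev, "sleep_clk_src", NULL, CLK_IS_ROOT,
+				32768);
+}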
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+       .halt_reg = 0x0508,
+       .clkr = {
+               .enable_reg = 0x0508,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc2_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+       .halt_reg = 0x0504,
+       .clkr = {
+               .enable_reg = 0x0504,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc2_apps_clk",
+                       .parent_names = (const char *[]){
+                               "sdcc2_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc3_ahb_clk = {
+       .halt_reg = 0x0548,
+       .clkr = {
+               .enable_reg = 0x0548,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc3_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc3_apps_clk = {
+       .halt_reg = 0x0544,
+       .clkr = {
+               .enable_reg = 0x0544,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc3_apps_clk",
+                       .parent_names = (const char *[]){
+                               "sdcc3_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc4_ahb_clk = {
+       .halt_reg = 0x0588,
+       .clkr = {
+               .enable_reg = 0x0588,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc4_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sdcc4_apps_clk = {
+       .halt_reg = 0x0584,
+       .clkr = {
+               .enable_reg = 0x0584,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sdcc4_apps_clk",
+                       .parent_names = (const char *[]){
+                               "sdcc4_apps_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
+       .halt_reg = 0x013c,
+       .clkr = {
+               .enable_reg = 0x013c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sys_noc_ufs_axi_clk",
+                       .parent_names = (const char *[]){
+                               "ufs_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
+       .halt_reg = 0x0108,
+       .clkr = {
+               .enable_reg = 0x0108,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sys_noc_usb3_axi_clk",
+                       .parent_names = (const char *[]){
+                               "usb30_master_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_sec_axi_clk = {
+       .halt_reg = 0x0138,
+       .clkr = {
+               .enable_reg = 0x0138,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_sys_noc_usb3_sec_axi_clk",
+                       .parent_names = (const char *[]){
+                               "usb30_sec_master_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_tsif_ahb_clk = {
+       .halt_reg = 0x0d84,
+       .clkr = {
+               .enable_reg = 0x0d84,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_tsif_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_tsif_inactivity_timers_clk = {
+       .halt_reg = 0x0d8c,
+       .clkr = {
+               .enable_reg = 0x0d8c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_tsif_inactivity_timers_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_tsif_ref_clk = {
+       .halt_reg = 0x0d88,
+       .clkr = {
+               .enable_reg = 0x0d88,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_tsif_ref_clk",
+                       .parent_names = (const char *[]){
+                               "tsif_ref_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ufs_ahb_clk = {
+       .halt_reg = 0x1d48,
+       .clkr = {
+               .enable_reg = 0x1d48,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ufs_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "config_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ufs_axi_clk = {
+       .halt_reg = 0x1d44,
+       .clkr = {
+               .enable_reg = 0x1d44,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ufs_axi_clk",
+                       .parent_names = (const char *[]){
+                               "ufs_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ufs_rx_cfg_clk = {
+       .halt_reg = 0x1d50,
+       .clkr = {
+               .enable_reg = 0x1d50,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ufs_rx_cfg_clk",
+                       .parent_names = (const char *[]){
+                               "ufs_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
+       .halt_reg = 0x1d5c,
+       .clkr = {
+               .enable_reg = 0x1d5c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ufs_rx_symbol_0_clk",
+                       .parent_names = (const char *[]){
+                               "ufs_rx_symbol_0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
+       .halt_reg = 0x1d60,
+       .clkr = {
+               .enable_reg = 0x1d60,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ufs_rx_symbol_1_clk",
+                       .parent_names = (const char *[]){
+                               "ufs_rx_symbol_1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ufs_tx_cfg_clk = {
+       .halt_reg = 0x1d4c,
+       .clkr = {
+               .enable_reg = 0x1d4c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ufs_tx_cfg_clk",
+                       .parent_names = (const char *[]){
+                               "ufs_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
+       .halt_reg = 0x1d54,
+       .clkr = {
+               .enable_reg = 0x1d54,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ufs_tx_symbol_0_clk",
+                       .parent_names = (const char *[]){
+                               "ufs_tx_symbol_0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
+       .halt_reg = 0x1d58,
+       .clkr = {
+               .enable_reg = 0x1d58,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_ufs_tx_symbol_1_clk",
+                       .parent_names = (const char *[]){
+                               "ufs_tx_symbol_1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb2a_phy_sleep_clk = {
+       .halt_reg = 0x04ac,
+       .clkr = {
+               .enable_reg = 0x04ac,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb2a_phy_sleep_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb2b_phy_sleep_clk = {
+       .halt_reg = 0x04b4,
+       .clkr = {
+               .enable_reg = 0x04b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb2b_phy_sleep_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb30_master_clk = {
+       .halt_reg = 0x03c8,
+       .clkr = {
+               .enable_reg = 0x03c8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb30_master_clk",
+                       .parent_names = (const char *[]){
+                               "usb30_master_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb30_sec_master_clk = {
+       .halt_reg = 0x1bc8,
+       .clkr = {
+               .enable_reg = 0x1bc8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb30_sec_master_clk",
+                       .parent_names = (const char *[]){
+                               "usb30_sec_master_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb30_mock_utmi_clk = {
+       .halt_reg = 0x03d0,
+       .clkr = {
+               .enable_reg = 0x03d0,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb30_mock_utmi_clk",
+                       .parent_names = (const char *[]){
+                               "usb30_mock_utmi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb30_sleep_clk = {
+       .halt_reg = 0x03cc,
+       .clkr = {
+               .enable_reg = 0x03cc,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb30_sleep_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb_hs_ahb_clk = {
+       .halt_reg = 0x0488,
+       .clkr = {
+               .enable_reg = 0x0488,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hs_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb_hs_inactivity_timers_clk = {
+       .halt_reg = 0x048c,
+       .clkr = {
+               .enable_reg = 0x048c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hs_inactivity_timers_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb_hs_system_clk = {
+       .halt_reg = 0x0484,
+       .clkr = {
+               .enable_reg = 0x0484,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hs_system_clk",
+                       .parent_names = (const char *[]){
+                               "usb_hs_system_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb_hsic_ahb_clk = {
+       .halt_reg = 0x0408,
+       .clkr = {
+               .enable_reg = 0x0408,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hsic_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "periph_noc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb_hsic_clk = {
+       .halt_reg = 0x0410,
+       .clkr = {
+               .enable_reg = 0x0410,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hsic_clk",
+                       .parent_names = (const char *[]){
+                               "usb_hsic_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb_hsic_io_cal_clk = {
+       .halt_reg = 0x0414,
+       .clkr = {
+               .enable_reg = 0x0414,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hsic_io_cal_clk",
+                       .parent_names = (const char *[]){
+                               "usb_hsic_io_cal_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb_hsic_io_cal_sleep_clk = {
+       .halt_reg = 0x0418,
+       .clkr = {
+               .enable_reg = 0x0418,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hsic_io_cal_sleep_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch gcc_usb_hsic_system_clk = {
+       .halt_reg = 0x040c,
+       .clkr = {
+               .enable_reg = 0x040c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gcc_usb_hsic_system_clk",
+                       .parent_names = (const char *[]){
+                               "usb_hsic_system_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
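+/*
+ * Index-to-clock table handed to the common qcom_cc code below; the
+ * indices are the DT binding constants, so "clocks = <&gcc FOO>"
+ * consumers resolve directly to these clk_regmap pointers.
+ */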
+static struct clk_regmap *gcc_apq8084_clocks[] = {
+       [GPLL0] = &gpll0.clkr,
+       [GPLL0_VOTE] = &gpll0_vote,
+       [GPLL1] = &gpll1.clkr,
+       [GPLL1_VOTE] = &gpll1_vote,
+       [GPLL4] = &gpll4.clkr,
+       [GPLL4_VOTE] = &gpll4_vote,
+       [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+       [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+       [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
+       [UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
+       [USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
+       [USB30_SEC_MASTER_CLK_SRC] = &usb30_sec_master_clk_src.clkr,
+       [USB_HSIC_AHB_CLK_SRC] = &usb_hsic_ahb_clk_src.clkr,
+       [BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP1_SPI_APPS_CLK_SRC] = &blsp1_qup1_spi_apps_clk_src.clkr,
+       [BLSP1_QUP2_I2C_APPS_CLK_SRC] = &blsp1_qup2_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP2_SPI_APPS_CLK_SRC] = &blsp1_qup2_spi_apps_clk_src.clkr,
+       [BLSP1_QUP3_I2C_APPS_CLK_SRC] = &blsp1_qup3_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP3_SPI_APPS_CLK_SRC] = &blsp1_qup3_spi_apps_clk_src.clkr,
+       [BLSP1_QUP4_I2C_APPS_CLK_SRC] = &blsp1_qup4_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP4_SPI_APPS_CLK_SRC] = &blsp1_qup4_spi_apps_clk_src.clkr,
+       [BLSP1_QUP5_I2C_APPS_CLK_SRC] = &blsp1_qup5_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP5_SPI_APPS_CLK_SRC] = &blsp1_qup5_spi_apps_clk_src.clkr,
+       [BLSP1_QUP6_I2C_APPS_CLK_SRC] = &blsp1_qup6_i2c_apps_clk_src.clkr,
+       [BLSP1_QUP6_SPI_APPS_CLK_SRC] = &blsp1_qup6_spi_apps_clk_src.clkr,
+       [BLSP1_UART1_APPS_CLK_SRC] = &blsp1_uart1_apps_clk_src.clkr,
+       [BLSP1_UART2_APPS_CLK_SRC] = &blsp1_uart2_apps_clk_src.clkr,
+       [BLSP1_UART3_APPS_CLK_SRC] = &blsp1_uart3_apps_clk_src.clkr,
+       [BLSP1_UART4_APPS_CLK_SRC] = &blsp1_uart4_apps_clk_src.clkr,
+       [BLSP1_UART5_APPS_CLK_SRC] = &blsp1_uart5_apps_clk_src.clkr,
+       [BLSP1_UART6_APPS_CLK_SRC] = &blsp1_uart6_apps_clk_src.clkr,
+       [BLSP2_QUP1_I2C_APPS_CLK_SRC] = &blsp2_qup1_i2c_apps_clk_src.clkr,
+       [BLSP2_QUP1_SPI_APPS_CLK_SRC] = &blsp2_qup1_spi_apps_clk_src.clkr,
+       [BLSP2_QUP2_I2C_APPS_CLK_SRC] = &blsp2_qup2_i2c_apps_clk_src.clkr,
+       [BLSP2_QUP2_SPI_APPS_CLK_SRC] = &blsp2_qup2_spi_apps_clk_src.clkr,
+       [BLSP2_QUP3_I2C_APPS_CLK_SRC] = &blsp2_qup3_i2c_apps_clk_src.clkr,
+       [BLSP2_QUP3_SPI_APPS_CLK_SRC] = &blsp2_qup3_spi_apps_clk_src.clkr,
+       [BLSP2_QUP4_I2C_APPS_CLK_SRC] = &blsp2_qup4_i2c_apps_clk_src.clkr,
+       [BLSP2_QUP4_SPI_APPS_CLK_SRC] = &blsp2_qup4_spi_apps_clk_src.clkr,
+       [BLSP2_QUP5_I2C_APPS_CLK_SRC] = &blsp2_qup5_i2c_apps_clk_src.clkr,
+       [BLSP2_QUP5_SPI_APPS_CLK_SRC] = &blsp2_qup5_spi_apps_clk_src.clkr,
+       [BLSP2_QUP6_I2C_APPS_CLK_SRC] = &blsp2_qup6_i2c_apps_clk_src.clkr,
+       [BLSP2_QUP6_SPI_APPS_CLK_SRC] = &blsp2_qup6_spi_apps_clk_src.clkr,
+       [BLSP2_UART1_APPS_CLK_SRC] = &blsp2_uart1_apps_clk_src.clkr,
+       [BLSP2_UART2_APPS_CLK_SRC] = &blsp2_uart2_apps_clk_src.clkr,
+       [BLSP2_UART3_APPS_CLK_SRC] = &blsp2_uart3_apps_clk_src.clkr,
+       [BLSP2_UART4_APPS_CLK_SRC] = &blsp2_uart4_apps_clk_src.clkr,
+       [BLSP2_UART5_APPS_CLK_SRC] = &blsp2_uart5_apps_clk_src.clkr,
+       [BLSP2_UART6_APPS_CLK_SRC] = &blsp2_uart6_apps_clk_src.clkr,
+       [CE1_CLK_SRC] = &ce1_clk_src.clkr,
+       [CE2_CLK_SRC] = &ce2_clk_src.clkr,
+       [CE3_CLK_SRC] = &ce3_clk_src.clkr,
+       [GP1_CLK_SRC] = &gp1_clk_src.clkr,
+       [GP2_CLK_SRC] = &gp2_clk_src.clkr,
+       [GP3_CLK_SRC] = &gp3_clk_src.clkr,
+       [PCIE_0_AUX_CLK_SRC] = &pcie_0_aux_clk_src.clkr,
+       [PCIE_0_PIPE_CLK_SRC] = &pcie_0_pipe_clk_src.clkr,
+       [PCIE_1_AUX_CLK_SRC] = &pcie_1_aux_clk_src.clkr,
+       [PCIE_1_PIPE_CLK_SRC] = &pcie_1_pipe_clk_src.clkr,
+       [PDM2_CLK_SRC] = &pdm2_clk_src.clkr,
+       [SATA_ASIC0_CLK_SRC] = &sata_asic0_clk_src.clkr,
+       [SATA_PMALIVE_CLK_SRC] = &sata_pmalive_clk_src.clkr,
+       [SATA_RX_CLK_SRC] = &sata_rx_clk_src.clkr,
+       [SATA_RX_OOB_CLK_SRC] = &sata_rx_oob_clk_src.clkr,
+       [SDCC1_APPS_CLK_SRC] = &sdcc1_apps_clk_src.clkr,
+       [SDCC2_APPS_CLK_SRC] = &sdcc2_apps_clk_src.clkr,
+       [SDCC3_APPS_CLK_SRC] = &sdcc3_apps_clk_src.clkr,
+       [SDCC4_APPS_CLK_SRC] = &sdcc4_apps_clk_src.clkr,
+       [TSIF_REF_CLK_SRC] = &tsif_ref_clk_src.clkr,
+       [USB30_MOCK_UTMI_CLK_SRC] = &usb30_mock_utmi_clk_src.clkr,
+       [USB30_SEC_MOCK_UTMI_CLK_SRC] = &usb30_sec_mock_utmi_clk_src.clkr,
+       [USB_HS_SYSTEM_CLK_SRC] = &usb_hs_system_clk_src.clkr,
+       [USB_HSIC_CLK_SRC] = &usb_hsic_clk_src.clkr,
+       [USB_HSIC_IO_CAL_CLK_SRC] = &usb_hsic_io_cal_clk_src.clkr,
+       [USB_HSIC_MOCK_UTMI_CLK_SRC] = &usb_hsic_mock_utmi_clk_src.clkr,
+       [USB_HSIC_SYSTEM_CLK_SRC] = &usb_hsic_system_clk_src.clkr,
+       [GCC_BAM_DMA_AHB_CLK] = &gcc_bam_dma_ahb_clk.clkr,
+       [GCC_BLSP1_AHB_CLK] = &gcc_blsp1_ahb_clk.clkr,
+       [GCC_BLSP1_QUP1_I2C_APPS_CLK] = &gcc_blsp1_qup1_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP1_SPI_APPS_CLK] = &gcc_blsp1_qup1_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP2_I2C_APPS_CLK] = &gcc_blsp1_qup2_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP2_SPI_APPS_CLK] = &gcc_blsp1_qup2_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP3_I2C_APPS_CLK] = &gcc_blsp1_qup3_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP3_SPI_APPS_CLK] = &gcc_blsp1_qup3_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP4_I2C_APPS_CLK] = &gcc_blsp1_qup4_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP4_SPI_APPS_CLK] = &gcc_blsp1_qup4_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP5_I2C_APPS_CLK] = &gcc_blsp1_qup5_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP5_SPI_APPS_CLK] = &gcc_blsp1_qup5_spi_apps_clk.clkr,
+       [GCC_BLSP1_QUP6_I2C_APPS_CLK] = &gcc_blsp1_qup6_i2c_apps_clk.clkr,
+       [GCC_BLSP1_QUP6_SPI_APPS_CLK] = &gcc_blsp1_qup6_spi_apps_clk.clkr,
+       [GCC_BLSP1_UART1_APPS_CLK] = &gcc_blsp1_uart1_apps_clk.clkr,
+       [GCC_BLSP1_UART2_APPS_CLK] = &gcc_blsp1_uart2_apps_clk.clkr,
+       [GCC_BLSP1_UART3_APPS_CLK] = &gcc_blsp1_uart3_apps_clk.clkr,
+       [GCC_BLSP1_UART4_APPS_CLK] = &gcc_blsp1_uart4_apps_clk.clkr,
+       [GCC_BLSP1_UART5_APPS_CLK] = &gcc_blsp1_uart5_apps_clk.clkr,
+       [GCC_BLSP1_UART6_APPS_CLK] = &gcc_blsp1_uart6_apps_clk.clkr,
+       [GCC_BLSP2_AHB_CLK] = &gcc_blsp2_ahb_clk.clkr,
+       [GCC_BLSP2_QUP1_I2C_APPS_CLK] = &gcc_blsp2_qup1_i2c_apps_clk.clkr,
+       [GCC_BLSP2_QUP1_SPI_APPS_CLK] = &gcc_blsp2_qup1_spi_apps_clk.clkr,
+       [GCC_BLSP2_QUP2_I2C_APPS_CLK] = &gcc_blsp2_qup2_i2c_apps_clk.clkr,
+       [GCC_BLSP2_QUP2_SPI_APPS_CLK] = &gcc_blsp2_qup2_spi_apps_clk.clkr,
+       [GCC_BLSP2_QUP3_I2C_APPS_CLK] = &gcc_blsp2_qup3_i2c_apps_clk.clkr,
+       [GCC_BLSP2_QUP3_SPI_APPS_CLK] = &gcc_blsp2_qup3_spi_apps_clk.clkr,
+       [GCC_BLSP2_QUP4_I2C_APPS_CLK] = &gcc_blsp2_qup4_i2c_apps_clk.clkr,
+       [GCC_BLSP2_QUP4_SPI_APPS_CLK] = &gcc_blsp2_qup4_spi_apps_clk.clkr,
+       [GCC_BLSP2_QUP5_I2C_APPS_CLK] = &gcc_blsp2_qup5_i2c_apps_clk.clkr,
+       [GCC_BLSP2_QUP5_SPI_APPS_CLK] = &gcc_blsp2_qup5_spi_apps_clk.clkr,
+       [GCC_BLSP2_QUP6_I2C_APPS_CLK] = &gcc_blsp2_qup6_i2c_apps_clk.clkr,
+       [GCC_BLSP2_QUP6_SPI_APPS_CLK] = &gcc_blsp2_qup6_spi_apps_clk.clkr,
+       [GCC_BLSP2_UART1_APPS_CLK] = &gcc_blsp2_uart1_apps_clk.clkr,
+       [GCC_BLSP2_UART2_APPS_CLK] = &gcc_blsp2_uart2_apps_clk.clkr,
+       [GCC_BLSP2_UART3_APPS_CLK] = &gcc_blsp2_uart3_apps_clk.clkr,
+       [GCC_BLSP2_UART4_APPS_CLK] = &gcc_blsp2_uart4_apps_clk.clkr,
+       [GCC_BLSP2_UART5_APPS_CLK] = &gcc_blsp2_uart5_apps_clk.clkr,
+       [GCC_BLSP2_UART6_APPS_CLK] = &gcc_blsp2_uart6_apps_clk.clkr,
+       [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+       [GCC_CE1_AHB_CLK] = &gcc_ce1_ahb_clk.clkr,
+       [GCC_CE1_AXI_CLK] = &gcc_ce1_axi_clk.clkr,
+       [GCC_CE1_CLK] = &gcc_ce1_clk.clkr,
+       [GCC_CE2_AHB_CLK] = &gcc_ce2_ahb_clk.clkr,
+       [GCC_CE2_AXI_CLK] = &gcc_ce2_axi_clk.clkr,
+       [GCC_CE2_CLK] = &gcc_ce2_clk.clkr,
+       [GCC_CE3_AHB_CLK] = &gcc_ce3_ahb_clk.clkr,
+       [GCC_CE3_AXI_CLK] = &gcc_ce3_axi_clk.clkr,
+       [GCC_CE3_CLK] = &gcc_ce3_clk.clkr,
+       [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+       [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+       [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+       [GCC_OCMEM_NOC_CFG_AHB_CLK] = &gcc_ocmem_noc_cfg_ahb_clk.clkr,
+       [GCC_PCIE_0_AUX_CLK] = &gcc_pcie_0_aux_clk.clkr,
+       [GCC_PCIE_0_CFG_AHB_CLK] = &gcc_pcie_0_cfg_ahb_clk.clkr,
+       [GCC_PCIE_0_MSTR_AXI_CLK] = &gcc_pcie_0_mstr_axi_clk.clkr,
+       [GCC_PCIE_0_PIPE_CLK] = &gcc_pcie_0_pipe_clk.clkr,
+       [GCC_PCIE_0_SLV_AXI_CLK] = &gcc_pcie_0_slv_axi_clk.clkr,
+       [GCC_PCIE_1_AUX_CLK] = &gcc_pcie_1_aux_clk.clkr,
+       [GCC_PCIE_1_CFG_AHB_CLK] = &gcc_pcie_1_cfg_ahb_clk.clkr,
+       [GCC_PCIE_1_MSTR_AXI_CLK] = &gcc_pcie_1_mstr_axi_clk.clkr,
+       [GCC_PCIE_1_PIPE_CLK] = &gcc_pcie_1_pipe_clk.clkr,
+       [GCC_PCIE_1_SLV_AXI_CLK] = &gcc_pcie_1_slv_axi_clk.clkr,
+       [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+       [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+       [GCC_PERIPH_NOC_USB_HSIC_AHB_CLK] = &gcc_periph_noc_usb_hsic_ahb_clk.clkr,
+       [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
+       [GCC_SATA_ASIC0_CLK] = &gcc_sata_asic0_clk.clkr,
+       [GCC_SATA_AXI_CLK] = &gcc_sata_axi_clk.clkr,
+       [GCC_SATA_CFG_AHB_CLK] = &gcc_sata_cfg_ahb_clk.clkr,
+       [GCC_SATA_PMALIVE_CLK] = &gcc_sata_pmalive_clk.clkr,
+       [GCC_SATA_RX_CLK] = &gcc_sata_rx_clk.clkr,
+       [GCC_SATA_RX_OOB_CLK] = &gcc_sata_rx_oob_clk.clkr,
+       [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+       [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+       [GCC_SDCC1_CDCCAL_FF_CLK] = &gcc_sdcc1_cdccal_ff_clk.clkr,
+       [GCC_SDCC1_CDCCAL_SLEEP_CLK] = &gcc_sdcc1_cdccal_sleep_clk.clkr,
+       [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+       [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+       [GCC_SDCC3_AHB_CLK] = &gcc_sdcc3_ahb_clk.clkr,
+       [GCC_SDCC3_APPS_CLK] = &gcc_sdcc3_apps_clk.clkr,
+       [GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
+       [GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
+       [GCC_SYS_NOC_UFS_AXI_CLK] = &gcc_sys_noc_ufs_axi_clk.clkr,
+       [GCC_SYS_NOC_USB3_AXI_CLK] = &gcc_sys_noc_usb3_axi_clk.clkr,
+       [GCC_SYS_NOC_USB3_SEC_AXI_CLK] = &gcc_sys_noc_usb3_sec_axi_clk.clkr,
+       [GCC_TSIF_AHB_CLK] = &gcc_tsif_ahb_clk.clkr,
+       [GCC_TSIF_INACTIVITY_TIMERS_CLK] = &gcc_tsif_inactivity_timers_clk.clkr,
+       [GCC_TSIF_REF_CLK] = &gcc_tsif_ref_clk.clkr,
+       [GCC_UFS_AHB_CLK] = &gcc_ufs_ahb_clk.clkr,
+       [GCC_UFS_AXI_CLK] = &gcc_ufs_axi_clk.clkr,
+       [GCC_UFS_RX_CFG_CLK] = &gcc_ufs_rx_cfg_clk.clkr,
+       [GCC_UFS_RX_SYMBOL_0_CLK] = &gcc_ufs_rx_symbol_0_clk.clkr,
+       [GCC_UFS_RX_SYMBOL_1_CLK] = &gcc_ufs_rx_symbol_1_clk.clkr,
+       [GCC_UFS_TX_CFG_CLK] = &gcc_ufs_tx_cfg_clk.clkr,
+       [GCC_UFS_TX_SYMBOL_0_CLK] = &gcc_ufs_tx_symbol_0_clk.clkr,
+       [GCC_UFS_TX_SYMBOL_1_CLK] = &gcc_ufs_tx_symbol_1_clk.clkr,
+       [GCC_USB2A_PHY_SLEEP_CLK] = &gcc_usb2a_phy_sleep_clk.clkr,
+       [GCC_USB2B_PHY_SLEEP_CLK] = &gcc_usb2b_phy_sleep_clk.clkr,
+       [GCC_USB30_MASTER_CLK] = &gcc_usb30_master_clk.clkr,
+       [GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
+       [GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
+       [GCC_USB30_SEC_MASTER_CLK] = &gcc_usb30_sec_master_clk.clkr,
+       [GCC_USB30_SEC_MOCK_UTMI_CLK] = &gcc_usb30_sec_mock_utmi_clk.clkr,
+       [GCC_USB30_SEC_SLEEP_CLK] = &gcc_usb30_sec_sleep_clk.clkr,
+       [GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
+       [GCC_USB_HS_INACTIVITY_TIMERS_CLK] = &gcc_usb_hs_inactivity_timers_clk.clkr,
+       [GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
+       [GCC_USB_HSIC_AHB_CLK] = &gcc_usb_hsic_ahb_clk.clkr,
+       [GCC_USB_HSIC_CLK] = &gcc_usb_hsic_clk.clkr,
+       [GCC_USB_HSIC_IO_CAL_CLK] = &gcc_usb_hsic_io_cal_clk.clkr,
+       [GCC_USB_HSIC_IO_CAL_SLEEP_CLK] = &gcc_usb_hsic_io_cal_sleep_clk.clkr,
+       [GCC_USB_HSIC_MOCK_UTMI_CLK] = &gcc_usb_hsic_mock_utmi_clk.clkr,
+       [GCC_USB_HSIC_SYSTEM_CLK] = &gcc_usb_hsic_system_clk.clkr,
+};
+
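+/*
+ * Block reset (BCR) register offsets.  The reset bit defaults to
+ * BIT(0); entries such as GCC_MPM_AHB_RESET below name an explicit
+ * bit where several resets share one register.
+ */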
+static const struct qcom_reset_map gcc_apq8084_resets[] = {
+       [GCC_SYSTEM_NOC_BCR] = { 0x0100 },
+       [GCC_CONFIG_NOC_BCR] = { 0x0140 },
+       [GCC_PERIPH_NOC_BCR] = { 0x0180 },
+       [GCC_IMEM_BCR] = { 0x0200 },
+       [GCC_MMSS_BCR] = { 0x0240 },
+       [GCC_QDSS_BCR] = { 0x0300 },
+       [GCC_USB_30_BCR] = { 0x03c0 },
+       [GCC_USB3_PHY_BCR] = { 0x03fc },
+       [GCC_USB_HS_HSIC_BCR] = { 0x0400 },
+       [GCC_USB_HS_BCR] = { 0x0480 },
+       [GCC_USB2A_PHY_BCR] = { 0x04a8 },
+       [GCC_USB2B_PHY_BCR] = { 0x04b0 },
+       [GCC_SDCC1_BCR] = { 0x04c0 },
+       [GCC_SDCC2_BCR] = { 0x0500 },
+       [GCC_SDCC3_BCR] = { 0x0540 },
+       [GCC_SDCC4_BCR] = { 0x0580 },
+       [GCC_BLSP1_BCR] = { 0x05c0 },
+       [GCC_BLSP1_QUP1_BCR] = { 0x0640 },
+       [GCC_BLSP1_UART1_BCR] = { 0x0680 },
+       [GCC_BLSP1_QUP2_BCR] = { 0x06c0 },
+       [GCC_BLSP1_UART2_BCR] = { 0x0700 },
+       [GCC_BLSP1_QUP3_BCR] = { 0x0740 },
+       [GCC_BLSP1_UART3_BCR] = { 0x0780 },
+       [GCC_BLSP1_QUP4_BCR] = { 0x07c0 },
+       [GCC_BLSP1_UART4_BCR] = { 0x0800 },
+       [GCC_BLSP1_QUP5_BCR] = { 0x0840 },
+       [GCC_BLSP1_UART5_BCR] = { 0x0880 },
+       [GCC_BLSP1_QUP6_BCR] = { 0x08c0 },
+       [GCC_BLSP1_UART6_BCR] = { 0x0900 },
+       [GCC_BLSP2_BCR] = { 0x0940 },
+       [GCC_BLSP2_QUP1_BCR] = { 0x0980 },
+       [GCC_BLSP2_UART1_BCR] = { 0x09c0 },
+       [GCC_BLSP2_QUP2_BCR] = { 0x0a00 },
+       [GCC_BLSP2_UART2_BCR] = { 0x0a40 },
+       [GCC_BLSP2_QUP3_BCR] = { 0x0a80 },
+       [GCC_BLSP2_UART3_BCR] = { 0x0ac0 },
+       [GCC_BLSP2_QUP4_BCR] = { 0x0b00 },
+       [GCC_BLSP2_UART4_BCR] = { 0x0b40 },
+       [GCC_BLSP2_QUP5_BCR] = { 0x0b80 },
+       [GCC_BLSP2_UART5_BCR] = { 0x0bc0 },
+       [GCC_BLSP2_QUP6_BCR] = { 0x0c00 },
+       [GCC_BLSP2_UART6_BCR] = { 0x0c40 },
+       [GCC_PDM_BCR] = { 0x0cc0 },
+       [GCC_PRNG_BCR] = { 0x0d00 },
+       [GCC_BAM_DMA_BCR] = { 0x0d40 },
+       [GCC_TSIF_BCR] = { 0x0d80 },
+       [GCC_TCSR_BCR] = { 0x0dc0 },
+       [GCC_BOOT_ROM_BCR] = { 0x0e00 },
+       [GCC_MSG_RAM_BCR] = { 0x0e40 },
+       [GCC_TLMM_BCR] = { 0x0e80 },
+       [GCC_MPM_BCR] = { 0x0ec0 },
+       [GCC_MPM_AHB_RESET] = { 0x0ec4, 1 },
+       [GCC_MPM_NON_AHB_RESET] = { 0x0ec4, 2 },
+       [GCC_SEC_CTRL_BCR] = { 0x0f40 },
+       [GCC_SPMI_BCR] = { 0x0fc0 },
+       [GCC_SPDM_BCR] = { 0x1000 },
+       [GCC_CE1_BCR] = { 0x1040 },
+       [GCC_CE2_BCR] = { 0x1080 },
+       [GCC_BIMC_BCR] = { 0x1100 },
+       [GCC_SNOC_BUS_TIMEOUT0_BCR] = { 0x1240 },
+       [GCC_SNOC_BUS_TIMEOUT2_BCR] = { 0x1248 },
+       [GCC_PNOC_BUS_TIMEOUT0_BCR] = { 0x1280 },
+       [GCC_PNOC_BUS_TIMEOUT1_BCR] = { 0x1288 },
+       [GCC_PNOC_BUS_TIMEOUT2_BCR] = { 0x1290 },
+       [GCC_PNOC_BUS_TIMEOUT3_BCR] = { 0x1298 },
+       [GCC_PNOC_BUS_TIMEOUT4_BCR] = { 0x12a0 },
+       [GCC_CNOC_BUS_TIMEOUT0_BCR] = { 0x12c0 },
+       [GCC_CNOC_BUS_TIMEOUT1_BCR] = { 0x12c8 },
+       [GCC_CNOC_BUS_TIMEOUT2_BCR] = { 0x12d0 },
+       [GCC_CNOC_BUS_TIMEOUT3_BCR] = { 0x12d8 },
+       [GCC_CNOC_BUS_TIMEOUT4_BCR] = { 0x12e0 },
+       [GCC_CNOC_BUS_TIMEOUT5_BCR] = { 0x12e8 },
+       [GCC_CNOC_BUS_TIMEOUT6_BCR] = { 0x12f0 },
+       [GCC_DEHR_BCR] = { 0x1300 },
+       [GCC_RBCPR_BCR] = { 0x1380 },
+       [GCC_MSS_RESTART] = { 0x1680 },
+       [GCC_LPASS_RESTART] = { 0x16c0 },
+       [GCC_WCSS_RESTART] = { 0x1700 },
+       [GCC_VENUS_RESTART] = { 0x1740 },
+       [GCC_COPSS_SMMU_BCR] = { 0x1a40 },
+       [GCC_SPSS_BCR] = { 0x1a80 },
+       [GCC_PCIE_0_BCR] = { 0x1ac0 },
+       [GCC_PCIE_0_PHY_BCR] = { 0x1b00 },
+       [GCC_PCIE_1_BCR] = { 0x1b40 },
+       [GCC_PCIE_1_PHY_BCR] = { 0x1b80 },
+       [GCC_USB_30_SEC_BCR] = { 0x1bc0 },
+       [GCC_USB3_SEC_PHY_BCR] = { 0x1bfc },
+       [GCC_SATA_BCR] = { 0x1c40 },
+       [GCC_CE3_BCR] = { 0x1d00 },
+       [GCC_UFS_BCR] = { 0x1d40 },
+       [GCC_USB30_PHY_COM_BCR] = { 0x1e80 },
+};
+
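+/* fast_io selects spinlock-based regmap locking; clk ops may run atomically. */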
+static const struct regmap_config gcc_apq8084_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x1fc0,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc gcc_apq8084_desc = {
+       .config = &gcc_apq8084_regmap_config,
+       .clks = gcc_apq8084_clocks,
+       .num_clks = ARRAY_SIZE(gcc_apq8084_clocks),
+       .resets = gcc_apq8084_resets,
+       .num_resets = ARRAY_SIZE(gcc_apq8084_resets),
+};
+
+static const struct of_device_id gcc_apq8084_match_table[] = {
+       { .compatible = "qcom,gcc-apq8084" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gcc_apq8084_match_table);
+
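+/*
+ * The board XO (19.2 MHz) and sleep clock (32.768 kHz) will eventually
+ * be handed out by an RPM clock driver; until then, register fixed-rate
+ * stand-ins so the clock tree has roots to hang off.
+ */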
+static int gcc_apq8084_probe(struct platform_device *pdev)
+{
+       struct clk *clk;
+       struct device *dev = &pdev->dev;
+
+       /* Temporary until RPM clocks are supported */
+       clk = clk_register_fixed_rate(dev, "xo", NULL, CLK_IS_ROOT, 19200000);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       clk = clk_register_fixed_rate(dev, "sleep_clk_src", NULL,
+                                     CLK_IS_ROOT, 32768);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       return qcom_cc_probe(pdev, &gcc_apq8084_desc);
+}
+
+static int gcc_apq8084_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+static struct platform_driver gcc_apq8084_driver = {
+       .probe          = gcc_apq8084_probe,
+       .remove         = gcc_apq8084_remove,
+       .driver         = {
+               .name   = "gcc-apq8084",
+               .owner  = THIS_MODULE,
+               .of_match_table = gcc_apq8084_match_table,
+       },
+};
+
+static int __init gcc_apq8084_init(void)
+{
+       return platform_driver_register(&gcc_apq8084_driver);
+}
+core_initcall(gcc_apq8084_init);
+
+static void __exit gcc_apq8084_exit(void)
+{
+       platform_driver_unregister(&gcc_apq8084_driver);
+}
+module_exit(gcc_apq8084_exit);
+
+MODULE_DESCRIPTION("QCOM GCC APQ8084 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-apq8084");
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
new file mode 100644 (file)
index 0000000..4032e51
--- /dev/null
@@ -0,0 +1,2424 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,gcc-ipq806x.h>
+#include <dt-bindings/reset/qcom,gcc-ipq806x.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+static struct clk_pll pll3 = {
+       .l_reg = 0x3164,
+       .m_reg = 0x3168,
+       .n_reg = 0x316c,
+       .config_reg = 0x3174,
+       .mode_reg = 0x3160,
+       .status_reg = 0x3178,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll3",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_pll pll8 = {
+       .l_reg = 0x3144,
+       .m_reg = 0x3148,
+       .n_reg = 0x314c,
+       .config_reg = 0x3154,
+       .mode_reg = 0x3140,
+       .status_reg = 0x3158,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll8",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
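+/*
+ * 0x34c0 is a shared PLL vote register: this master sets BIT(8) to
+ * vote pll8 on (BIT(14) for pll14 below), and the PLL keeps running
+ * while any master holds a vote.
+ */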
+static struct clk_regmap pll8_vote = {
+       .enable_reg = 0x34c0,
+       .enable_mask = BIT(8),
+       .hw.init = &(struct clk_init_data){
+               .name = "pll8_vote",
+               .parent_names = (const char *[]){ "pll8" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
+static struct clk_pll pll14 = {
+       .l_reg = 0x31c4,
+       .m_reg = 0x31c8,
+       .n_reg = 0x31cc,
+       .config_reg = 0x31d4,
+       .mode_reg = 0x31c0,
+       .status_reg = 0x31d8,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll14",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_regmap pll14_vote = {
+       .enable_reg = 0x34c0,
+       .enable_mask = BIT(14),
+       .hw.init = &(struct clk_init_data){
+               .name = "pll14_vote",
+               .parent_names = (const char *[]){ "pll14" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
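+/*
+ * Logical parent indices used by the frequency tables and parent maps
+ * below.  They index per-mux selector tables, so unrelated sources can
+ * share a value (pll8/pll3, pll0/cxo): no single mux takes both.
+ */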
+#define P_PXO  0
+#define P_PLL8 1
+#define P_PLL3 1
+#define P_PLL0 2
+#define P_CXO  2
+
+static const u8 gcc_pxo_pll8_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL8]        = 3,
+};
+
+static const char *gcc_pxo_pll8[] = {
+       "pxo",
+       "pll8_vote",
+};
+
+static const u8 gcc_pxo_pll8_cxo_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL8]        = 3,
+       [P_CXO]         = 5,
+};
+
+static const char *gcc_pxo_pll8_cxo[] = {
+       "pxo",
+       "pll8_vote",
+       "cxo",
+};
+
+static const u8 gcc_pxo_pll3_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL3]        = 1,
+};
+
+static const u8 gcc_pxo_pll3_sata_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL3]        = 6,
+};
+
+static const char *gcc_pxo_pll3[] = {
+       "pxo",
+       "pll3",
+};
+
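+/*
+ * Note the inverted naming relative to the tables above: here the u8
+ * array is the hardware selector map and the "_map"-suffixed array
+ * holds the parent names.
+ */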
+static const u8 gcc_pxo_pll8_pll0[] = {
+       [P_PXO]         = 0,
+       [P_PLL8]        = 3,
+       [P_PLL0]        = 2,
+};
+
+static const char *gcc_pxo_pll8_pll0_map[] = {
+       "pxo",
+       "pll8_vote",
+       "pll0",
+};
+
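+/*
+ * Rows are { rate, src, pre_div, m, n } and the RCG output is
+ * (parent / pre_div) * m / n; with pll8 at 384 MHz the first row gives
+ * 384 MHz / 2 * 6 / 625 = 1.8432 MHz.
+ */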
+static struct freq_tbl clk_tbl_gsbi_uart[] = {
+       {  1843200, P_PLL8, 2,  6, 625 },
+       {  3686400, P_PLL8, 2, 12, 625 },
+       {  7372800, P_PLL8, 2, 24, 625 },
+       { 14745600, P_PLL8, 2, 48, 625 },
+       { 16000000, P_PLL8, 4,  1,   6 },
+       { 24000000, P_PLL8, 4,  1,   4 },
+       { 32000000, P_PLL8, 4,  1,   3 },
+       { 40000000, P_PLL8, 1,  5,  48 },
+       { 46400000, P_PLL8, 1, 29, 240 },
+       { 48000000, P_PLL8, 4,  1,   2 },
+       { 51200000, P_PLL8, 1,  2,  15 },
+       { 56000000, P_PLL8, 1,  7,  48 },
+       { 58982400, P_PLL8, 1, 96, 625 },
+       { 64000000, P_PLL8, 2,  1,   3 },
+       { }
+};
+
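+/*
+ * Each RCG is described by its NS/MD register pair plus the field
+ * layout of the M/N:D counter, pre-divider and source select within
+ * them; clk_rcg_ops programs those fields from the freq_tbl entries.
+ */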
+static struct clk_rcg gsbi1_uart_src = {
+       .ns_reg = 0x29d4,
+       .md_reg = 0x29d0,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_uart,
+       .clkr = {
+               .enable_reg = 0x29d4,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi1_uart_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
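+/*
+ * Branch clocks carry a halt_reg/halt_bit status location that the
+ * branch ops poll after an enable or disable to confirm the branch
+ * really toggled.
+ */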
+static struct clk_branch gsbi1_uart_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 12,
+       .clkr = {
+               .enable_reg = 0x29d4,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi1_uart_clk",
+                       .parent_names = (const char *[]){
+                               "gsbi1_uart_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi2_uart_src = {
+       .ns_reg = 0x29f4,
+       .md_reg = 0x29f0,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_uart,
+       .clkr = {
+               .enable_reg = 0x29f4,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi2_uart_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi2_uart_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 8,
+       .clkr = {
+               .enable_reg = 0x29f4,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi2_uart_clk",
+                       .parent_names = (const char *[]){
+                               "gsbi2_uart_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi4_uart_src = {
+       .ns_reg = 0x2a34,
+       .md_reg = 0x2a30,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_uart,
+       .clkr = {
+               .enable_reg = 0x2a34,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi4_uart_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi4_uart_clk = {
+       .halt_reg = 0x2fd0,
+       .halt_bit = 26,
+       .clkr = {
+               .enable_reg = 0x2a34,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi4_uart_clk",
+                       .parent_names = (const char *[]){
+                               "gsbi4_uart_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi5_uart_src = {
+       .ns_reg = 0x2a54,
+       .md_reg = 0x2a50,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_uart,
+       .clkr = {
+               .enable_reg = 0x2a54,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi5_uart_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi5_uart_clk = {
+       .halt_reg = 0x2fd0,
+       .halt_bit = 22,
+       .clkr = {
+               .enable_reg = 0x2a54,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi5_uart_clk",
+                       .parent_names = (const char *[]){
+                               "gsbi5_uart_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi6_uart_src = {
+       .ns_reg = 0x2a74,
+       .md_reg = 0x2a70,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_uart,
+       .clkr = {
+               .enable_reg = 0x2a74,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi6_uart_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi6_uart_clk = {
+       .halt_reg = 0x2fd0,
+       .halt_bit = 18,
+       .clkr = {
+               .enable_reg = 0x2a74,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi6_uart_clk",
+                       .parent_names = (const char *[]){
+                               "gsbi6_uart_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi7_uart_src = {
+       .ns_reg = 0x2a94,
+       .md_reg = 0x2a90,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_uart,
+       .clkr = {
+               .enable_reg = 0x2a94,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi7_uart_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi7_uart_clk = {
+       .halt_reg = 0x2fd0,
+       .halt_bit = 14,
+       .clkr = {
+               .enable_reg = 0x2a94,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi7_uart_clk",
+                       .parent_names = (const char *[]){
+                               "gsbi7_uart_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
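+/* Same { rate, src, pre_div, m, n } layout; m = n = 0 bypasses the M/N
+ * counter, so the rate is just parent / pre_div.
+ */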
+static struct freq_tbl clk_tbl_gsbi_qup[] = {
+       {  1100000, P_PXO,  1, 2, 49 },
+       {  5400000, P_PXO,  1, 1,  5 },
+       { 10800000, P_PXO,  1, 2,  5 },
+       { 15060000, P_PLL8, 1, 2, 51 },
+       { 24000000, P_PLL8, 4, 1,  4 },
+       { 25600000, P_PLL8, 1, 1, 15 },
+       { 27000000, P_PXO,  1, 0,  0 },
+       { 48000000, P_PLL8, 4, 1,  2 },
+       { 51200000, P_PLL8, 1, 2, 15 },
+       { }
+};
+
+static struct clk_rcg gsbi1_qup_src = {
+       .ns_reg = 0x29cc,
+       .md_reg = 0x29c8,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_qup,
+       .clkr = {
+               .enable_reg = 0x29cc,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi1_qup_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi1_qup_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 11,
+       .clkr = {
+               .enable_reg = 0x29cc,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi1_qup_clk",
+                       .parent_names = (const char *[]){ "gsbi1_qup_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi2_qup_src = {
+       .ns_reg = 0x29ec,
+       .md_reg = 0x29e8,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_qup,
+       .clkr = {
+               .enable_reg = 0x29ec,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi2_qup_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi2_qup_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 6,
+       .clkr = {
+               .enable_reg = 0x29ec,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi2_qup_clk",
+                       .parent_names = (const char *[]){ "gsbi2_qup_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi4_qup_src = {
+       .ns_reg = 0x2a2c,
+       .md_reg = 0x2a28,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_qup,
+       .clkr = {
+               .enable_reg = 0x2a2c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi4_qup_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi4_qup_clk = {
+       .halt_reg = 0x2fd0,
+       .halt_bit = 24,
+       .clkr = {
+               .enable_reg = 0x2a2c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi4_qup_clk",
+                       .parent_names = (const char *[]){ "gsbi4_qup_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi5_qup_src = {
+       .ns_reg = 0x2a4c,
+       .md_reg = 0x2a48,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_qup,
+       .clkr = {
+               .enable_reg = 0x2a4c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi5_qup_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi5_qup_clk = {
+       .halt_reg = 0x2fd0,
+       .halt_bit = 20,
+       .clkr = {
+               .enable_reg = 0x2a4c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi5_qup_clk",
+                       .parent_names = (const char *[]){ "gsbi5_qup_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi6_qup_src = {
+       .ns_reg = 0x2a6c,
+       .md_reg = 0x2a68,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_qup,
+       .clkr = {
+               .enable_reg = 0x2a6c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi6_qup_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi6_qup_clk = {
+       .halt_reg = 0x2fd0,
+       .halt_bit = 16,
+       .clkr = {
+               .enable_reg = 0x2a6c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi6_qup_clk",
+                       .parent_names = (const char *[]){ "gsbi6_qup_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gsbi7_qup_src = {
+       .ns_reg = 0x2a8c,
+       .md_reg = 0x2a88,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_gsbi_qup,
+       .clkr = {
+               .enable_reg = 0x2a8c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi7_qup_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_PARENT_GATE,
+               },
+       },
+};
+
+static struct clk_branch gsbi7_qup_clk = {
+       .halt_reg = 0x2fd0,
+       .halt_bit = 12,
+       .clkr = {
+               .enable_reg = 0x2a8c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi7_qup_clk",
+                       .parent_names = (const char *[]){ "gsbi7_qup_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
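+/*
+ * GSBI AHB (interface) clocks.  Their upstream bus clock isn't
+ * modelled here, hence CLK_IS_ROOT; hwcg_reg/hwcg_bit enable optional
+ * hardware clock gating.
+ */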
+static struct clk_branch gsbi1_h_clk = {
+       .hwcg_reg = 0x29c0,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fcc,
+       .halt_bit = 13,
+       .clkr = {
+               .enable_reg = 0x29c0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi1_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch gsbi2_h_clk = {
+       .hwcg_reg = 0x29e0,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fcc,
+       .halt_bit = 9,
+       .clkr = {
+               .enable_reg = 0x29e0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi2_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch gsbi4_h_clk = {
+       .hwcg_reg = 0x2a20,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fd0,
+       .halt_bit = 27,
+       .clkr = {
+               .enable_reg = 0x2a20,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi4_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch gsbi5_h_clk = {
+       .hwcg_reg = 0x2a40,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fd0,
+       .halt_bit = 23,
+       .clkr = {
+               .enable_reg = 0x2a40,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi5_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch gsbi6_h_clk = {
+       .hwcg_reg = 0x2a60,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fd0,
+       .halt_bit = 19,
+       .clkr = {
+               .enable_reg = 0x2a60,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi6_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch gsbi7_h_clk = {
+       .hwcg_reg = 0x2a80,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fd0,
+       .halt_bit = 15,
+       .clkr = {
+               .enable_reg = 0x2a80,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gsbi7_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
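+/*
+ * Frequency table entries are { rate, src, pre_div, m, n }: the selected
+ * source is divided by pre_div and, when n is non-zero, scaled by the
+ * M/N counter, so rate = (src_rate / pre_div) * m / n.
+ */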
+static const struct freq_tbl clk_tbl_gp[] = {
+       { 12500000, P_PXO,  2, 0, 0 },
+       { 25000000, P_PXO,  1, 0, 0 },
+       { 64000000, P_PLL8, 2, 1, 3 },
+       { 76800000, P_PLL8, 1, 1, 5 },
+       { 96000000, P_PLL8, 4, 0, 0 },
+       { 128000000, P_PLL8, 3, 0, 0 },
+       { 192000000, P_PLL8, 2, 0, 0 },
+       { }
+};
+
+static struct clk_rcg gp0_src = {
+       .ns_reg = 0x2d24,
+       .md_reg = 0x2d00,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_cxo_map,
+       },
+       .freq_tbl = clk_tbl_gp,
+       .clkr = {
+               .enable_reg = 0x2d24,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gp0_src",
+                       .parent_names = gcc_pxo_pll8_cxo,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       }
+};
+
+static struct clk_branch gp0_clk = {
+       .halt_reg = 0x2fd8,
+       .halt_bit = 7,
+       .clkr = {
+               .enable_reg = 0x2d24,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gp0_clk",
+                       .parent_names = (const char *[]){ "gp0_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gp1_src = {
+       .ns_reg = 0x2d44,
+       .md_reg = 0x2d40,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_cxo_map,
+       },
+       .freq_tbl = clk_tbl_gp,
+       .clkr = {
+               .enable_reg = 0x2d44,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gp1_src",
+                       .parent_names = gcc_pxo_pll8_cxo,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       }
+};
+
+static struct clk_branch gp1_clk = {
+       .halt_reg = 0x2fd8,
+       .halt_bit = 6,
+       .clkr = {
+               .enable_reg = 0x2d44,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gp1_clk",
+                       .parent_names = (const char *[]){ "gp1_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg gp2_src = {
+       .ns_reg = 0x2d64,
+       .md_reg = 0x2d60,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_cxo_map,
+       },
+       .freq_tbl = clk_tbl_gp,
+       .clkr = {
+               .enable_reg = 0x2d64,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gp2_src",
+                       .parent_names = gcc_pxo_pll8_cxo,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       }
+};
+
+static struct clk_branch gp2_clk = {
+       .halt_reg = 0x2fd8,
+       .halt_bit = 5,
+       .clkr = {
+               .enable_reg = 0x2d64,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "gp2_clk",
+                       .parent_names = (const char *[]){ "gp2_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch pmem_clk = {
+       .hwcg_reg = 0x25a0,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fc8,
+       .halt_bit = 20,
+       .clkr = {
+               .enable_reg = 0x25a0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pmem_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_rcg prng_src = {
+       .ns_reg = 0x2e80,
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "prng_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+               },
+       },
+};
+
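+/* Voted branch: enable bit lives in the shared voting register at 0x3080 */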
+static struct clk_branch prng_clk = {
+       .halt_reg = 0x2fd8,
+       .halt_check = BRANCH_HALT_VOTED,
+       .halt_bit = 10,
+       .clkr = {
+               .enable_reg = 0x3080,
+               .enable_mask = BIT(10),
+               .hw.init = &(struct clk_init_data){
+                       .name = "prng_clk",
+                       .parent_names = (const char *[]){ "prng_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_sdc[] = {
+       {    144000, P_PXO,   5, 18, 625 },
+       {    400000, P_PLL8,  4, 1, 240 },
+       {  16000000, P_PLL8,  4, 1,   6 },
+       {  17070000, P_PLL8,  1, 2,  45 },
+       {  20210000, P_PLL8,  1, 1,  19 },
+       {  24000000, P_PLL8,  4, 1,   4 },
+       {  48000000, P_PLL8,  4, 1,   2 },
+       {  64000000, P_PLL8,  3, 1,   2 },
+       {  96000000, P_PLL8,  4, 0,   0 },
+       { 192000000, P_PLL8,  2, 0,   0 },
+       { }
+};
+
+static struct clk_rcg sdc1_src = {
+       .ns_reg = 0x282c,
+       .md_reg = 0x2828,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_sdc,
+       .clkr = {
+               .enable_reg = 0x282c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sdc1_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       }
+};
+
+static struct clk_branch sdc1_clk = {
+       .halt_reg = 0x2fc8,
+       .halt_bit = 6,
+       .clkr = {
+               .enable_reg = 0x282c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sdc1_clk",
+                       .parent_names = (const char *[]){ "sdc1_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg sdc3_src = {
+       .ns_reg = 0x286c,
+       .md_reg = 0x2868,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_sdc,
+       .clkr = {
+               .enable_reg = 0x286c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sdc3_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       }
+};
+
+static struct clk_branch sdc3_clk = {
+       .halt_reg = 0x2fc8,
+       .halt_bit = 4,
+       .clkr = {
+               .enable_reg = 0x286c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sdc3_clk",
+                       .parent_names = (const char *[]){ "sdc3_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch sdc1_h_clk = {
+       .hwcg_reg = 0x2820,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fc8,
+       .halt_bit = 11,
+       .clkr = {
+               .enable_reg = 0x2820,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sdc1_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch sdc3_h_clk = {
+       .hwcg_reg = 0x2860,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fc8,
+       .halt_bit = 9,
+       .clkr = {
+               .enable_reg = 0x2860,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sdc3_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_tsif_ref[] = {
+       { 105000, P_PXO,  1, 1, 256 },
+       { }
+};
+
+static struct clk_rcg tsif_ref_src = {
+       .ns_reg = 0x2710,
+       .md_reg = 0x270c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_tsif_ref,
+       .clkr = {
+               .enable_reg = 0x2710,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "tsif_ref_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       }
+};
+
+static struct clk_branch tsif_ref_clk = {
+       .halt_reg = 0x2fd4,
+       .halt_bit = 5,
+       .clkr = {
+               .enable_reg = 0x2710,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "tsif_ref_clk",
+                       .parent_names = (const char *[]){ "tsif_ref_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch tsif_h_clk = {
+       .hwcg_reg = 0x2700,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fd4,
+       .halt_bit = 7,
+       .clkr = {
+               .enable_reg = 0x2700,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "tsif_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch dma_bam_h_clk = {
+       .hwcg_reg = 0x25c0,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fc8,
+       .halt_bit = 12,
+       .clkr = {
+               .enable_reg = 0x25c0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "dma_bam_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch adm0_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_check = BRANCH_HALT_VOTED,
+       .halt_bit = 12,
+       .clkr = {
+               .enable_reg = 0x3080,
+               .enable_mask = BIT(2),
+               .hw.init = &(struct clk_init_data){
+                       .name = "adm0_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch adm0_pbus_clk = {
+       .hwcg_reg = 0x2208,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fdc,
+       .halt_check = BRANCH_HALT_VOTED,
+       .halt_bit = 11,
+       .clkr = {
+               .enable_reg = 0x3080,
+               .enable_mask = BIT(3),
+               .hw.init = &(struct clk_init_data){
+                       .name = "adm0_pbus_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pmic_arb0_h_clk = {
+       .halt_reg = 0x2fd8,
+       .halt_check = BRANCH_HALT_VOTED,
+       .halt_bit = 22,
+       .clkr = {
+               .enable_reg = 0x3080,
+               .enable_mask = BIT(8),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pmic_arb0_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pmic_arb1_h_clk = {
+       .halt_reg = 0x2fd8,
+       .halt_check = BRANCH_HALT_VOTED,
+       .halt_bit = 21,
+       .clkr = {
+               .enable_reg = 0x3080,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pmic_arb1_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pmic_ssbi2_clk = {
+       .halt_reg = 0x2fd8,
+       .halt_check = BRANCH_HALT_VOTED,
+       .halt_bit = 23,
+       .clkr = {
+               .enable_reg = 0x3080,
+               .enable_mask = BIT(7),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pmic_ssbi2_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch rpm_msg_ram_h_clk = {
+       .hwcg_reg = 0x27e0,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fd8,
+       .halt_check = BRANCH_HALT_VOTED,
+       .halt_bit = 12,
+       .clkr = {
+               .enable_reg = 0x3080,
+               .enable_mask = BIT(6),
+               .hw.init = &(struct clk_init_data){
+                       .name = "rpm_msg_ram_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_pcie_ref[] = {
+       { 100000000, P_PLL3,  12, 0, 0 },
+       { }
+};
+
+static struct clk_rcg pcie_ref_src = {
+       .ns_reg = 0x3860,
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll3_map,
+       },
+       .freq_tbl = clk_tbl_pcie_ref,
+       .clkr = {
+               .enable_reg = 0x3860,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_ref_src",
+                       .parent_names = gcc_pxo_pll3,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch pcie_ref_src_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 30,
+       .clkr = {
+               .enable_reg = 0x3860,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_ref_src_clk",
+                       .parent_names = (const char *[]){ "pcie_ref_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch pcie_a_clk = {
+       .halt_reg = 0x2fc0,
+       .halt_bit = 13,
+       .clkr = {
+               .enable_reg = 0x22c0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_a_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie_aux_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 31,
+       .clkr = {
+               .enable_reg = 0x22c8,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_aux_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie_h_clk = {
+       .halt_reg = 0x2fd4,
+       .halt_bit = 8,
+       .clkr = {
+               .enable_reg = 0x22cc,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie_phy_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 29,
+       .clkr = {
+               .enable_reg = 0x22d0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_phy_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_rcg pcie1_ref_src = {
+       .ns_reg = 0x3aa0,
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll3_map,
+       },
+       .freq_tbl = clk_tbl_pcie_ref,
+       .clkr = {
+               .enable_reg = 0x3aa0,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie1_ref_src",
+                       .parent_names = gcc_pxo_pll3,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch pcie1_ref_src_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 27,
+       .clkr = {
+               .enable_reg = 0x3aa0,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie1_ref_src_clk",
+                       .parent_names = (const char *[]){ "pcie1_ref_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch pcie1_a_clk = {
+       .halt_reg = 0x2fc0,
+       .halt_bit = 10,
+       .clkr = {
+               .enable_reg = 0x3a80,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie1_a_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie1_aux_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 28,
+       .clkr = {
+               .enable_reg = 0x3a88,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie1_aux_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie1_h_clk = {
+       .halt_reg = 0x2fd4,
+       .halt_bit = 9,
+       .clkr = {
+               .enable_reg = 0x3a8c,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie1_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie1_phy_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 26,
+       .clkr = {
+               .enable_reg = 0x3a90,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie1_phy_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_rcg pcie2_ref_src = {
+       .ns_reg = 0x3ae0,
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll3_map,
+       },
+       .freq_tbl = clk_tbl_pcie_ref,
+       .clkr = {
+               .enable_reg = 0x3ae0,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie2_ref_src",
+                       .parent_names = gcc_pxo_pll3,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch pcie2_ref_src_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 24,
+       .clkr = {
+               .enable_reg = 0x3ae0,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie2_ref_src_clk",
+                       .parent_names = (const char *[]){ "pcie2_ref_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch pcie2_a_clk = {
+       .halt_reg = 0x2fc0,
+       .halt_bit = 9,
+       .clkr = {
+               .enable_reg = 0x3ac0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie2_a_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie2_aux_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 25,
+       .clkr = {
+               .enable_reg = 0x3ac8,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie2_aux_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie2_h_clk = {
+       .halt_reg = 0x2fd4,
+       .halt_bit = 10,
+       .clkr = {
+               .enable_reg = 0x3acc,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie2_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie2_phy_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 23,
+       .clkr = {
+               .enable_reg = 0x3ad0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie2_phy_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_sata_ref[] = {
+       { 100000000, P_PLL3,  12, 0, 0 },
+       { }
+};
+
+static struct clk_rcg sata_ref_src = {
+       .ns_reg = 0x2c08,
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll3_sata_map,
+       },
+       .freq_tbl = clk_tbl_sata_ref,
+       .clkr = {
+               .enable_reg = 0x2c08,
+               .enable_mask = BIT(7),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_ref_src",
+                       .parent_names = gcc_pxo_pll3,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch sata_rxoob_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 20,
+       .clkr = {
+               .enable_reg = 0x2c0c,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_rxoob_clk",
+                       .parent_names = (const char *[]){ "sata_ref_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch sata_pmalive_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 19,
+       .clkr = {
+               .enable_reg = 0x2c10,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_pmalive_clk",
+                       .parent_names = (const char *[]){ "sata_ref_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch sata_phy_ref_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 18,
+       .clkr = {
+               .enable_reg = 0x2c14,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_phy_ref_clk",
+                       .parent_names = (const char *[]){ "pxo" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+               },
+       },
+};
+
+static struct clk_branch sata_a_clk = {
+       .halt_reg = 0x2fc0,
+       .halt_bit = 12,
+       .clkr = {
+               .enable_reg = 0x2c20,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_a_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch sata_h_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 21,
+       .clkr = {
+               .enable_reg = 0x2c00,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch sfab_sata_s_h_clk = {
+       .halt_reg = 0x2fc4,
+       .halt_bit = 14,
+       .clkr = {
+               .enable_reg = 0x2480,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sfab_sata_s_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch sata_phy_cfg_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 14,
+       .clkr = {
+               .enable_reg = 0x2c40,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_phy_cfg_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_usb30_master[] = {
+       { 125000000, P_PLL0,  1, 5, 32 },
+       { }
+};
+
+static struct clk_rcg usb30_master_clk_src = {
+       .ns_reg = 0x3b2c,
+       .md_reg = 0x3b28,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll0,
+       },
+       .freq_tbl = clk_tbl_usb30_master,
+       .clkr = {
+               .enable_reg = 0x3b2c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb30_master_ref_src",
+                       .parent_names = gcc_pxo_pll8_pll0_map,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch usb30_0_branch_clk = {
+       .halt_reg = 0x2fc4,
+       .halt_bit = 22,
+       .clkr = {
+               .enable_reg = 0x3b24,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb30_0_branch_clk",
+                       .parent_names = (const char *[]){ "usb30_master_ref_src", },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch usb30_1_branch_clk = {
+       .halt_reg = 0x2fc4,
+       .halt_bit = 17,
+       .clkr = {
+               .enable_reg = 0x3b34,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb30_1_branch_clk",
+                       .parent_names = (const char *[]){ "usb30_master_ref_src", },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_usb30_utmi[] = {
+       { 60000000, P_PLL8,  1, 5, 32 },
+       { }
+};
+
+static struct clk_rcg usb30_utmi_clk = {
+       .ns_reg = 0x3b44,
+       .md_reg = 0x3b40,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll0,
+       },
+       .freq_tbl = clk_tbl_usb30_utmi,
+       .clkr = {
+               .enable_reg = 0x3b44,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb30_utmi_clk",
+                       .parent_names = gcc_pxo_pll8_pll0_map,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch usb30_0_utmi_clk_ctl = {
+       .halt_reg = 0x2fc4,
+       .halt_bit = 21,
+       .clkr = {
+               .enable_reg = 0x3b48,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb30_0_utmi_clk_ctl",
+                       .parent_names = (const char *[]){ "usb30_utmi_clk", },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch usb30_1_utmi_clk_ctl = {
+       .halt_reg = 0x2fc4,
+       .halt_bit = 15,
+       .clkr = {
+               .enable_reg = 0x3b4c,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb30_1_utmi_clk_ctl",
+                       .parent_names = (const char *[]){ "usb30_utmi_clk", },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_usb[] = {
+       { 60000000, P_PLL8,  1, 5, 32 },
+       { }
+};
+
+static struct clk_rcg usb_hs1_xcvr_clk_src = {
+       .ns_reg = 0x290c,
+       .md_reg = 0x2908,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll0,
+       },
+       .freq_tbl = clk_tbl_usb,
+       .clkr = {
+               .enable_reg = 0x290c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs1_xcvr_src",
+                       .parent_names = gcc_pxo_pll8_pll0_map,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch usb_hs1_xcvr_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 17,
+       .clkr = {
+               .enable_reg = 0x290c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs1_xcvr_clk",
+                       .parent_names = (const char *[]){ "usb_hs1_xcvr_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch usb_hs1_h_clk = {
+       .hwcg_reg = 0x2900,
+       .hwcg_bit = 6,
+       .halt_reg = 0x2fc8,
+       .halt_bit = 1,
+       .clkr = {
+               .enable_reg = 0x2900,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs1_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_rcg usb_fs1_xcvr_clk_src = {
+       .ns_reg = 0x2968,
+       .md_reg = 0x2964,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll0,
+       },
+       .freq_tbl = clk_tbl_usb,
+       .clkr = {
+               .enable_reg = 0x2968,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_fs1_xcvr_src",
+                       .parent_names = gcc_pxo_pll8_pll0_map,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch usb_fs1_xcvr_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 17,
+       .clkr = {
+               .enable_reg = 0x2968,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_fs1_xcvr_clk",
+                       .parent_names = (const char *[]){ "usb_fs1_xcvr_src", },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch usb_fs1_sys_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 18,
+       .clkr = {
+               .enable_reg = 0x296c,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_fs1_sys_clk",
+                       .parent_names = (const char *[]){ "usb_fs1_xcvr_src", },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch usb_fs1_h_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 19,
+       .clkr = {
+               .enable_reg = 0x2960,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_fs1_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
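+/* Regmap clocks, indexed by the clock IDs exposed to device tree consumers */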
+static struct clk_regmap *gcc_ipq806x_clks[] = {
+       [PLL3] = &pll3.clkr,
+       [PLL8] = &pll8.clkr,
+       [PLL8_VOTE] = &pll8_vote,
+       [PLL14] = &pll14.clkr,
+       [PLL14_VOTE] = &pll14_vote,
+       [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+       [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+       [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+       [GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+       [GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+       [GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+       [GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+       [GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+       [GSBI6_UART_SRC] = &gsbi6_uart_src.clkr,
+       [GSBI6_UART_CLK] = &gsbi6_uart_clk.clkr,
+       [GSBI7_UART_SRC] = &gsbi7_uart_src.clkr,
+       [GSBI7_UART_CLK] = &gsbi7_uart_clk.clkr,
+       [GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+       [GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+       [GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+       [GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+       [GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+       [GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+       [GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+       [GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+       [GSBI6_QUP_SRC] = &gsbi6_qup_src.clkr,
+       [GSBI6_QUP_CLK] = &gsbi6_qup_clk.clkr,
+       [GSBI7_QUP_SRC] = &gsbi7_qup_src.clkr,
+       [GSBI7_QUP_CLK] = &gsbi7_qup_clk.clkr,
+       [GP0_SRC] = &gp0_src.clkr,
+       [GP0_CLK] = &gp0_clk.clkr,
+       [GP1_SRC] = &gp1_src.clkr,
+       [GP1_CLK] = &gp1_clk.clkr,
+       [GP2_SRC] = &gp2_src.clkr,
+       [GP2_CLK] = &gp2_clk.clkr,
+       [PMEM_A_CLK] = &pmem_clk.clkr,
+       [PRNG_SRC] = &prng_src.clkr,
+       [PRNG_CLK] = &prng_clk.clkr,
+       [SDC1_SRC] = &sdc1_src.clkr,
+       [SDC1_CLK] = &sdc1_clk.clkr,
+       [SDC3_SRC] = &sdc3_src.clkr,
+       [SDC3_CLK] = &sdc3_clk.clkr,
+       [TSIF_REF_SRC] = &tsif_ref_src.clkr,
+       [TSIF_REF_CLK] = &tsif_ref_clk.clkr,
+       [DMA_BAM_H_CLK] = &dma_bam_h_clk.clkr,
+       [GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+       [GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+       [GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+       [GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+       [GSBI6_H_CLK] = &gsbi6_h_clk.clkr,
+       [GSBI7_H_CLK] = &gsbi7_h_clk.clkr,
+       [TSIF_H_CLK] = &tsif_h_clk.clkr,
+       [SDC1_H_CLK] = &sdc1_h_clk.clkr,
+       [SDC3_H_CLK] = &sdc3_h_clk.clkr,
+       [ADM0_CLK] = &adm0_clk.clkr,
+       [ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+       [PCIE_A_CLK] = &pcie_a_clk.clkr,
+       [PCIE_AUX_CLK] = &pcie_aux_clk.clkr,
+       [PCIE_H_CLK] = &pcie_h_clk.clkr,
+       [PCIE_PHY_CLK] = &pcie_phy_clk.clkr,
+       [SFAB_SATA_S_H_CLK] = &sfab_sata_s_h_clk.clkr,
+       [PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+       [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+       [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+       [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+       [SATA_H_CLK] = &sata_h_clk.clkr,
+       [SATA_CLK_SRC] = &sata_ref_src.clkr,
+       [SATA_RXOOB_CLK] = &sata_rxoob_clk.clkr,
+       [SATA_PMALIVE_CLK] = &sata_pmalive_clk.clkr,
+       [SATA_PHY_REF_CLK] = &sata_phy_ref_clk.clkr,
+       [SATA_A_CLK] = &sata_a_clk.clkr,
+       [SATA_PHY_CFG_CLK] = &sata_phy_cfg_clk.clkr,
+       [PCIE_ALT_REF_SRC] = &pcie_ref_src.clkr,
+       [PCIE_ALT_REF_CLK] = &pcie_ref_src_clk.clkr,
+       [PCIE_1_A_CLK] = &pcie1_a_clk.clkr,
+       [PCIE_1_AUX_CLK] = &pcie1_aux_clk.clkr,
+       [PCIE_1_H_CLK] = &pcie1_h_clk.clkr,
+       [PCIE_1_PHY_CLK] = &pcie1_phy_clk.clkr,
+       [PCIE_1_ALT_REF_SRC] = &pcie1_ref_src.clkr,
+       [PCIE_1_ALT_REF_CLK] = &pcie1_ref_src_clk.clkr,
+       [PCIE_2_A_CLK] = &pcie2_a_clk.clkr,
+       [PCIE_2_AUX_CLK] = &pcie2_aux_clk.clkr,
+       [PCIE_2_H_CLK] = &pcie2_h_clk.clkr,
+       [PCIE_2_PHY_CLK] = &pcie2_phy_clk.clkr,
+       [PCIE_2_ALT_REF_SRC] = &pcie2_ref_src.clkr,
+       [PCIE_2_ALT_REF_CLK] = &pcie2_ref_src_clk.clkr,
+       [USB30_MASTER_SRC] = &usb30_master_clk_src.clkr,
+       [USB30_0_MASTER_CLK] = &usb30_0_branch_clk.clkr,
+       [USB30_1_MASTER_CLK] = &usb30_1_branch_clk.clkr,
+       [USB30_UTMI_SRC] = &usb30_utmi_clk.clkr,
+       [USB30_0_UTMI_CLK] = &usb30_0_utmi_clk_ctl.clkr,
+       [USB30_1_UTMI_CLK] = &usb30_1_utmi_clk_ctl.clkr,
+       [USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+       [USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_clk_src.clkr,
+       [USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+       [USB_FS1_H_CLK] = &usb_fs1_h_clk.clkr,
+       [USB_FS1_XCVR_SRC] = &usb_fs1_xcvr_clk_src.clkr,
+       [USB_FS1_XCVR_CLK] = &usb_fs1_xcvr_clk.clkr,
+       [USB_FS1_SYSTEM_CLK] = &usb_fs1_sys_clk.clkr,
+};
+
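+/* Reset lines as { register offset, bit } pairs */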
+static const struct qcom_reset_map gcc_ipq806x_resets[] = {
+       [QDSS_STM_RESET] = { 0x2060, 6 },
+       [AFAB_SMPSS_S_RESET] = { 0x20b8, 2 },
+       [AFAB_SMPSS_M1_RESET] = { 0x20b8, 1 },
+       [AFAB_SMPSS_M0_RESET] = { 0x20b8, 0 },
+       [AFAB_EBI1_CH0_RESET] = { 0x20c0, 7 },
+       [AFAB_EBI1_CH1_RESET] = { 0x20c4, 7 },
+       [SFAB_ADM0_M0_RESET] = { 0x21e0, 7 },
+       [SFAB_ADM0_M1_RESET] = { 0x21e4, 7 },
+       [SFAB_ADM0_M2_RESET] = { 0x21e8, 7 },
+       [ADM0_C2_RESET] = { 0x220c, 4 },
+       [ADM0_C1_RESET] = { 0x220c, 3 },
+       [ADM0_C0_RESET] = { 0x220c, 2 },
+       [ADM0_PBUS_RESET] = { 0x220c, 1 },
+       [ADM0_RESET] = { 0x220c, 0 },
+       [QDSS_CLKS_SW_RESET] = { 0x2260, 5 },
+       [QDSS_POR_RESET] = { 0x2260, 4 },
+       [QDSS_TSCTR_RESET] = { 0x2260, 3 },
+       [QDSS_HRESET_RESET] = { 0x2260, 2 },
+       [QDSS_AXI_RESET] = { 0x2260, 1 },
+       [QDSS_DBG_RESET] = { 0x2260, 0 },
+       [SFAB_PCIE_M_RESET] = { 0x22d8, 1 },
+       [SFAB_PCIE_S_RESET] = { 0x22d8, 0 },
+       [PCIE_EXT_RESET] = { 0x22dc, 6 },
+       [PCIE_PHY_RESET] = { 0x22dc, 5 },
+       [PCIE_PCI_RESET] = { 0x22dc, 4 },
+       [PCIE_POR_RESET] = { 0x22dc, 3 },
+       [PCIE_HCLK_RESET] = { 0x22dc, 2 },
+       [PCIE_ACLK_RESET] = { 0x22dc, 0 },
+       [SFAB_LPASS_RESET] = { 0x23a0, 7 },
+       [SFAB_AFAB_M_RESET] = { 0x23e0, 7 },
+       [AFAB_SFAB_M0_RESET] = { 0x2420, 7 },
+       [AFAB_SFAB_M1_RESET] = { 0x2424, 7 },
+       [SFAB_SATA_S_RESET] = { 0x2480, 7 },
+       [SFAB_DFAB_M_RESET] = { 0x2500, 7 },
+       [DFAB_SFAB_M_RESET] = { 0x2520, 7 },
+       [DFAB_SWAY0_RESET] = { 0x2540, 7 },
+       [DFAB_SWAY1_RESET] = { 0x2544, 7 },
+       [DFAB_ARB0_RESET] = { 0x2560, 7 },
+       [DFAB_ARB1_RESET] = { 0x2564, 7 },
+       [PPSS_PROC_RESET] = { 0x2594, 1 },
+       [PPSS_RESET] = { 0x2594, 0 },
+       [DMA_BAM_RESET] = { 0x25c0, 7 },
+       [SPS_TIC_H_RESET] = { 0x2600, 7 },
+       [SFAB_CFPB_M_RESET] = { 0x2680, 7 },
+       [SFAB_CFPB_S_RESET] = { 0x26c0, 7 },
+       [TSIF_H_RESET] = { 0x2700, 7 },
+       [CE1_H_RESET] = { 0x2720, 7 },
+       [CE1_CORE_RESET] = { 0x2724, 7 },
+       [CE1_SLEEP_RESET] = { 0x2728, 7 },
+       [CE2_H_RESET] = { 0x2740, 7 },
+       [CE2_CORE_RESET] = { 0x2744, 7 },
+       [SFAB_SFPB_M_RESET] = { 0x2780, 7 },
+       [SFAB_SFPB_S_RESET] = { 0x27a0, 7 },
+       [RPM_PROC_RESET] = { 0x27c0, 7 },
+       [PMIC_SSBI2_RESET] = { 0x280c, 12 },
+       [SDC1_RESET] = { 0x2830, 0 },
+       [SDC2_RESET] = { 0x2850, 0 },
+       [SDC3_RESET] = { 0x2870, 0 },
+       [SDC4_RESET] = { 0x2890, 0 },
+       [USB_HS1_RESET] = { 0x2910, 0 },
+       [USB_HSIC_RESET] = { 0x2934, 0 },
+       [USB_FS1_XCVR_RESET] = { 0x2974, 1 },
+       [USB_FS1_RESET] = { 0x2974, 0 },
+       [GSBI1_RESET] = { 0x29dc, 0 },
+       [GSBI2_RESET] = { 0x29fc, 0 },
+       [GSBI3_RESET] = { 0x2a1c, 0 },
+       [GSBI4_RESET] = { 0x2a3c, 0 },
+       [GSBI5_RESET] = { 0x2a5c, 0 },
+       [GSBI6_RESET] = { 0x2a7c, 0 },
+       [GSBI7_RESET] = { 0x2a9c, 0 },
+       [SPDM_RESET] = { 0x2b6c, 0 },
+       [SEC_CTRL_RESET] = { 0x2b80, 7 },
+       [TLMM_H_RESET] = { 0x2ba0, 7 },
+       [SFAB_SATA_M_RESET] = { 0x2c18, 0 },
+       [SATA_RESET] = { 0x2c1c, 0 },
+       [TSSC_RESET] = { 0x2ca0, 7 },
+       [PDM_RESET] = { 0x2cc0, 12 },
+       [MPM_H_RESET] = { 0x2da0, 7 },
+       [MPM_RESET] = { 0x2da4, 0 },
+       [SFAB_SMPSS_S_RESET] = { 0x2e00, 7 },
+       [PRNG_RESET] = { 0x2e80, 12 },
+       [SFAB_CE3_M_RESET] = { 0x36c8, 1 },
+       [SFAB_CE3_S_RESET] = { 0x36c8, 0 },
+       [CE3_SLEEP_RESET] = { 0x36d0, 7 },
+       [PCIE_1_M_RESET] = { 0x3a98, 1 },
+       [PCIE_1_S_RESET] = { 0x3a98, 0 },
+       [PCIE_1_EXT_RESET] = { 0x3a9c, 6 },
+       [PCIE_1_PHY_RESET] = { 0x3a9c, 5 },
+       [PCIE_1_PCI_RESET] = { 0x3a9c, 4 },
+       [PCIE_1_POR_RESET] = { 0x3a9c, 3 },
+       [PCIE_1_HCLK_RESET] = { 0x3a9c, 2 },
+       [PCIE_1_ACLK_RESET] = { 0x3a9c, 0 },
+       [PCIE_2_M_RESET] = { 0x3ad8, 1 },
+       [PCIE_2_S_RESET] = { 0x3ad8, 0 },
+       [PCIE_2_EXT_RESET] = { 0x3adc, 6 },
+       [PCIE_2_PHY_RESET] = { 0x3adc, 5 },
+       [PCIE_2_PCI_RESET] = { 0x3adc, 4 },
+       [PCIE_2_POR_RESET] = { 0x3adc, 3 },
+       [PCIE_2_HCLK_RESET] = { 0x3adc, 2 },
+       [PCIE_2_ACLK_RESET] = { 0x3adc, 0 },
+       [SFAB_USB30_S_RESET] = { 0x3b54, 1 },
+       [SFAB_USB30_M_RESET] = { 0x3b54, 0 },
+       [USB30_0_PORT2_HS_PHY_RESET] = { 0x3b50, 5 },
+       [USB30_0_MASTER_RESET] = { 0x3b50, 4 },
+       [USB30_0_SLEEP_RESET] = { 0x3b50, 3 },
+       [USB30_0_UTMI_PHY_RESET] = { 0x3b50, 2 },
+       [USB30_0_POWERON_RESET] = { 0x3b50, 1 },
+       [USB30_0_PHY_RESET] = { 0x3b50, 0 },
+       [USB30_1_MASTER_RESET] = { 0x3b58, 4 },
+       [USB30_1_SLEEP_RESET] = { 0x3b58, 3 },
+       [USB30_1_UTMI_PHY_RESET] = { 0x3b58, 2 },
+       [USB30_1_POWERON_RESET] = { 0x3b58, 1 },
+       [USB30_1_PHY_RESET] = { 0x3b58, 0 },
+       [NSSFB0_RESET] = { 0x3b60, 6 },
+       [NSSFB1_RESET] = { 0x3b60, 7 },
+};
+
+static const struct regmap_config gcc_ipq806x_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x3e40,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc gcc_ipq806x_desc = {
+       .config = &gcc_ipq806x_regmap_config,
+       .clks = gcc_ipq806x_clks,
+       .num_clks = ARRAY_SIZE(gcc_ipq806x_clks),
+       .resets = gcc_ipq806x_resets,
+       .num_resets = ARRAY_SIZE(gcc_ipq806x_resets),
+};
+
+static const struct of_device_id gcc_ipq806x_match_table[] = {
+       { .compatible = "qcom,gcc-ipq8064" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, gcc_ipq806x_match_table);
+
+static int gcc_ipq806x_probe(struct platform_device *pdev)
+{
+       struct clk *clk;
+       struct device *dev = &pdev->dev;
+
+       /* Temporary until RPM clocks supported */
+       clk = clk_register_fixed_rate(dev, "cxo", NULL, CLK_IS_ROOT, 25000000);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       clk = clk_register_fixed_rate(dev, "pxo", NULL, CLK_IS_ROOT, 25000000);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       return qcom_cc_probe(pdev, &gcc_ipq806x_desc);
+}
+
+static int gcc_ipq806x_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+static struct platform_driver gcc_ipq806x_driver = {
+       .probe          = gcc_ipq806x_probe,
+       .remove         = gcc_ipq806x_remove,
+       .driver         = {
+               .name   = "gcc-ipq806x",
+               .owner  = THIS_MODULE,
+               .of_match_table = gcc_ipq806x_match_table,
+       },
+};
+
+static int __init gcc_ipq806x_init(void)
+{
+       return platform_driver_register(&gcc_ipq806x_driver);
+}
+core_initcall(gcc_ipq806x_init);
+
+static void __exit gcc_ipq806x_exit(void)
+{
+       platform_driver_unregister(&gcc_ipq806x_driver);
+}
+module_exit(gcc_ipq806x_exit);
+
+MODULE_DESCRIPTION("QCOM GCC IPQ806x Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:gcc-ipq806x");
index f4ffd91..007534f 100644 (file)
@@ -104,6 +104,7 @@ static struct clk_regmap pll14_vote = {
 
 #define P_PXO  0
 #define P_PLL8 1
+#define P_PLL3 2
 #define P_CXO  2
 
 static const u8 gcc_pxo_pll8_map[] = {
@@ -128,6 +129,18 @@ static const char *gcc_pxo_pll8_cxo[] = {
        "cxo",
 };
 
+static const u8 gcc_pxo_pll8_pll3_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL8]        = 3,
+       [P_PLL3]        = 6,
+};
+
+static const char *gcc_pxo_pll8_pll3[] = {
+       "pxo",
+       "pll8_vote",
+       "pll3",
+};
+
 static struct freq_tbl clk_tbl_gsbi_uart[] = {
        {  1843200, P_PLL8, 2,  6, 625 },
        {  3686400, P_PLL8, 2, 12, 625 },
@@ -1928,6 +1941,104 @@ static struct clk_branch usb_hs1_xcvr_clk = {
        },
 };
 
+static struct clk_rcg usb_hs3_xcvr_src = {
+       .ns_reg = 0x370c,
+       .md_reg = 0x3708,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_usb,
+       .clkr = {
+               .enable_reg = 0x370c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs3_xcvr_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       }
+};
+
+static struct clk_branch usb_hs3_xcvr_clk = {
+       .halt_reg = 0x2fc8,
+       .halt_bit = 30,
+       .clkr = {
+               .enable_reg = 0x370c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs3_xcvr_clk",
+                       .parent_names = (const char *[]){ "usb_hs3_xcvr_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg usb_hs4_xcvr_src = {
+       .ns_reg = 0x372c,
+       .md_reg = 0x3728,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_map,
+       },
+       .freq_tbl = clk_tbl_usb,
+       .clkr = {
+               .enable_reg = 0x372c,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs4_xcvr_src",
+                       .parent_names = gcc_pxo_pll8,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       }
+};
+
+static struct clk_branch usb_hs4_xcvr_clk = {
+       .halt_reg = 0x2fc8,
+       .halt_bit = 2,
+       .clkr = {
+               .enable_reg = 0x372c,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs4_xcvr_clk",
+                       .parent_names = (const char *[]){ "usb_hs4_xcvr_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
 static struct clk_rcg usb_hsic_xcvr_fs_src = {
        .ns_reg = 0x2928,
        .md_reg = 0x2924,
@@ -2456,6 +2567,34 @@ static struct clk_branch usb_hs1_h_clk = {
        },
 };
 
+static struct clk_branch usb_hs3_h_clk = {
+       .halt_reg = 0x2fc8,
+       .halt_bit = 31,
+       .clkr = {
+               .enable_reg = 0x3700,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs3_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch usb_hs4_h_clk = {
+       .halt_reg = 0x2fc8,
+       .halt_bit = 7,
+       .clkr = {
+               .enable_reg = 0x3720,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "usb_hs4_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
 static struct clk_branch usb_hsic_h_clk = {
        .halt_reg = 0x2fcc,
        .halt_bit = 28,
@@ -2582,6 +2721,244 @@ static struct clk_branch adm0_pbus_clk = {
        },
 };
 
+static struct freq_tbl clk_tbl_ce3[] = {
+       { 48000000, P_PLL8, 8 },
+       { 100000000, P_PLL3, 12 },
+       { 120000000, P_PLL3, 10 },
+       { }
+};
+
+static struct clk_rcg ce3_src = {
+       .ns_reg = 0x36c0,
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll3_map,
+       },
+       .freq_tbl = clk_tbl_ce3,
+       .clkr = {
+               .enable_reg = 0x2c08,
+               .enable_mask = BIT(7),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce3_src",
+                       .parent_names = gcc_pxo_pll8_pll3,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch ce3_core_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 5,
+       .clkr = {
+               .enable_reg = 0x36c4,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce3_core_clk",
+                       .parent_names = (const char *[]){ "ce3_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch ce3_h_clk = {
+       .halt_reg = 0x2fc4,
+       .halt_bit = 16,
+       .clkr = {
+               .enable_reg = 0x36c4,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ce3_h_clk",
+                       .parent_names = (const char *[]){ "ce3_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static const struct freq_tbl clk_tbl_sata_ref[] = {
+       { 48000000, P_PLL8, 8, 0, 0 },
+       { 100000000, P_PLL3, 12, 0, 0 },
+       { }
+};
+
+static struct clk_rcg sata_clk_src = {
+       .ns_reg = 0x2c08,
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 4,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = gcc_pxo_pll8_pll3_map,
+       },
+       .freq_tbl = clk_tbl_sata_ref,
+       .clkr = {
+               .enable_reg = 0x2c08,
+               .enable_mask = BIT(7),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_clk_src",
+                       .parent_names = gcc_pxo_pll8_pll3,
+                       .num_parents = 3,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch sata_rxoob_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 26,
+       .clkr = {
+               .enable_reg = 0x2c0c,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_rxoob_clk",
+                       .parent_names = (const char *[]){ "sata_clk_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch sata_pmalive_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 25,
+       .clkr = {
+               .enable_reg = 0x2c10,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_pmalive_clk",
+                       .parent_names = (const char *[]){ "sata_clk_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch sata_phy_ref_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 24,
+       .clkr = {
+               .enable_reg = 0x2c14,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_phy_ref_clk",
+                       .parent_names = (const char *[]){ "pxo" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+               },
+       },
+};
+
+static struct clk_branch sata_a_clk = {
+       .halt_reg = 0x2fc0,
+       .halt_bit = 12,
+       .clkr = {
+               .enable_reg = 0x2c20,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_a_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch sata_h_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 27,
+       .clkr = {
+               .enable_reg = 0x2c00,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch sfab_sata_s_h_clk = {
+       .halt_reg = 0x2fc4,
+       .halt_bit = 14,
+       .clkr = {
+               .enable_reg = 0x2480,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sfab_sata_s_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch sata_phy_cfg_clk = {
+       .halt_reg = 0x2fcc,
+       .halt_bit = 12,
+       .clkr = {
+               .enable_reg = 0x2c40,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sata_phy_cfg_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie_phy_ref_clk = {
+       .halt_reg = 0x2fdc,
+       .halt_bit = 29,
+       .clkr = {
+               .enable_reg = 0x22d0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_phy_ref_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie_h_clk = {
+       .halt_reg = 0x2fd4,
+       .halt_bit = 8,
+       .clkr = {
+               .enable_reg = 0x22cc,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_h_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
+static struct clk_branch pcie_a_clk = {
+       .halt_reg = 0x2fc0,
+       .halt_bit = 13,
+       .clkr = {
+               .enable_reg = 0x22c0,
+               .enable_mask = BIT(4),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcie_a_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
 static struct clk_branch pmic_arb0_h_clk = {
        .halt_reg = 0x2fd8,
        .halt_check = BRANCH_HALT_VOTED,
@@ -2869,13 +3246,205 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = {
 };
 
 static struct clk_regmap *gcc_apq8064_clks[] = {
+       [PLL3] = &pll3.clkr,
        [PLL8] = &pll8.clkr,
        [PLL8_VOTE] = &pll8_vote,
+       [PLL14] = &pll14.clkr,
+       [PLL14_VOTE] = &pll14_vote,
+       [GSBI1_UART_SRC] = &gsbi1_uart_src.clkr,
+       [GSBI1_UART_CLK] = &gsbi1_uart_clk.clkr,
+       [GSBI2_UART_SRC] = &gsbi2_uart_src.clkr,
+       [GSBI2_UART_CLK] = &gsbi2_uart_clk.clkr,
+       [GSBI3_UART_SRC] = &gsbi3_uart_src.clkr,
+       [GSBI3_UART_CLK] = &gsbi3_uart_clk.clkr,
+       [GSBI4_UART_SRC] = &gsbi4_uart_src.clkr,
+       [GSBI4_UART_CLK] = &gsbi4_uart_clk.clkr,
+       [GSBI5_UART_SRC] = &gsbi5_uart_src.clkr,
+       [GSBI5_UART_CLK] = &gsbi5_uart_clk.clkr,
+       [GSBI6_UART_SRC] = &gsbi6_uart_src.clkr,
+       [GSBI6_UART_CLK] = &gsbi6_uart_clk.clkr,
        [GSBI7_UART_SRC] = &gsbi7_uart_src.clkr,
        [GSBI7_UART_CLK] = &gsbi7_uart_clk.clkr,
+       [GSBI1_QUP_SRC] = &gsbi1_qup_src.clkr,
+       [GSBI1_QUP_CLK] = &gsbi1_qup_clk.clkr,
+       [GSBI2_QUP_SRC] = &gsbi2_qup_src.clkr,
+       [GSBI2_QUP_CLK] = &gsbi2_qup_clk.clkr,
+       [GSBI3_QUP_SRC] = &gsbi3_qup_src.clkr,
+       [GSBI3_QUP_CLK] = &gsbi3_qup_clk.clkr,
+       [GSBI4_QUP_SRC] = &gsbi4_qup_src.clkr,
+       [GSBI4_QUP_CLK] = &gsbi4_qup_clk.clkr,
+       [GSBI5_QUP_SRC] = &gsbi5_qup_src.clkr,
+       [GSBI5_QUP_CLK] = &gsbi5_qup_clk.clkr,
+       [GSBI6_QUP_SRC] = &gsbi6_qup_src.clkr,
+       [GSBI6_QUP_CLK] = &gsbi6_qup_clk.clkr,
        [GSBI7_QUP_SRC] = &gsbi7_qup_src.clkr,
        [GSBI7_QUP_CLK] = &gsbi7_qup_clk.clkr,
+       [GP0_SRC] = &gp0_src.clkr,
+       [GP0_CLK] = &gp0_clk.clkr,
+       [GP1_SRC] = &gp1_src.clkr,
+       [GP1_CLK] = &gp1_clk.clkr,
+       [GP2_SRC] = &gp2_src.clkr,
+       [GP2_CLK] = &gp2_clk.clkr,
+       [PMEM_A_CLK] = &pmem_clk.clkr,
+       [PRNG_SRC] = &prng_src.clkr,
+       [PRNG_CLK] = &prng_clk.clkr,
+       [SDC1_SRC] = &sdc1_src.clkr,
+       [SDC1_CLK] = &sdc1_clk.clkr,
+       [SDC2_SRC] = &sdc2_src.clkr,
+       [SDC2_CLK] = &sdc2_clk.clkr,
+       [SDC3_SRC] = &sdc3_src.clkr,
+       [SDC3_CLK] = &sdc3_clk.clkr,
+       [SDC4_SRC] = &sdc4_src.clkr,
+       [SDC4_CLK] = &sdc4_clk.clkr,
+       [TSIF_REF_SRC] = &tsif_ref_src.clkr,
+       [TSIF_REF_CLK] = &tsif_ref_clk.clkr,
+       [USB_HS1_XCVR_SRC] = &usb_hs1_xcvr_src.clkr,
+       [USB_HS1_XCVR_CLK] = &usb_hs1_xcvr_clk.clkr,
+       [USB_HS3_XCVR_SRC] = &usb_hs3_xcvr_src.clkr,
+       [USB_HS3_XCVR_CLK] = &usb_hs3_xcvr_clk.clkr,
+       [USB_HS4_XCVR_SRC] = &usb_hs4_xcvr_src.clkr,
+       [USB_HS4_XCVR_CLK] = &usb_hs4_xcvr_clk.clkr,
+       [USB_HSIC_XCVR_FS_SRC] = &usb_hsic_xcvr_fs_src.clkr,
+       [USB_HSIC_XCVR_FS_CLK] = &usb_hsic_xcvr_fs_clk.clkr,
+       [USB_HSIC_SYSTEM_CLK] = &usb_hsic_system_clk.clkr,
+       [USB_HSIC_HSIC_CLK] = &usb_hsic_hsic_clk.clkr,
+       [USB_HSIC_HSIO_CAL_CLK] = &usb_hsic_hsio_cal_clk.clkr,
+       [USB_FS1_XCVR_FS_SRC] = &usb_fs1_xcvr_fs_src.clkr,
+       [USB_FS1_XCVR_FS_CLK] = &usb_fs1_xcvr_fs_clk.clkr,
+       [USB_FS1_SYSTEM_CLK] = &usb_fs1_system_clk.clkr,
+       [SATA_H_CLK] = &sata_h_clk.clkr,
+       [SATA_CLK_SRC] = &sata_clk_src.clkr,
+       [SATA_RXOOB_CLK] = &sata_rxoob_clk.clkr,
+       [SATA_PMALIVE_CLK] = &sata_pmalive_clk.clkr,
+       [SATA_PHY_REF_CLK] = &sata_phy_ref_clk.clkr,
+       [SATA_PHY_CFG_CLK] = &sata_phy_cfg_clk.clkr,
+       [SATA_A_CLK] = &sata_a_clk.clkr,
+       [SFAB_SATA_S_H_CLK] = &sfab_sata_s_h_clk.clkr,
+       [CE3_SRC] = &ce3_src.clkr,
+       [CE3_CORE_CLK] = &ce3_core_clk.clkr,
+       [CE3_H_CLK] = &ce3_h_clk.clkr,
+       [DMA_BAM_H_CLK] = &dma_bam_h_clk.clkr,
+       [GSBI1_H_CLK] = &gsbi1_h_clk.clkr,
+       [GSBI2_H_CLK] = &gsbi2_h_clk.clkr,
+       [GSBI3_H_CLK] = &gsbi3_h_clk.clkr,
+       [GSBI4_H_CLK] = &gsbi4_h_clk.clkr,
+       [GSBI5_H_CLK] = &gsbi5_h_clk.clkr,
+       [GSBI6_H_CLK] = &gsbi6_h_clk.clkr,
        [GSBI7_H_CLK] = &gsbi7_h_clk.clkr,
+       [TSIF_H_CLK] = &tsif_h_clk.clkr,
+       [USB_FS1_H_CLK] = &usb_fs1_h_clk.clkr,
+       [USB_HS1_H_CLK] = &usb_hs1_h_clk.clkr,
+       [USB_HSIC_H_CLK] = &usb_hsic_h_clk.clkr,
+       [USB_HS3_H_CLK] = &usb_hs3_h_clk.clkr,
+       [USB_HS4_H_CLK] = &usb_hs4_h_clk.clkr,
+       [SDC1_H_CLK] = &sdc1_h_clk.clkr,
+       [SDC2_H_CLK] = &sdc2_h_clk.clkr,
+       [SDC3_H_CLK] = &sdc3_h_clk.clkr,
+       [SDC4_H_CLK] = &sdc4_h_clk.clkr,
+       [ADM0_CLK] = &adm0_clk.clkr,
+       [ADM0_PBUS_CLK] = &adm0_pbus_clk.clkr,
+       [PCIE_A_CLK] = &pcie_a_clk.clkr,
+       [PCIE_PHY_REF_CLK] = &pcie_phy_ref_clk.clkr,
+       [PCIE_H_CLK] = &pcie_h_clk.clkr,
+       [PMIC_ARB0_H_CLK] = &pmic_arb0_h_clk.clkr,
+       [PMIC_ARB1_H_CLK] = &pmic_arb1_h_clk.clkr,
+       [PMIC_SSBI2_CLK] = &pmic_ssbi2_clk.clkr,
+       [RPM_MSG_RAM_H_CLK] = &rpm_msg_ram_h_clk.clkr,
+};
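
The index macros (PLL3, GSBI1_UART_SRC, ...) come from the dt-bindings clock header shared with device trees, so sparse designated initializers keep this table in sync with consumer numbering; slots that are never assigned stay NULL and are skipped at registration time. A small standalone demonstration of the pattern (enum values here are invented for the example):

        #include <stdio.h>

        enum { ID_PLL3, ID_PLL8, ID_GSBI7_UART_SRC, NUM_CLKS };

        static const char *clk_table[NUM_CLKS] = {
                [ID_PLL3] = "pll3",
                [ID_GSBI7_UART_SRC] = "gsbi7_uart_src",
                /* ID_PLL8 left out on purpose: unset slots are NULL */
        };

        int main(void)
        {
                for (int i = 0; i < NUM_CLKS; i++)
                        printf("id %d -> %s\n", i,
                               clk_table[i] ? clk_table[i] : "(unpopulated)");
                return 0;
        }
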
+
+static const struct qcom_reset_map gcc_apq8064_resets[] = {
+       [QDSS_STM_RESET] = { 0x2060, 6 },
+       [AFAB_SMPSS_S_RESET] = { 0x20b8, 2 },
+       [AFAB_SMPSS_M1_RESET] = { 0x20b8, 1 },
+       [AFAB_SMPSS_M0_RESET] = { 0x20b8 },
+       [AFAB_EBI1_CH0_RESET] = { 0x20c0, 7 },
+       [AFAB_EBI1_CH1_RESET] = { 0x20c4, 7 },
+       [SFAB_ADM0_M0_RESET] = { 0x21e0, 7 },
+       [SFAB_ADM0_M1_RESET] = { 0x21e4, 7 },
+       [SFAB_ADM0_M2_RESET] = { 0x21e8, 7 },
+       [ADM0_C2_RESET] = { 0x220c, 4 },
+       [ADM0_C1_RESET] = { 0x220c, 3 },
+       [ADM0_C0_RESET] = { 0x220c, 2 },
+       [ADM0_PBUS_RESET] = { 0x220c, 1 },
+       [ADM0_RESET] = { 0x220c },
+       [QDSS_CLKS_SW_RESET] = { 0x2260, 5 },
+       [QDSS_POR_RESET] = { 0x2260, 4 },
+       [QDSS_TSCTR_RESET] = { 0x2260, 3 },
+       [QDSS_HRESET_RESET] = { 0x2260, 2 },
+       [QDSS_AXI_RESET] = { 0x2260, 1 },
+       [QDSS_DBG_RESET] = { 0x2260 },
+       [SFAB_PCIE_M_RESET] = { 0x22d8, 1 },
+       [SFAB_PCIE_S_RESET] = { 0x22d8 },
+       [PCIE_EXT_PCI_RESET] = { 0x22dc, 6 },
+       [PCIE_PHY_RESET] = { 0x22dc, 5 },
+       [PCIE_PCI_RESET] = { 0x22dc, 4 },
+       [PCIE_POR_RESET] = { 0x22dc, 3 },
+       [PCIE_HCLK_RESET] = { 0x22dc, 2 },
+       [PCIE_ACLK_RESET] = { 0x22dc },
+       [SFAB_USB3_M_RESET] = { 0x2360, 7 },
+       [SFAB_RIVA_M_RESET] = { 0x2380, 7 },
+       [SFAB_LPASS_RESET] = { 0x23a0, 7 },
+       [SFAB_AFAB_M_RESET] = { 0x23e0, 7 },
+       [AFAB_SFAB_M0_RESET] = { 0x2420, 7 },
+       [AFAB_SFAB_M1_RESET] = { 0x2424, 7 },
+       [SFAB_SATA_S_RESET] = { 0x2480, 7 },
+       [SFAB_DFAB_M_RESET] = { 0x2500, 7 },
+       [DFAB_SFAB_M_RESET] = { 0x2520, 7 },
+       [DFAB_SWAY0_RESET] = { 0x2540, 7 },
+       [DFAB_SWAY1_RESET] = { 0x2544, 7 },
+       [DFAB_ARB0_RESET] = { 0x2560, 7 },
+       [DFAB_ARB1_RESET] = { 0x2564, 7 },
+       [PPSS_PROC_RESET] = { 0x2594, 1 },
+       [PPSS_RESET] = { 0x2594 },
+       [DMA_BAM_RESET] = { 0x25c0, 7 },
+       [SPS_TIC_H_RESET] = { 0x2600, 7 },
+       [SFAB_CFPB_M_RESET] = { 0x2680, 7 },
+       [SFAB_CFPB_S_RESET] = { 0x26c0, 7 },
+       [TSIF_H_RESET] = { 0x2700, 7 },
+       [CE1_H_RESET] = { 0x2720, 7 },
+       [CE1_CORE_RESET] = { 0x2724, 7 },
+       [CE1_SLEEP_RESET] = { 0x2728, 7 },
+       [CE2_H_RESET] = { 0x2740, 7 },
+       [CE2_CORE_RESET] = { 0x2744, 7 },
+       [SFAB_SFPB_M_RESET] = { 0x2780, 7 },
+       [SFAB_SFPB_S_RESET] = { 0x27a0, 7 },
+       [RPM_PROC_RESET] = { 0x27c0, 7 },
+       [PMIC_SSBI2_RESET] = { 0x280c, 12 },
+       [SDC1_RESET] = { 0x2830 },
+       [SDC2_RESET] = { 0x2850 },
+       [SDC3_RESET] = { 0x2870 },
+       [SDC4_RESET] = { 0x2890 },
+       [USB_HS1_RESET] = { 0x2910 },
+       [USB_HSIC_RESET] = { 0x2934 },
+       [USB_FS1_XCVR_RESET] = { 0x2974, 1 },
+       [USB_FS1_RESET] = { 0x2974 },
+       [GSBI1_RESET] = { 0x29dc },
+       [GSBI2_RESET] = { 0x29fc },
+       [GSBI3_RESET] = { 0x2a1c },
+       [GSBI4_RESET] = { 0x2a3c },
+       [GSBI5_RESET] = { 0x2a5c },
+       [GSBI6_RESET] = { 0x2a7c },
+       [GSBI7_RESET] = { 0x2a9c },
+       [SPDM_RESET] = { 0x2b6c },
+       [TLMM_H_RESET] = { 0x2ba0, 7 },
+       [SATA_SFAB_M_RESET] = { 0x2c18 },
+       [SATA_RESET] = { 0x2c1c },
+       [GSS_SLP_RESET] = { 0x2c60, 7 },
+       [GSS_RESET] = { 0x2c64 },
+       [TSSC_RESET] = { 0x2ca0, 7 },
+       [PDM_RESET] = { 0x2cc0, 12 },
+       [MPM_H_RESET] = { 0x2da0, 7 },
+       [MPM_RESET] = { 0x2da4 },
+       [SFAB_SMPSS_S_RESET] = { 0x2e00, 7 },
+       [PRNG_RESET] = { 0x2e80, 12 },
+       [RIVA_RESET] = { 0x35e0 },
+       [CE3_H_RESET] = { 0x36c4, 7 },
+       [SFAB_CE3_M_RESET] = { 0x36c8, 1 },
+       [SFAB_CE3_S_RESET] = { 0x36c8 },
+       [CE3_RESET] = { 0x36cc, 7 },
+       [CE3_SLEEP_RESET] = { 0x36d0, 7 },
+       [USB_HS3_RESET] = { 0x3710 },
+       [USB_HS4_RESET] = { 0x3730 },
 };
 
 static const struct regmap_config gcc_msm8960_regmap_config = {
@@ -2886,6 +3455,14 @@ static const struct regmap_config gcc_msm8960_regmap_config = {
        .fast_io        = true,
 };
 
+static const struct regmap_config gcc_apq8064_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x3880,
+       .fast_io        = true,
+};
+
 static const struct qcom_cc_desc gcc_msm8960_desc = {
        .config = &gcc_msm8960_regmap_config,
        .clks = gcc_msm8960_clks,
@@ -2895,11 +3472,11 @@ static const struct qcom_cc_desc gcc_msm8960_desc = {
 };
 
 static const struct qcom_cc_desc gcc_apq8064_desc = {
-       .config = &gcc_msm8960_regmap_config,
+       .config = &gcc_apq8064_regmap_config,
        .clks = gcc_apq8064_clks,
        .num_clks = ARRAY_SIZE(gcc_apq8064_clks),
-       .resets = gcc_msm8960_resets,
-       .num_resets = ARRAY_SIZE(gcc_msm8960_resets),
+       .resets = gcc_apq8064_resets,
+       .num_resets = ARRAY_SIZE(gcc_apq8064_resets),
 };
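
For context, a descriptor like gcc_apq8064_desc is eventually handed to the shared qcom_cc_probe() helper, which registers the regmap, the clocks, and the reset controller in one step. Roughly (a sketch only; the probe in this file actually picks the descriptor out of the OF match data rather than hard-coding it):

        static int gcc_apq8064_probe(struct platform_device *pdev)
        {
                /* sketch: the real probe resolves the desc via of_match_device() */
                return qcom_cc_probe(pdev, &gcc_apq8064_desc);
        }
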
 
 static const struct of_device_id gcc_msm8960_match_table[] = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
new file mode 100644 (file)
index 0000000..751eea3
--- /dev/null
@@ -0,0 +1,3352 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/reset-controller.h>
+
+#include <dt-bindings/clock/qcom,mmcc-apq8084.h>
+#include <dt-bindings/reset/qcom,mmcc-apq8084.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "reset.h"
+
+#define P_XO           0
+#define P_MMPLL0       1
+#define P_EDPLINK      1
+#define P_MMPLL1       2
+#define P_HDMIPLL      2
+#define P_GPLL0                3
+#define P_EDPVCO       3
+#define P_MMPLL4       4
+#define P_DSI0PLL      4
+#define P_DSI0PLL_BYTE 4
+#define P_MMPLL2       4
+#define P_MMPLL3       4
+#define P_GPLL1                5
+#define P_DSI1PLL      5
+#define P_DSI1PLL_BYTE 5
+#define P_MMSLEEP      6
+
+static const u8 mmcc_xo_mmpll0_mmpll1_gpll0_map[] = {
+       [P_XO]          = 0,
+       [P_MMPLL0]      = 1,
+       [P_MMPLL1]      = 2,
+       [P_GPLL0]       = 5,
+};
+
+static const char *mmcc_xo_mmpll0_mmpll1_gpll0[] = {
+       "xo",
+       "mmpll0_vote",
+       "mmpll1_vote",
+       "mmss_gpll0_vote",
+};
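
Each *_map[] is indexed by the P_* positions defined above and yields the hardware mux select code, which need not be consecutive or in order (GPLL0 sits at select code 5 here); the companion names array must list parents at those same positions. A standalone illustration of the lookup:

        #include <stdio.h>

        enum { P_XO, P_MMPLL0, P_MMPLL1, P_GPLL0 = 3 }; /* mirrors the defines above */

        static const unsigned char sel_map[] = {
                [P_XO]     = 0,
                [P_MMPLL0] = 1,
                [P_MMPLL1] = 2,
                [P_GPLL0]  = 5, /* hardware select code, not the array position */
        };

        int main(void)
        {
                printf("GPLL0 -> mux select %u\n", sel_map[P_GPLL0]);
                return 0;
        }
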
+
+static const u8 mmcc_xo_mmpll0_dsi_hdmi_gpll0_map[] = {
+       [P_XO]          = 0,
+       [P_MMPLL0]      = 1,
+       [P_HDMIPLL]     = 4,
+       [P_GPLL0]       = 5,
+       [P_DSI0PLL]     = 2,
+       [P_DSI1PLL]     = 3,
+};
+
+static const char *mmcc_xo_mmpll0_dsi_hdmi_gpll0[] = {
+       "xo",
+       "mmpll0_vote",
+       "hdmipll",
+       "mmss_gpll0_vote",
+       "dsi0pll",
+       "dsi1pll",
+};
+
+static const u8 mmcc_xo_mmpll0_1_2_gpll0_map[] = {
+       [P_XO]          = 0,
+       [P_MMPLL0]      = 1,
+       [P_MMPLL1]      = 2,
+       [P_GPLL0]       = 5,
+       [P_MMPLL2]      = 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_2_gpll0[] = {
+       "xo",
+       "mmpll0_vote",
+       "mmpll1_vote",
+       "mmss_gpll0_vote",
+       "mmpll2",
+};
+
+static const u8 mmcc_xo_mmpll0_1_3_gpll0_map[] = {
+       [P_XO]          = 0,
+       [P_MMPLL0]      = 1,
+       [P_MMPLL1]      = 2,
+       [P_GPLL0]       = 5,
+       [P_MMPLL3]      = 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_3_gpll0[] = {
+       "xo",
+       "mmpll0_vote",
+       "mmpll1_vote",
+       "mmss_gpll0_vote",
+       "mmpll3",
+};
+
+static const u8 mmcc_xo_dsi_hdmi_edp_map[] = {
+       [P_XO]          = 0,
+       [P_EDPLINK]     = 4,
+       [P_HDMIPLL]     = 3,
+       [P_EDPVCO]      = 5,
+       [P_DSI0PLL]     = 1,
+       [P_DSI1PLL]     = 2,
+};
+
+static const char *mmcc_xo_dsi_hdmi_edp[] = {
+       "xo",
+       "edp_link_clk",
+       "hdmipll",
+       "edp_vco_div",
+       "dsi0pll",
+       "dsi1pll",
+};
+
+static const u8 mmcc_xo_dsi_hdmi_edp_gpll0_map[] = {
+       [P_XO]          = 0,
+       [P_EDPLINK]     = 4,
+       [P_HDMIPLL]     = 3,
+       [P_GPLL0]       = 5,
+       [P_DSI0PLL]     = 1,
+       [P_DSI1PLL]     = 2,
+};
+
+static const char *mmcc_xo_dsi_hdmi_edp_gpll0[] = {
+       "xo",
+       "edp_link_clk",
+       "hdmipll",
+       "gpll0_vote",
+       "dsi0pll",
+       "dsi1pll",
+};
+
+static const u8 mmcc_xo_dsibyte_hdmi_edp_gpll0_map[] = {
+       [P_XO]                  = 0,
+       [P_EDPLINK]             = 4,
+       [P_HDMIPLL]             = 3,
+       [P_GPLL0]               = 5,
+       [P_DSI0PLL_BYTE]        = 1,
+       [P_DSI1PLL_BYTE]        = 2,
+};
+
+static const char *mmcc_xo_dsibyte_hdmi_edp_gpll0[] = {
+       "xo",
+       "edp_link_clk",
+       "hdmipll",
+       "gpll0_vote",
+       "dsi0pllbyte",
+       "dsi1pllbyte",
+};
+
+static const u8 mmcc_xo_mmpll0_1_4_gpll0_map[] = {
+       [P_XO]          = 0,
+       [P_MMPLL0]      = 1,
+       [P_MMPLL1]      = 2,
+       [P_GPLL0]       = 5,
+       [P_MMPLL4]      = 3,
+};
+
+static const char *mmcc_xo_mmpll0_1_4_gpll0[] = {
+       "xo",
+       "mmpll0",
+       "mmpll1",
+       "gpll0",
+       "mmpll4",
+};
+
+static const u8 mmcc_xo_mmpll0_1_4_gpll1_0_map[] = {
+       [P_XO]          = 0,
+       [P_MMPLL0]      = 1,
+       [P_MMPLL1]      = 2,
+       [P_MMPLL4]      = 3,
+       [P_GPLL0]       = 5,
+       [P_GPLL1]       = 4,
+};
+
+static const char *mmcc_xo_mmpll0_1_4_gpll1_0[] = {
+       "xo",
+       "mmpll0",
+       "mmpll1",
+       "gpll0",
+       "mmpll4",
+       "gpll1",
+};
+
+static const u8 mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map[] = {
+       [P_XO]          = 0,
+       [P_MMPLL0]      = 1,
+       [P_MMPLL1]      = 2,
+       [P_MMPLL4]      = 3,
+       [P_GPLL0]       = 5,
+       [P_GPLL1]       = 4,
+       [P_MMSLEEP]     = 6,
+};
+
+static const char *mmcc_xo_mmpll0_1_4_gpll1_0_sleep[] = {
+       "xo",
+       "mmpll0",
+       "mmpll1",
+       "gpll0",
+       "mmpll4",
+       "gpll1",
+       "sleep_clk_src",
+};
+
+#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
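
F() packs half-integer pre-dividers into a single integer field as 2*h - 1, so h = 2.5 encodes to 4 and h = 1 to 1; the divider is recovered as (field + 1) / 2. A quick standalone check of the encoding (struct layout assumed to be { freq, src, pre_div, m, n }, matching how the tables below read):

        #include <stdio.h>

        struct freq_tbl { unsigned long freq; int src, pre_div, m, n; };

        #define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }

        int main(void)
        {
                struct freq_tbl t[] = {
                        F(466800000, 2, 2.5, 0, 0), /* half-integer divider */
                        F(19200000, 0, 1, 0, 0),    /* plain divide-by-one */
                };
                for (int i = 0; i < 2; i++)
                        printf("%lu Hz: field %d -> divider %.1f\n",
                               t[i].freq, t[i].pre_div, (t[i].pre_div + 1) / 2.0);
                return 0;
        }
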
+
+static struct clk_pll mmpll0 = {
+       .l_reg = 0x0004,
+       .m_reg = 0x0008,
+       .n_reg = 0x000c,
+       .config_reg = 0x0014,
+       .mode_reg = 0x0000,
+       .status_reg = 0x001c,
+       .status_bit = 17,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll0",
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_regmap mmpll0_vote = {
+       .enable_reg = 0x0100,
+       .enable_mask = BIT(0),
+       .hw.init = &(struct clk_init_data){
+               .name = "mmpll0_vote",
+               .parent_names = (const char *[]){ "mmpll0" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
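
mmpll0_vote has no register block of its own: it only owns BIT(0) of the shared 0x0100 voting register (mmpll1_vote owns BIT(1) below), and the hardware keeps a voted PLL running while any master's vote bit for it remains set. A hypothetical sketch of that OR-of-votes behaviour:

        #include <stdint.h>
        #include <stdio.h>

        /* Hypothetical: each master writes its own vote bit for a PLL; the
         * PLL stays enabled while the OR of all votes is non-zero. */
        static uint32_t votes;  /* one bit per voter, for a single PLL */

        static void set_vote(int voter, int on)
        {
                if (on)
                        votes |= 1u << voter;
                else
                        votes &= ~(1u << voter);
                printf("votes=0x%x -> PLL %s\n", votes, votes ? "running" : "off");
        }

        int main(void)
        {
                set_vote(0, 1); /* first master votes the PLL on */
                set_vote(1, 1); /* second master piles on */
                set_vote(0, 0); /* first releases; the second's vote keeps it up */
                set_vote(1, 0); /* last vote dropped; PLL may now power down */
                return 0;
        }
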
+
+static struct clk_pll mmpll1 = {
+       .l_reg = 0x0044,
+       .m_reg = 0x0048,
+       .n_reg = 0x004c,
+       .config_reg = 0x0050,
+       .mode_reg = 0x0040,
+       .status_reg = 0x005c,
+       .status_bit = 17,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll1",
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_regmap mmpll1_vote = {
+       .enable_reg = 0x0100,
+       .enable_mask = BIT(1),
+       .hw.init = &(struct clk_init_data){
+               .name = "mmpll1_vote",
+               .parent_names = (const char *[]){ "mmpll1" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
+static struct clk_pll mmpll2 = {
+       .l_reg = 0x4104,
+       .m_reg = 0x4108,
+       .n_reg = 0x410c,
+       .config_reg = 0x4110,
+       .mode_reg = 0x4100,
+       .status_reg = 0x411c,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll2",
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_pll mmpll3 = {
+       .l_reg = 0x0084,
+       .m_reg = 0x0088,
+       .n_reg = 0x008c,
+       .config_reg = 0x0090,
+       .mode_reg = 0x0080,
+       .status_reg = 0x009c,
+       .status_bit = 17,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll3",
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_pll mmpll4 = {
+       .l_reg = 0x00a4,
+       .m_reg = 0x00a8,
+       .n_reg = 0x00ac,
+       .config_reg = 0x00b0,
+       .mode_reg = 0x0080,
+       .status_reg = 0x00bc,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmpll4",
+               .parent_names = (const char *[]){ "xo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+static struct clk_rcg2 mmss_ahb_clk_src = {
+       .cmd_rcgr = 0x5000,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmss_ahb_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_mmss_axi_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(37500000, P_GPLL0, 16, 0, 0),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(75000000, P_GPLL0, 8, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(333430000, P_MMPLL1, 3.5, 0, 0),
+       F(400000000, P_MMPLL0, 2, 0, 0),
+       F(466800000, P_MMPLL1, 2.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 mmss_axi_clk_src = {
+       .cmd_rcgr = 0x5040,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_mmss_axi_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mmss_axi_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_ocmemnoc_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       F(37500000, P_GPLL0, 16, 0, 0),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(75000000, P_GPLL0, 8, 0, 0),
+       F(109090000, P_GPLL0, 5.5, 0, 0),
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(228570000, P_MMPLL0, 3.5, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 ocmemnoc_clk_src = {
+       .cmd_rcgr = 0x5090,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_ocmemnoc_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "ocmemnoc_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_camss_csi0_3_clk[] = {
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(200000000, P_MMPLL0, 4, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 csi0_clk_src = {
+       .cmd_rcgr = 0x3090,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_csi0_3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi0_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi1_clk_src = {
+       .cmd_rcgr = 0x3100,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_csi0_3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi1_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi2_clk_src = {
+       .cmd_rcgr = 0x3160,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_csi0_3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi2_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi3_clk_src = {
+       .cmd_rcgr = 0x31c0,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_csi0_3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi3_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_camss_vfe_vfe0_1_clk[] = {
+       F(37500000, P_GPLL0, 16, 0, 0),
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(60000000, P_GPLL0, 10, 0, 0),
+       F(80000000, P_GPLL0, 7.5, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(109090000, P_GPLL0, 5.5, 0, 0),
+       F(133330000, P_GPLL0, 4.5, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(228570000, P_MMPLL0, 3.5, 0, 0),
+       F(266670000, P_MMPLL0, 3, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       F(465000000, P_MMPLL4, 2, 0, 0),
+       F(600000000, P_GPLL0, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 vfe0_clk_src = {
+       .cmd_rcgr = 0x3600,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vfe0_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 vfe1_clk_src = {
+       .cmd_rcgr = 0x3620,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_vfe_vfe0_1_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vfe1_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_mdss_mdp_clk[] = {
+       F(37500000, P_GPLL0, 16, 0, 0),
+       F(60000000, P_GPLL0, 10, 0, 0),
+       F(75000000, P_GPLL0, 8, 0, 0),
+       F(85710000, P_GPLL0, 7, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(160000000, P_MMPLL0, 5, 0, 0),
+       F(200000000, P_MMPLL0, 4, 0, 0),
+       F(228570000, P_MMPLL0, 3.5, 0, 0),
+       F(300000000, P_GPLL0, 2, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 mdp_clk_src = {
+       .cmd_rcgr = 0x2040,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_dsi_hdmi_gpll0_map,
+       .freq_tbl = ftbl_mdss_mdp_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mdp_clk_src",
+               .parent_names = mmcc_xo_mmpll0_dsi_hdmi_gpll0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 gfx3d_clk_src = {
+       .cmd_rcgr = 0x4000,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_2_gpll0_map,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "gfx3d_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_2_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_camss_jpeg_jpeg0_2_clk[] = {
+       F(75000000, P_GPLL0, 8, 0, 0),
+       F(133330000, P_GPLL0, 4.5, 0, 0),
+       F(200000000, P_GPLL0, 3, 0, 0),
+       F(228570000, P_MMPLL0, 3.5, 0, 0),
+       F(266670000, P_MMPLL0, 3, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 jpeg0_clk_src = {
+       .cmd_rcgr = 0x3500,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "jpeg0_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 jpeg1_clk_src = {
+       .cmd_rcgr = 0x3520,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "jpeg1_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 jpeg2_clk_src = {
+       .cmd_rcgr = 0x3540,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_jpeg_jpeg0_2_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "jpeg2_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl pixel_freq_tbl[] = {
+       { .src = P_DSI0PLL },
+       { }
+};
+
+static struct clk_rcg2 pclk0_clk_src = {
+       .cmd_rcgr = 0x2000,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+       .freq_tbl = pixel_freq_tbl,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pclk0_clk_src",
+               .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+               .num_parents = 6,
+               .ops = &clk_pixel_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_rcg2 pclk1_clk_src = {
+       .cmd_rcgr = 0x2020,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+       .freq_tbl = pixel_freq_tbl,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pclk1_clk_src",
+               .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+               .num_parents = 6,
+               .ops = &clk_pixel_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct freq_tbl ftbl_venus0_vcodec0_clk[] = {
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(133330000, P_GPLL0, 4.5, 0, 0),
+       F(200000000, P_MMPLL0, 4, 0, 0),
+       F(266670000, P_MMPLL0, 3, 0, 0),
+       F(465000000, P_MMPLL3, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 vcodec0_clk_src = {
+       .cmd_rcgr = 0x1000,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_3_gpll0_map,
+       .freq_tbl = ftbl_venus0_vcodec0_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vcodec0_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_3_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_avsync_vp_clk[] = {
+       F(150000000, P_GPLL0, 4, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 vp_clk_src = {
+       .cmd_rcgr = 0x2430,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_avsync_vp_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vp_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_camss_cci_cci_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 cci_clk_src = {
+       .cmd_rcgr = 0x3300,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+       .freq_tbl = ftbl_camss_cci_cci_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "cci_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_camss_gp0_1_clk[] = {
+       F(10000, P_XO, 16, 1, 120),
+       F(24000, P_XO, 16, 1, 50),
+       F(6000000, P_GPLL0, 10, 1, 10),
+       F(12000000, P_GPLL0, 10, 1, 5),
+       F(13000000, P_GPLL0, 4, 13, 150),
+       F(24000000, P_GPLL0, 5, 1, 5),
+       { }
+};
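
These rows exercise the full M/N counter: rate = parent / pre_div * m / n. Taking xo = 19.2 MHz and gpll0 = 600 MHz (assumptions read off the 600000000 P_GPLL0 rows elsewhere in this file, not stated here), the odd-looking entries check out, e.g. 19.2 MHz / 16 * 1/120 = 10 kHz. A standalone verification of two rows:

        #include <stdio.h>

        /* Assumes xo = 19.2 MHz and gpll0 = 600 MHz. */
        int main(void)
        {
                struct { double parent_hz, pre_div; unsigned m, n; } rows[] = {
                        { 19200000.0, 16, 1, 120 },  /* F(10000,    P_XO,    16,  1, 120) */
                        { 600000000.0, 4, 13, 150 }, /* F(13000000, P_GPLL0,  4, 13, 150) */
                };
                for (int i = 0; i < 2; i++)
                        printf("%.0f Hz\n", rows[i].parent_hz / rows[i].pre_div
                                            * rows[i].m / rows[i].n);
                return 0;
        }
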
+
+static struct clk_rcg2 camss_gp0_clk_src = {
+       .cmd_rcgr = 0x3420,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map,
+       .freq_tbl = ftbl_camss_gp0_1_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "camss_gp0_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0_sleep,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 camss_gp1_clk_src = {
+       .cmd_rcgr = 0x3450,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_sleep_map,
+       .freq_tbl = ftbl_camss_gp0_1_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "camss_gp1_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0_sleep,
+               .num_parents = 7,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_camss_mclk0_3_clk[] = {
+       F(4800000, P_XO, 4, 0, 0),
+       F(6000000, P_GPLL0, 10, 1, 10),
+       F(8000000, P_GPLL0, 15, 1, 5),
+       F(9600000, P_XO, 2, 0, 0),
+       F(16000000, P_MMPLL0, 10, 1, 5),
+       F(19200000, P_XO, 1, 0, 0),
+       F(24000000, P_GPLL0, 5, 1, 5),
+       F(32000000, P_MMPLL0, 5, 1, 5),
+       F(48000000, P_GPLL0, 12.5, 0, 0),
+       F(64000000, P_MMPLL0, 12.5, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 mclk0_clk_src = {
+       .cmd_rcgr = 0x3360,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+       .freq_tbl = ftbl_camss_mclk0_3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mclk0_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 mclk1_clk_src = {
+       .cmd_rcgr = 0x3390,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+       .freq_tbl = ftbl_camss_mclk0_3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mclk1_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 mclk2_clk_src = {
+       .cmd_rcgr = 0x33c0,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+       .freq_tbl = ftbl_camss_mclk0_3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mclk2_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 mclk3_clk_src = {
+       .cmd_rcgr = 0x33f0,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll1_0_map,
+       .freq_tbl = ftbl_camss_mclk0_3_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "mclk3_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll1_0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_camss_phy0_2_csi0_2phytimer_clk[] = {
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(200000000, P_MMPLL0, 4, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 csi0phytimer_clk_src = {
+       .cmd_rcgr = 0x3000,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi0phytimer_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi1phytimer_clk_src = {
+       .cmd_rcgr = 0x3030,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi1phytimer_clk_src",
+                       .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 csi2phytimer_clk_src = {
+       .cmd_rcgr = 0x3060,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_phy0_2_csi0_2phytimer_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "csi2phytimer_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_camss_vfe_cpp_clk[] = {
+       F(133330000, P_GPLL0, 4.5, 0, 0),
+       F(266670000, P_MMPLL0, 3, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       F(372000000, P_MMPLL4, 2.5, 0, 0),
+       F(465000000, P_MMPLL4, 2, 0, 0),
+       F(600000000, P_GPLL0, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 cpp_clk_src = {
+       .cmd_rcgr = 0x3640,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_1_4_gpll0_map,
+       .freq_tbl = ftbl_camss_vfe_cpp_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "cpp_clk_src",
+               .parent_names = mmcc_xo_mmpll0_1_4_gpll0,
+               .num_parents = 5,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl byte_freq_tbl[] = {
+       { .src = P_DSI0PLL_BYTE },
+       { }
+};
+
+static struct clk_rcg2 byte0_clk_src = {
+       .cmd_rcgr = 0x2120,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
+       .freq_tbl = byte_freq_tbl,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "byte0_clk_src",
+               .parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+               .num_parents = 6,
+               .ops = &clk_byte_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct clk_rcg2 byte1_clk_src = {
+       .cmd_rcgr = 0x2140,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
+       .freq_tbl = byte_freq_tbl,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "byte1_clk_src",
+               .parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+               .num_parents = 6,
+               .ops = &clk_byte_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct freq_tbl ftbl_mdss_edpaux_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 edpaux_clk_src = {
+       .cmd_rcgr = 0x20e0,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_mdss_edpaux_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "edpaux_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_mdss_edplink_clk[] = {
+       F(135000000, P_EDPLINK, 2, 0, 0),
+       F(270000000, P_EDPLINK, 11, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 edplink_clk_src = {
+       .cmd_rcgr = 0x20c0,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+       .freq_tbl = ftbl_mdss_edplink_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "edplink_clk_src",
+               .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct freq_tbl edp_pixel_freq_tbl[] = {
+       { .src = P_EDPVCO },
+       { }
+};
+
+static struct clk_rcg2 edppixel_clk_src = {
+       .cmd_rcgr = 0x20a0,
+       .mnd_width = 8,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsi_hdmi_edp_map,
+       .freq_tbl = edp_pixel_freq_tbl,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "edppixel_clk_src",
+               .parent_names = mmcc_xo_dsi_hdmi_edp,
+               .num_parents = 6,
+               .ops = &clk_edp_pixel_ops,
+       },
+};
+
+static struct freq_tbl ftbl_mdss_esc0_1_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 esc0_clk_src = {
+       .cmd_rcgr = 0x2160,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
+       .freq_tbl = ftbl_mdss_esc0_1_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "esc0_clk_src",
+               .parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_rcg2 esc1_clk_src = {
+       .cmd_rcgr = 0x2180,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsibyte_hdmi_edp_gpll0_map,
+       .freq_tbl = ftbl_mdss_esc0_1_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "esc1_clk_src",
+               .parent_names = mmcc_xo_dsibyte_hdmi_edp_gpll0,
+               .num_parents = 6,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl extpclk_freq_tbl[] = {
+       { .src = P_HDMIPLL },
+       { }
+};
+
+static struct clk_rcg2 extpclk_clk_src = {
+       .cmd_rcgr = 0x2060,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_dsi_hdmi_edp_gpll0_map,
+       .freq_tbl = extpclk_freq_tbl,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "extpclk_clk_src",
+               .parent_names = mmcc_xo_dsi_hdmi_edp_gpll0,
+               .num_parents = 6,
+               .ops = &clk_byte_ops,
+               .flags = CLK_SET_RATE_PARENT,
+       },
+};
+
+static struct freq_tbl ftbl_mdss_hdmi_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 hdmi_clk_src = {
+       .cmd_rcgr = 0x2100,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_mdss_hdmi_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "hdmi_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_mdss_vsync_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 vsync_clk_src = {
+       .cmd_rcgr = 0x2080,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_mdss_vsync_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vsync_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_mmss_rbcpr_clk[] = {
+       F(50000000, P_GPLL0, 12, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 rbcpr_clk_src = {
+       .cmd_rcgr = 0x4060,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_mmss_rbcpr_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "rbcpr_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_oxili_rbbmtimer_clk[] = {
+       F(19200000, P_XO, 1, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 rbbmtimer_clk_src = {
+       .cmd_rcgr = 0x4090,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_oxili_rbbmtimer_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "rbbmtimer_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_vpu_maple_clk[] = {
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(133330000, P_GPLL0, 4.5, 0, 0),
+       F(200000000, P_MMPLL0, 4, 0, 0),
+       F(266670000, P_MMPLL0, 3, 0, 0),
+       F(465000000, P_MMPLL3, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 maple_clk_src = {
+       .cmd_rcgr = 0x1320,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_vpu_maple_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "maple_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_vpu_vdp_clk[] = {
+       F(50000000, P_GPLL0, 12, 0, 0),
+       F(100000000, P_GPLL0, 6, 0, 0),
+       F(200000000, P_MMPLL0, 4, 0, 0),
+       F(320000000, P_MMPLL0, 2.5, 0, 0),
+       F(400000000, P_MMPLL0, 2, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 vdp_clk_src = {
+       .cmd_rcgr = 0x1300,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_vpu_vdp_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vdp_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct freq_tbl ftbl_vpu_bus_clk[] = {
+       F(40000000, P_GPLL0, 15, 0, 0),
+       F(80000000, P_MMPLL0, 10, 0, 0),
+       { }
+};
+
+static struct clk_rcg2 vpu_bus_clk_src = {
+       .cmd_rcgr = 0x1340,
+       .hid_width = 5,
+       .parent_map = mmcc_xo_mmpll0_mmpll1_gpll0_map,
+       .freq_tbl = ftbl_vpu_bus_clk,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "vpu_bus_clk_src",
+               .parent_names = mmcc_xo_mmpll0_mmpll1_gpll0,
+               .num_parents = 4,
+               .ops = &clk_rcg2_ops,
+       },
+};
+
+static struct clk_branch mmss_cxo_clk = {
+       .halt_reg = 0x5104,
+       .clkr = {
+               .enable_reg = 0x5104,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_cxo_clk",
+                       .parent_names = (const char *[]){ "xo" },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_sleepclk_clk = {
+       .halt_reg = 0x5100,
+       .clkr = {
+               .enable_reg = 0x5100,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_sleepclk_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch avsync_ahb_clk = {
+       .halt_reg = 0x2414,
+       .clkr = {
+               .enable_reg = 0x2414,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "avsync_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch avsync_edppixel_clk = {
+       .halt_reg = 0x2418,
+       .clkr = {
+               .enable_reg = 0x2418,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "avsync_edppixel_clk",
+                       .parent_names = (const char *[]){
+                               "edppixel_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch avsync_extpclk_clk = {
+       .halt_reg = 0x2410,
+       .clkr = {
+               .enable_reg = 0x2410,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "avsync_extpclk_clk",
+                       .parent_names = (const char *[]){
+                               "extpclk_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch avsync_pclk0_clk = {
+       .halt_reg = 0x241c,
+       .clkr = {
+               .enable_reg = 0x241c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "avsync_pclk0_clk",
+                       .parent_names = (const char *[]){
+                               "pclk0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch avsync_pclk1_clk = {
+       .halt_reg = 0x2420,
+       .clkr = {
+               .enable_reg = 0x2420,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "avsync_pclk1_clk",
+                       .parent_names = (const char *[]){
+                               "pclk1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch avsync_vp_clk = {
+       .halt_reg = 0x2404,
+       .clkr = {
+               .enable_reg = 0x2404,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "avsync_vp_clk",
+                       .parent_names = (const char *[]){
+                               "vp_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_ahb_clk = {
+       .halt_reg = 0x348c,
+       .clkr = {
+               .enable_reg = 0x348c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_cci_cci_ahb_clk = {
+       .halt_reg = 0x3348,
+       .clkr = {
+               .enable_reg = 0x3348,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cci_cci_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_cci_cci_clk = {
+       .halt_reg = 0x3344,
+       .clkr = {
+               .enable_reg = 0x3344,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_cci_cci_clk",
+                       .parent_names = (const char *[]){
+                               "cci_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0_ahb_clk = {
+       .halt_reg = 0x30bc,
+       .clkr = {
+               .enable_reg = 0x30bc,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0_clk = {
+       .halt_reg = 0x30b4,
+       .clkr = {
+               .enable_reg = 0x30b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0_clk",
+                       .parent_names = (const char *[]){
+                               "csi0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0phy_clk = {
+       .halt_reg = 0x30c4,
+       .clkr = {
+               .enable_reg = 0x30c4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0phy_clk",
+                       .parent_names = (const char *[]){
+                               "csi0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0pix_clk = {
+       .halt_reg = 0x30e4,
+       .clkr = {
+               .enable_reg = 0x30e4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0pix_clk",
+                       .parent_names = (const char *[]){
+                               "csi0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi0rdi_clk = {
+       .halt_reg = 0x30d4,
+       .clkr = {
+               .enable_reg = 0x30d4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi0rdi_clk",
+                       .parent_names = (const char *[]){
+                               "csi0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1_ahb_clk = {
+       .halt_reg = 0x3128,
+       .clkr = {
+               .enable_reg = 0x3128,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1_clk = {
+       .halt_reg = 0x3124,
+       .clkr = {
+               .enable_reg = 0x3124,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1_clk",
+                       .parent_names = (const char *[]){
+                               "csi1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1phy_clk = {
+       .halt_reg = 0x3134,
+       .clkr = {
+               .enable_reg = 0x3134,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1phy_clk",
+                       .parent_names = (const char *[]){
+                               "csi1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1pix_clk = {
+       .halt_reg = 0x3154,
+       .clkr = {
+               .enable_reg = 0x3154,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1pix_clk",
+                       .parent_names = (const char *[]){
+                               "csi1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi1rdi_clk = {
+       .halt_reg = 0x3144,
+       .clkr = {
+               .enable_reg = 0x3144,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi1rdi_clk",
+                       .parent_names = (const char *[]){
+                               "csi1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2_ahb_clk = {
+       .halt_reg = 0x3188,
+       .clkr = {
+               .enable_reg = 0x3188,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2_clk = {
+       .halt_reg = 0x3184,
+       .clkr = {
+               .enable_reg = 0x3184,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2_clk",
+                       .parent_names = (const char *[]){
+                               "csi2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2phy_clk = {
+       .halt_reg = 0x3194,
+       .clkr = {
+               .enable_reg = 0x3194,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2phy_clk",
+                       .parent_names = (const char *[]){
+                               "csi2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2pix_clk = {
+       .halt_reg = 0x31b4,
+       .clkr = {
+               .enable_reg = 0x31b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2pix_clk",
+                       .parent_names = (const char *[]){
+                               "csi2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi2rdi_clk = {
+       .halt_reg = 0x31a4,
+       .clkr = {
+               .enable_reg = 0x31a4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi2rdi_clk",
+                       .parent_names = (const char *[]){
+                               "csi2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3_ahb_clk = {
+       .halt_reg = 0x31e8,
+       .clkr = {
+               .enable_reg = 0x31e8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3_clk = {
+       .halt_reg = 0x31e4,
+       .clkr = {
+               .enable_reg = 0x31e4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3_clk",
+                       .parent_names = (const char *[]){
+                               "csi3_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3phy_clk = {
+       .halt_reg = 0x31f4,
+       .clkr = {
+               .enable_reg = 0x31f4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3phy_clk",
+                       .parent_names = (const char *[]){
+                               "csi3_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3pix_clk = {
+       .halt_reg = 0x3214,
+       .clkr = {
+               .enable_reg = 0x3214,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3pix_clk",
+                       .parent_names = (const char *[]){
+                               "csi3_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi3rdi_clk = {
+       .halt_reg = 0x3204,
+       .clkr = {
+               .enable_reg = 0x3204,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi3rdi_clk",
+                       .parent_names = (const char *[]){
+                               "csi3_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi_vfe0_clk = {
+       .halt_reg = 0x3704,
+       .clkr = {
+               .enable_reg = 0x3704,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi_vfe0_clk",
+                       .parent_names = (const char *[]){
+                               "vfe0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_csi_vfe1_clk = {
+       .halt_reg = 0x3714,
+       .clkr = {
+               .enable_reg = 0x3714,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_csi_vfe1_clk",
+                       .parent_names = (const char *[]){
+                               "vfe1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_gp0_clk = {
+       .halt_reg = 0x3444,
+       .clkr = {
+               .enable_reg = 0x3444,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_gp0_clk",
+                       .parent_names = (const char *[]){
+                               "camss_gp0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_gp1_clk = {
+       .halt_reg = 0x3474,
+       .clkr = {
+               .enable_reg = 0x3474,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_gp1_clk",
+                       .parent_names = (const char *[]){
+                               "camss_gp1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_ispif_ahb_clk = {
+       .halt_reg = 0x3224,
+       .clkr = {
+               .enable_reg = 0x3224,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_ispif_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_jpeg_jpeg0_clk = {
+       .halt_reg = 0x35a8,
+       .clkr = {
+               .enable_reg = 0x35a8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_jpeg_jpeg0_clk",
+                       .parent_names = (const char *[]){
+                               "jpeg0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_jpeg_jpeg1_clk = {
+       .halt_reg = 0x35ac,
+       .clkr = {
+               .enable_reg = 0x35ac,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_jpeg_jpeg1_clk",
+                       .parent_names = (const char *[]){
+                               "jpeg1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_jpeg_jpeg2_clk = {
+       .halt_reg = 0x35b0,
+       .clkr = {
+               .enable_reg = 0x35b0,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_jpeg_jpeg2_clk",
+                       .parent_names = (const char *[]){
+                               "jpeg2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_jpeg_jpeg_ahb_clk = {
+       .halt_reg = 0x35b4,
+       .clkr = {
+               .enable_reg = 0x35b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_jpeg_jpeg_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_jpeg_jpeg_axi_clk = {
+       .halt_reg = 0x35b8,
+       .clkr = {
+               .enable_reg = 0x35b8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_jpeg_jpeg_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_mclk0_clk = {
+       .halt_reg = 0x3384,
+       .clkr = {
+               .enable_reg = 0x3384,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_mclk0_clk",
+                       .parent_names = (const char *[]){
+                               "mclk0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_mclk1_clk = {
+       .halt_reg = 0x33b4,
+       .clkr = {
+               .enable_reg = 0x33b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_mclk1_clk",
+                       .parent_names = (const char *[]){
+                               "mclk1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_mclk2_clk = {
+       .halt_reg = 0x33e4,
+       .clkr = {
+               .enable_reg = 0x33e4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_mclk2_clk",
+                       .parent_names = (const char *[]){
+                               "mclk2_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_mclk3_clk = {
+       .halt_reg = 0x3414,
+       .clkr = {
+               .enable_reg = 0x3414,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_mclk3_clk",
+                       .parent_names = (const char *[]){
+                               "mclk3_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_micro_ahb_clk = {
+       .halt_reg = 0x3494,
+       .clkr = {
+               .enable_reg = 0x3494,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_micro_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_phy0_csi0phytimer_clk = {
+       .halt_reg = 0x3024,
+       .clkr = {
+               .enable_reg = 0x3024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_phy0_csi0phytimer_clk",
+                       .parent_names = (const char *[]){
+                               "csi0phytimer_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_phy1_csi1phytimer_clk = {
+       .halt_reg = 0x3054,
+       .clkr = {
+               .enable_reg = 0x3054,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_phy1_csi1phytimer_clk",
+                       .parent_names = (const char *[]){
+                               "csi1phytimer_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_phy2_csi2phytimer_clk = {
+       .halt_reg = 0x3084,
+       .clkr = {
+               .enable_reg = 0x3084,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_phy2_csi2phytimer_clk",
+                       .parent_names = (const char *[]){
+                               "csi2phytimer_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_top_ahb_clk = {
+       .halt_reg = 0x3484,
+       .clkr = {
+               .enable_reg = 0x3484,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_top_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe_cpp_ahb_clk = {
+       .halt_reg = 0x36b4,
+       .clkr = {
+               .enable_reg = 0x36b4,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe_cpp_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe_cpp_clk = {
+       .halt_reg = 0x36b0,
+       .clkr = {
+               .enable_reg = 0x36b0,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe_cpp_clk",
+                       .parent_names = (const char *[]){
+                               "cpp_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe_vfe0_clk = {
+       .halt_reg = 0x36a8,
+       .clkr = {
+               .enable_reg = 0x36a8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe_vfe0_clk",
+                       .parent_names = (const char *[]){
+                               "vfe0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe_vfe1_clk = {
+       .halt_reg = 0x36ac,
+       .clkr = {
+               .enable_reg = 0x36ac,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe_vfe1_clk",
+                       .parent_names = (const char *[]){
+                               "vfe1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe_vfe_ahb_clk = {
+       .halt_reg = 0x36b8,
+       .clkr = {
+               .enable_reg = 0x36b8,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe_vfe_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch camss_vfe_vfe_axi_clk = {
+       .halt_reg = 0x36bc,
+       .clkr = {
+               .enable_reg = 0x36bc,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "camss_vfe_vfe_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
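+/* MDSS (display subsystem) clocks */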
+static struct clk_branch mdss_ahb_clk = {
+       .halt_reg = 0x2308,
+       .clkr = {
+               .enable_reg = 0x2308,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_axi_clk = {
+       .halt_reg = 0x2310,
+       .clkr = {
+               .enable_reg = 0x2310,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_byte0_clk = {
+       .halt_reg = 0x233c,
+       .clkr = {
+               .enable_reg = 0x233c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_byte0_clk",
+                       .parent_names = (const char *[]){
+                               "byte0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_byte1_clk = {
+       .halt_reg = 0x2340,
+       .clkr = {
+               .enable_reg = 0x2340,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_byte1_clk",
+                       .parent_names = (const char *[]){
+                               "byte1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_edpaux_clk = {
+       .halt_reg = 0x2334,
+       .clkr = {
+               .enable_reg = 0x2334,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_edpaux_clk",
+                       .parent_names = (const char *[]){
+                               "edpaux_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_edplink_clk = {
+       .halt_reg = 0x2330,
+       .clkr = {
+               .enable_reg = 0x2330,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_edplink_clk",
+                       .parent_names = (const char *[]){
+                               "edplink_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_edppixel_clk = {
+       .halt_reg = 0x232c,
+       .clkr = {
+               .enable_reg = 0x232c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_edppixel_clk",
+                       .parent_names = (const char *[]){
+                               "edppixel_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_esc0_clk = {
+       .halt_reg = 0x2344,
+       .clkr = {
+               .enable_reg = 0x2344,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_esc0_clk",
+                       .parent_names = (const char *[]){
+                               "esc0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_esc1_clk = {
+       .halt_reg = 0x2348,
+       .clkr = {
+               .enable_reg = 0x2348,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_esc1_clk",
+                       .parent_names = (const char *[]){
+                               "esc1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_extpclk_clk = {
+       .halt_reg = 0x2324,
+       .clkr = {
+               .enable_reg = 0x2324,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_extpclk_clk",
+                       .parent_names = (const char *[]){
+                               "extpclk_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_hdmi_ahb_clk = {
+       .halt_reg = 0x230c,
+       .clkr = {
+               .enable_reg = 0x230c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_hdmi_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_hdmi_clk = {
+       .halt_reg = 0x2338,
+       .clkr = {
+               .enable_reg = 0x2338,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_hdmi_clk",
+                       .parent_names = (const char *[]){
+                               "hdmi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_mdp_clk = {
+       .halt_reg = 0x231c,
+       .clkr = {
+               .enable_reg = 0x231c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_mdp_clk",
+                       .parent_names = (const char *[]){
+                               "mdp_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_mdp_lut_clk = {
+       .halt_reg = 0x2320,
+       .clkr = {
+               .enable_reg = 0x2320,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_mdp_lut_clk",
+                       .parent_names = (const char *[]){
+                               "mdp_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_pclk0_clk = {
+       .halt_reg = 0x2314,
+       .clkr = {
+               .enable_reg = 0x2314,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_pclk0_clk",
+                       .parent_names = (const char *[]){
+                               "pclk0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_pclk1_clk = {
+       .halt_reg = 0x2318,
+       .clkr = {
+               .enable_reg = 0x2318,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_pclk1_clk",
+                       .parent_names = (const char *[]){
+                               "pclk1_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mdss_vsync_clk = {
+       .halt_reg = 0x2328,
+       .clkr = {
+               .enable_reg = 0x2328,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mdss_vsync_clk",
+                       .parent_names = (const char *[]){
+                               "vsync_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
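+/* RBCPR (core power reduction) clocks */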
+static struct clk_branch mmss_rbcpr_ahb_clk = {
+       .halt_reg = 0x4088,
+       .clkr = {
+               .enable_reg = 0x4088,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_rbcpr_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_rbcpr_clk = {
+       .halt_reg = 0x4084,
+       .clkr = {
+               .enable_reg = 0x4084,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_rbcpr_clk",
+                       .parent_names = (const char *[]){
+                               "rbcpr_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
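+/* SPDM debug clocks; each one gates the divided tap of a core clock */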
+static struct clk_branch mmss_spdm_ahb_clk = {
+       .halt_reg = 0x0230,
+       .clkr = {
+               .enable_reg = 0x0230,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_ahb_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_axi_clk = {
+       .halt_reg = 0x0210,
+       .clkr = {
+               .enable_reg = 0x0210,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_axi_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_csi0_clk = {
+       .halt_reg = 0x023c,
+       .clkr = {
+               .enable_reg = 0x023c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_csi0_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_csi0_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_gfx3d_clk = {
+       .halt_reg = 0x022c,
+       .clkr = {
+               .enable_reg = 0x022c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_gfx3d_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_gfx3d_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_jpeg0_clk = {
+       .halt_reg = 0x0204,
+       .clkr = {
+               .enable_reg = 0x0204,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_jpeg0_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_jpeg0_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_jpeg1_clk = {
+       .halt_reg = 0x0208,
+       .clkr = {
+               .enable_reg = 0x0208,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_jpeg1_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_jpeg1_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_jpeg2_clk = {
+       .halt_reg = 0x0224,
+       .clkr = {
+               .enable_reg = 0x0224,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_jpeg2_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_jpeg2_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_mdp_clk = {
+       .halt_reg = 0x020c,
+       .clkr = {
+               .enable_reg = 0x020c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_mdp_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_mdp_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_pclk0_clk = {
+       .halt_reg = 0x0234,
+       .clkr = {
+               .enable_reg = 0x0234,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_pclk0_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_pclk0_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_pclk1_clk = {
+       .halt_reg = 0x0228,
+       .clkr = {
+               .enable_reg = 0x0228,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_pclk1_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_pclk1_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_vcodec0_clk = {
+       .halt_reg = 0x0214,
+       .clkr = {
+               .enable_reg = 0x0214,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_vcodec0_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_vcodec0_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_vfe0_clk = {
+       .halt_reg = 0x0218,
+       .clkr = {
+               .enable_reg = 0x0218,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_vfe0_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_vfe0_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_vfe1_clk = {
+       .halt_reg = 0x021c,
+       .clkr = {
+               .enable_reg = 0x021c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_vfe1_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_spdm_vfe1_div_clk",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_rm_axi_clk = {
+       .halt_reg = 0x0304,
+       .clkr = {
+               .enable_reg = 0x0304,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_rm_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_spdm_rm_ocmemnoc_clk = {
+       .halt_reg = 0x0308,
+       .clkr = {
+               .enable_reg = 0x0308,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_spdm_rm_ocmemnoc_clk",
+                       .parent_names = (const char *[]){
+                               "ocmemnoc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
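+/* MMSS NoC and misc bus clocks */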
+static struct clk_branch mmss_misc_ahb_clk = {
+       .halt_reg = 0x502c,
+       .clkr = {
+               .enable_reg = 0x502c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_misc_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_mmssnoc_ahb_clk = {
+       .halt_reg = 0x5024,
+       .clkr = {
+               .enable_reg = 0x5024,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_mmssnoc_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_mmssnoc_bto_ahb_clk = {
+       .halt_reg = 0x5028,
+       .clkr = {
+               .enable_reg = 0x5028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_mmssnoc_bto_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_mmssnoc_axi_clk = {
+       .halt_reg = 0x506c,
+       .clkr = {
+               .enable_reg = 0x506c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_mmssnoc_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch mmss_s0_axi_clk = {
+       .halt_reg = 0x5064,
+       .clkr = {
+               .enable_reg = 0x5064,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mmss_s0_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
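+/* OCMEM (on-chip memory) interface clocks */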
+static struct clk_branch ocmemcx_ahb_clk = {
+       .halt_reg = 0x405c,
+       .clkr = {
+               .enable_reg = 0x405c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ocmemcx_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch ocmemcx_ocmemnoc_clk = {
+       .halt_reg = 0x4058,
+       .clkr = {
+               .enable_reg = 0x4058,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "ocmemcx_ocmemnoc_clk",
+                       .parent_names = (const char *[]){
+                               "ocmemnoc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
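+/* Oxili (3D graphics) clocks */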
+static struct clk_branch oxili_ocmemgx_clk = {
+       .halt_reg = 0x402c,
+       .clkr = {
+               .enable_reg = 0x402c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "oxili_ocmemgx_clk",
+                       .parent_names = (const char *[]){
+                               "gfx3d_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch oxili_gfx3d_clk = {
+       .halt_reg = 0x4028,
+       .clkr = {
+               .enable_reg = 0x4028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "oxili_gfx3d_clk",
+                       .parent_names = (const char *[]){
+                               "gfx3d_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch oxili_rbbmtimer_clk = {
+       .halt_reg = 0x40b0,
+       .clkr = {
+               .enable_reg = 0x40b0,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "oxili_rbbmtimer_clk",
+                       .parent_names = (const char *[]){
+                               "rbbmtimer_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch oxilicx_ahb_clk = {
+       .halt_reg = 0x403c,
+       .clkr = {
+               .enable_reg = 0x403c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "oxilicx_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
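+/* Venus (video codec) clocks */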
+static struct clk_branch venus0_ahb_clk = {
+       .halt_reg = 0x1030,
+       .clkr = {
+               .enable_reg = 0x1030,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "venus0_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch venus0_axi_clk = {
+       .halt_reg = 0x1034,
+       .clkr = {
+               .enable_reg = 0x1034,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "venus0_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch venus0_core0_vcodec_clk = {
+       .halt_reg = 0x1048,
+       .clkr = {
+               .enable_reg = 0x1048,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "venus0_core0_vcodec_clk",
+                       .parent_names = (const char *[]){
+                               "vcodec0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch venus0_core1_vcodec_clk = {
+       .halt_reg = 0x104c,
+       .clkr = {
+               .enable_reg = 0x104c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "venus0_core1_vcodec_clk",
+                       .parent_names = (const char *[]){
+                               "vcodec0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch venus0_ocmemnoc_clk = {
+       .halt_reg = 0x1038,
+       .clkr = {
+               .enable_reg = 0x1038,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "venus0_ocmemnoc_clk",
+                       .parent_names = (const char *[]){
+                               "ocmemnoc_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch venus0_vcodec0_clk = {
+       .halt_reg = 0x1028,
+       .clkr = {
+               .enable_reg = 0x1028,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "venus0_vcodec0_clk",
+                       .parent_names = (const char *[]){
+                               "vcodec0_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
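+/* VPU (video processing unit) clocks */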
+static struct clk_branch vpu_ahb_clk = {
+       .halt_reg = 0x1430,
+       .clkr = {
+               .enable_reg = 0x1430,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vpu_ahb_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_ahb_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch vpu_axi_clk = {
+       .halt_reg = 0x143c,
+       .clkr = {
+               .enable_reg = 0x143c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vpu_axi_clk",
+                       .parent_names = (const char *[]){
+                               "mmss_axi_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch vpu_bus_clk = {
+       .halt_reg = 0x1440,
+       .clkr = {
+               .enable_reg = 0x1440,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vpu_bus_clk",
+                       .parent_names = (const char *[]){
+                               "vpu_bus_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch vpu_cxo_clk = {
+       .halt_reg = 0x1434,
+       .clkr = {
+               .enable_reg = 0x1434,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vpu_cxo_clk",
+                       .parent_names = (const char *[]){ "xo" },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch vpu_maple_clk = {
+       .halt_reg = 0x142c,
+       .clkr = {
+               .enable_reg = 0x142c,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vpu_maple_clk",
+                       .parent_names = (const char *[]){
+                               "maple_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch vpu_sleep_clk = {
+       .halt_reg = 0x1438,
+       .clkr = {
+               .enable_reg = 0x1438,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vpu_sleep_clk",
+                       .parent_names = (const char *[]){
+                               "sleep_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
+static struct clk_branch vpu_vdp_clk = {
+       .halt_reg = 0x1428,
+       .clkr = {
+               .enable_reg = 0x1428,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vpu_vdp_clk",
+                       .parent_names = (const char *[]){
+                               "vdp_clk_src",
+                       },
+                       .num_parents = 1,
+                       .flags = CLK_SET_RATE_PARENT,
+                       .ops = &clk_branch2_ops,
+               },
+       },
+};
+
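+/*
+ * SR-HPM-LP PLL programming: Fout = Fref * (L + M / N).  Assuming the
+ * usual 19.2 MHz XO reference (not declared in this file), mmpll1
+ * locks at 19.2 * (60 + 25/32) = 1167 MHz and mmpll3 at
+ * 19.2 * (48 + 7/16) = 930 MHz.
+ */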
+static const struct pll_config mmpll1_config = {
+       .l = 60,
+       .m = 25,
+       .n = 32,
+       .vco_val = 0x0,
+       .vco_mask = 0x3 << 20,
+       .pre_div_val = 0x0,
+       .pre_div_mask = 0x7 << 12,
+       .post_div_val = 0x0,
+       .post_div_mask = 0x3 << 8,
+       .mn_ena_mask = BIT(24),
+       .main_output_mask = BIT(0),
+};
+
+static const struct pll_config mmpll3_config = {
+       .l = 48,
+       .m = 7,
+       .n = 16,
+       .vco_val = 0x0,
+       .vco_mask = 0x3 << 20,
+       .pre_div_val = 0x0,
+       .pre_div_mask = 0x7 << 12,
+       .post_div_val = 0x0,
+       .post_div_mask = 0x3 << 8,
+       .mn_ena_mask = BIT(24),
+       .main_output_mask = BIT(0),
+       .aux_output_mask = BIT(1),
+};
+
+static struct clk_regmap *mmcc_apq8084_clocks[] = {
+       [MMSS_AHB_CLK_SRC] = &mmss_ahb_clk_src.clkr,
+       [MMSS_AXI_CLK_SRC] = &mmss_axi_clk_src.clkr,
+       [MMPLL0] = &mmpll0.clkr,
+       [MMPLL0_VOTE] = &mmpll0_vote,
+       [MMPLL1] = &mmpll1.clkr,
+       [MMPLL1_VOTE] = &mmpll1_vote,
+       [MMPLL2] = &mmpll2.clkr,
+       [MMPLL3] = &mmpll3.clkr,
+       [MMPLL4] = &mmpll4.clkr,
+       [CSI0_CLK_SRC] = &csi0_clk_src.clkr,
+       [CSI1_CLK_SRC] = &csi1_clk_src.clkr,
+       [CSI2_CLK_SRC] = &csi2_clk_src.clkr,
+       [CSI3_CLK_SRC] = &csi3_clk_src.clkr,
+       [VCODEC0_CLK_SRC] = &vcodec0_clk_src.clkr,
+       [VFE0_CLK_SRC] = &vfe0_clk_src.clkr,
+       [VFE1_CLK_SRC] = &vfe1_clk_src.clkr,
+       [MDP_CLK_SRC] = &mdp_clk_src.clkr,
+       [PCLK0_CLK_SRC] = &pclk0_clk_src.clkr,
+       [PCLK1_CLK_SRC] = &pclk1_clk_src.clkr,
+       [OCMEMNOC_CLK_SRC] = &ocmemnoc_clk_src.clkr,
+       [GFX3D_CLK_SRC] = &gfx3d_clk_src.clkr,
+       [JPEG0_CLK_SRC] = &jpeg0_clk_src.clkr,
+       [JPEG1_CLK_SRC] = &jpeg1_clk_src.clkr,
+       [JPEG2_CLK_SRC] = &jpeg2_clk_src.clkr,
+       [EDPPIXEL_CLK_SRC] = &edppixel_clk_src.clkr,
+       [EXTPCLK_CLK_SRC] = &extpclk_clk_src.clkr,
+       [VP_CLK_SRC] = &vp_clk_src.clkr,
+       [CCI_CLK_SRC] = &cci_clk_src.clkr,
+       [CAMSS_GP0_CLK_SRC] = &camss_gp0_clk_src.clkr,
+       [CAMSS_GP1_CLK_SRC] = &camss_gp1_clk_src.clkr,
+       [MCLK0_CLK_SRC] = &mclk0_clk_src.clkr,
+       [MCLK1_CLK_SRC] = &mclk1_clk_src.clkr,
+       [MCLK2_CLK_SRC] = &mclk2_clk_src.clkr,
+       [MCLK3_CLK_SRC] = &mclk3_clk_src.clkr,
+       [CSI0PHYTIMER_CLK_SRC] = &csi0phytimer_clk_src.clkr,
+       [CSI1PHYTIMER_CLK_SRC] = &csi1phytimer_clk_src.clkr,
+       [CSI2PHYTIMER_CLK_SRC] = &csi2phytimer_clk_src.clkr,
+       [CPP_CLK_SRC] = &cpp_clk_src.clkr,
+       [BYTE0_CLK_SRC] = &byte0_clk_src.clkr,
+       [BYTE1_CLK_SRC] = &byte1_clk_src.clkr,
+       [EDPAUX_CLK_SRC] = &edpaux_clk_src.clkr,
+       [EDPLINK_CLK_SRC] = &edplink_clk_src.clkr,
+       [ESC0_CLK_SRC] = &esc0_clk_src.clkr,
+       [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
+       [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
+       [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
+       [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
+       [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
+       [MAPLE_CLK_SRC] = &maple_clk_src.clkr,
+       [VDP_CLK_SRC] = &vdp_clk_src.clkr,
+       [VPU_BUS_CLK_SRC] = &vpu_bus_clk_src.clkr,
+       [MMSS_CXO_CLK] = &mmss_cxo_clk.clkr,
+       [MMSS_SLEEPCLK_CLK] = &mmss_sleepclk_clk.clkr,
+       [AVSYNC_AHB_CLK] = &avsync_ahb_clk.clkr,
+       [AVSYNC_EDPPIXEL_CLK] = &avsync_edppixel_clk.clkr,
+       [AVSYNC_EXTPCLK_CLK] = &avsync_extpclk_clk.clkr,
+       [AVSYNC_PCLK0_CLK] = &avsync_pclk0_clk.clkr,
+       [AVSYNC_PCLK1_CLK] = &avsync_pclk1_clk.clkr,
+       [AVSYNC_VP_CLK] = &avsync_vp_clk.clkr,
+       [CAMSS_AHB_CLK] = &camss_ahb_clk.clkr,
+       [CAMSS_CCI_CCI_AHB_CLK] = &camss_cci_cci_ahb_clk.clkr,
+       [CAMSS_CCI_CCI_CLK] = &camss_cci_cci_clk.clkr,
+       [CAMSS_CSI0_AHB_CLK] = &camss_csi0_ahb_clk.clkr,
+       [CAMSS_CSI0_CLK] = &camss_csi0_clk.clkr,
+       [CAMSS_CSI0PHY_CLK] = &camss_csi0phy_clk.clkr,
+       [CAMSS_CSI0PIX_CLK] = &camss_csi0pix_clk.clkr,
+       [CAMSS_CSI0RDI_CLK] = &camss_csi0rdi_clk.clkr,
+       [CAMSS_CSI1_AHB_CLK] = &camss_csi1_ahb_clk.clkr,
+       [CAMSS_CSI1_CLK] = &camss_csi1_clk.clkr,
+       [CAMSS_CSI1PHY_CLK] = &camss_csi1phy_clk.clkr,
+       [CAMSS_CSI1PIX_CLK] = &camss_csi1pix_clk.clkr,
+       [CAMSS_CSI1RDI_CLK] = &camss_csi1rdi_clk.clkr,
+       [CAMSS_CSI2_AHB_CLK] = &camss_csi2_ahb_clk.clkr,
+       [CAMSS_CSI2_CLK] = &camss_csi2_clk.clkr,
+       [CAMSS_CSI2PHY_CLK] = &camss_csi2phy_clk.clkr,
+       [CAMSS_CSI2PIX_CLK] = &camss_csi2pix_clk.clkr,
+       [CAMSS_CSI2RDI_CLK] = &camss_csi2rdi_clk.clkr,
+       [CAMSS_CSI3_AHB_CLK] = &camss_csi3_ahb_clk.clkr,
+       [CAMSS_CSI3_CLK] = &camss_csi3_clk.clkr,
+       [CAMSS_CSI3PHY_CLK] = &camss_csi3phy_clk.clkr,
+       [CAMSS_CSI3PIX_CLK] = &camss_csi3pix_clk.clkr,
+       [CAMSS_CSI3RDI_CLK] = &camss_csi3rdi_clk.clkr,
+       [CAMSS_CSI_VFE0_CLK] = &camss_csi_vfe0_clk.clkr,
+       [CAMSS_CSI_VFE1_CLK] = &camss_csi_vfe1_clk.clkr,
+       [CAMSS_GP0_CLK] = &camss_gp0_clk.clkr,
+       [CAMSS_GP1_CLK] = &camss_gp1_clk.clkr,
+       [CAMSS_ISPIF_AHB_CLK] = &camss_ispif_ahb_clk.clkr,
+       [CAMSS_JPEG_JPEG0_CLK] = &camss_jpeg_jpeg0_clk.clkr,
+       [CAMSS_JPEG_JPEG1_CLK] = &camss_jpeg_jpeg1_clk.clkr,
+       [CAMSS_JPEG_JPEG2_CLK] = &camss_jpeg_jpeg2_clk.clkr,
+       [CAMSS_JPEG_JPEG_AHB_CLK] = &camss_jpeg_jpeg_ahb_clk.clkr,
+       [CAMSS_JPEG_JPEG_AXI_CLK] = &camss_jpeg_jpeg_axi_clk.clkr,
+       [CAMSS_MCLK0_CLK] = &camss_mclk0_clk.clkr,
+       [CAMSS_MCLK1_CLK] = &camss_mclk1_clk.clkr,
+       [CAMSS_MCLK2_CLK] = &camss_mclk2_clk.clkr,
+       [CAMSS_MCLK3_CLK] = &camss_mclk3_clk.clkr,
+       [CAMSS_MICRO_AHB_CLK] = &camss_micro_ahb_clk.clkr,
+       [CAMSS_PHY0_CSI0PHYTIMER_CLK] = &camss_phy0_csi0phytimer_clk.clkr,
+       [CAMSS_PHY1_CSI1PHYTIMER_CLK] = &camss_phy1_csi1phytimer_clk.clkr,
+       [CAMSS_PHY2_CSI2PHYTIMER_CLK] = &camss_phy2_csi2phytimer_clk.clkr,
+       [CAMSS_TOP_AHB_CLK] = &camss_top_ahb_clk.clkr,
+       [CAMSS_VFE_CPP_AHB_CLK] = &camss_vfe_cpp_ahb_clk.clkr,
+       [CAMSS_VFE_CPP_CLK] = &camss_vfe_cpp_clk.clkr,
+       [CAMSS_VFE_VFE0_CLK] = &camss_vfe_vfe0_clk.clkr,
+       [CAMSS_VFE_VFE1_CLK] = &camss_vfe_vfe1_clk.clkr,
+       [CAMSS_VFE_VFE_AHB_CLK] = &camss_vfe_vfe_ahb_clk.clkr,
+       [CAMSS_VFE_VFE_AXI_CLK] = &camss_vfe_vfe_axi_clk.clkr,
+       [MDSS_AHB_CLK] = &mdss_ahb_clk.clkr,
+       [MDSS_AXI_CLK] = &mdss_axi_clk.clkr,
+       [MDSS_BYTE0_CLK] = &mdss_byte0_clk.clkr,
+       [MDSS_BYTE1_CLK] = &mdss_byte1_clk.clkr,
+       [MDSS_EDPAUX_CLK] = &mdss_edpaux_clk.clkr,
+       [MDSS_EDPLINK_CLK] = &mdss_edplink_clk.clkr,
+       [MDSS_EDPPIXEL_CLK] = &mdss_edppixel_clk.clkr,
+       [MDSS_ESC0_CLK] = &mdss_esc0_clk.clkr,
+       [MDSS_ESC1_CLK] = &mdss_esc1_clk.clkr,
+       [MDSS_EXTPCLK_CLK] = &mdss_extpclk_clk.clkr,
+       [MDSS_HDMI_AHB_CLK] = &mdss_hdmi_ahb_clk.clkr,
+       [MDSS_HDMI_CLK] = &mdss_hdmi_clk.clkr,
+       [MDSS_MDP_CLK] = &mdss_mdp_clk.clkr,
+       [MDSS_MDP_LUT_CLK] = &mdss_mdp_lut_clk.clkr,
+       [MDSS_PCLK0_CLK] = &mdss_pclk0_clk.clkr,
+       [MDSS_PCLK1_CLK] = &mdss_pclk1_clk.clkr,
+       [MDSS_VSYNC_CLK] = &mdss_vsync_clk.clkr,
+       [MMSS_RBCPR_AHB_CLK] = &mmss_rbcpr_ahb_clk.clkr,
+       [MMSS_RBCPR_CLK] = &mmss_rbcpr_clk.clkr,
+       [MMSS_SPDM_AHB_CLK] = &mmss_spdm_ahb_clk.clkr,
+       [MMSS_SPDM_AXI_CLK] = &mmss_spdm_axi_clk.clkr,
+       [MMSS_SPDM_CSI0_CLK] = &mmss_spdm_csi0_clk.clkr,
+       [MMSS_SPDM_GFX3D_CLK] = &mmss_spdm_gfx3d_clk.clkr,
+       [MMSS_SPDM_JPEG0_CLK] = &mmss_spdm_jpeg0_clk.clkr,
+       [MMSS_SPDM_JPEG1_CLK] = &mmss_spdm_jpeg1_clk.clkr,
+       [MMSS_SPDM_JPEG2_CLK] = &mmss_spdm_jpeg2_clk.clkr,
+       [MMSS_SPDM_MDP_CLK] = &mmss_spdm_mdp_clk.clkr,
+       [MMSS_SPDM_PCLK0_CLK] = &mmss_spdm_pclk0_clk.clkr,
+       [MMSS_SPDM_PCLK1_CLK] = &mmss_spdm_pclk1_clk.clkr,
+       [MMSS_SPDM_VCODEC0_CLK] = &mmss_spdm_vcodec0_clk.clkr,
+       [MMSS_SPDM_VFE0_CLK] = &mmss_spdm_vfe0_clk.clkr,
+       [MMSS_SPDM_VFE1_CLK] = &mmss_spdm_vfe1_clk.clkr,
+       [MMSS_SPDM_RM_AXI_CLK] = &mmss_spdm_rm_axi_clk.clkr,
+       [MMSS_SPDM_RM_OCMEMNOC_CLK] = &mmss_spdm_rm_ocmemnoc_clk.clkr,
+       [MMSS_MISC_AHB_CLK] = &mmss_misc_ahb_clk.clkr,
+       [MMSS_MMSSNOC_AHB_CLK] = &mmss_mmssnoc_ahb_clk.clkr,
+       [MMSS_MMSSNOC_BTO_AHB_CLK] = &mmss_mmssnoc_bto_ahb_clk.clkr,
+       [MMSS_MMSSNOC_AXI_CLK] = &mmss_mmssnoc_axi_clk.clkr,
+       [MMSS_S0_AXI_CLK] = &mmss_s0_axi_clk.clkr,
+       [OCMEMCX_AHB_CLK] = &ocmemcx_ahb_clk.clkr,
+       [OCMEMCX_OCMEMNOC_CLK] = &ocmemcx_ocmemnoc_clk.clkr,
+       [OXILI_OCMEMGX_CLK] = &oxili_ocmemgx_clk.clkr,
+       [OXILI_GFX3D_CLK] = &oxili_gfx3d_clk.clkr,
+       [OXILI_RBBMTIMER_CLK] = &oxili_rbbmtimer_clk.clkr,
+       [OXILICX_AHB_CLK] = &oxilicx_ahb_clk.clkr,
+       [VENUS0_AHB_CLK] = &venus0_ahb_clk.clkr,
+       [VENUS0_AXI_CLK] = &venus0_axi_clk.clkr,
+       [VENUS0_CORE0_VCODEC_CLK] = &venus0_core0_vcodec_clk.clkr,
+       [VENUS0_CORE1_VCODEC_CLK] = &venus0_core1_vcodec_clk.clkr,
+       [VENUS0_OCMEMNOC_CLK] = &venus0_ocmemnoc_clk.clkr,
+       [VENUS0_VCODEC0_CLK] = &venus0_vcodec0_clk.clkr,
+       [VPU_AHB_CLK] = &vpu_ahb_clk.clkr,
+       [VPU_AXI_CLK] = &vpu_axi_clk.clkr,
+       [VPU_BUS_CLK] = &vpu_bus_clk.clkr,
+       [VPU_CXO_CLK] = &vpu_cxo_clk.clkr,
+       [VPU_MAPLE_CLK] = &vpu_maple_clk.clkr,
+       [VPU_SLEEP_CLK] = &vpu_sleep_clk.clkr,
+       [VPU_VDP_CLK] = &vpu_vdp_clk.clkr,
+};
+
+static const struct qcom_reset_map mmcc_apq8084_resets[] = {
+       [MMSS_SPDM_RESET] = { 0x0200 },
+       [MMSS_SPDM_RM_RESET] = { 0x0300 },
+       [VENUS0_RESET] = { 0x1020 },
+       [VPU_RESET] = { 0x1400 },
+       [MDSS_RESET] = { 0x2300 },
+       [AVSYNC_RESET] = { 0x2400 },
+       [CAMSS_PHY0_RESET] = { 0x3020 },
+       [CAMSS_PHY1_RESET] = { 0x3050 },
+       [CAMSS_PHY2_RESET] = { 0x3080 },
+       [CAMSS_CSI0_RESET] = { 0x30b0 },
+       [CAMSS_CSI0PHY_RESET] = { 0x30c0 },
+       [CAMSS_CSI0RDI_RESET] = { 0x30d0 },
+       [CAMSS_CSI0PIX_RESET] = { 0x30e0 },
+       [CAMSS_CSI1_RESET] = { 0x3120 },
+       [CAMSS_CSI1PHY_RESET] = { 0x3130 },
+       [CAMSS_CSI1RDI_RESET] = { 0x3140 },
+       [CAMSS_CSI1PIX_RESET] = { 0x3150 },
+       [CAMSS_CSI2_RESET] = { 0x3180 },
+       [CAMSS_CSI2PHY_RESET] = { 0x3190 },
+       [CAMSS_CSI2RDI_RESET] = { 0x31a0 },
+       [CAMSS_CSI2PIX_RESET] = { 0x31b0 },
+       [CAMSS_CSI3_RESET] = { 0x31e0 },
+       [CAMSS_CSI3PHY_RESET] = { 0x31f0 },
+       [CAMSS_CSI3RDI_RESET] = { 0x3200 },
+       [CAMSS_CSI3PIX_RESET] = { 0x3210 },
+       [CAMSS_ISPIF_RESET] = { 0x3220 },
+       [CAMSS_CCI_RESET] = { 0x3340 },
+       [CAMSS_MCLK0_RESET] = { 0x3380 },
+       [CAMSS_MCLK1_RESET] = { 0x33b0 },
+       [CAMSS_MCLK2_RESET] = { 0x33e0 },
+       [CAMSS_MCLK3_RESET] = { 0x3410 },
+       [CAMSS_GP0_RESET] = { 0x3440 },
+       [CAMSS_GP1_RESET] = { 0x3470 },
+       [CAMSS_TOP_RESET] = { 0x3480 },
+       [CAMSS_AHB_RESET] = { 0x3488 },
+       [CAMSS_MICRO_RESET] = { 0x3490 },
+       [CAMSS_JPEG_RESET] = { 0x35a0 },
+       [CAMSS_VFE_RESET] = { 0x36a0 },
+       [CAMSS_CSI_VFE0_RESET] = { 0x3700 },
+       [CAMSS_CSI_VFE1_RESET] = { 0x3710 },
+       [OXILI_RESET] = { 0x4020 },
+       [OXILICX_RESET] = { 0x4030 },
+       [OCMEMCX_RESET] = { 0x4050 },
+       [MMSS_RBCRP_RESET] = { 0x4080 },
+       [MMSSNOCAHB_RESET] = { 0x5020 },
+       [MMSSNOCAXI_RESET] = { 0x5060 },
+};
+
+static const struct regmap_config mmcc_apq8084_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x5104,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc mmcc_apq8084_desc = {
+       .config = &mmcc_apq8084_regmap_config,
+       .clks = mmcc_apq8084_clocks,
+       .num_clks = ARRAY_SIZE(mmcc_apq8084_clocks),
+       .resets = mmcc_apq8084_resets,
+       .num_resets = ARRAY_SIZE(mmcc_apq8084_resets),
+};
+
+static const struct of_device_id mmcc_apq8084_match_table[] = {
+       { .compatible = "qcom,mmcc-apq8084" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mmcc_apq8084_match_table);
+
+static int mmcc_apq8084_probe(struct platform_device *pdev)
+{
+       int ret;
+       struct regmap *regmap;
+
+       ret = qcom_cc_probe(pdev, &mmcc_apq8084_desc);
+       if (ret)
+               return ret;
+
+       regmap = dev_get_regmap(&pdev->dev, NULL);
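+       /*
+        * Program the mmpll1/mmpll3 L/M/N configs; per the qcom
+        * clk-pll API the final argument selects FSM voting mode,
+        * requested here for mmpll1 only.
+        */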
+       clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
+       clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
+
+       return 0;
+}
+
+static int mmcc_apq8084_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+static struct platform_driver mmcc_apq8084_driver = {
+       .probe          = mmcc_apq8084_probe,
+       .remove         = mmcc_apq8084_remove,
+       .driver         = {
+               .name   = "mmcc-apq8084",
+               .owner  = THIS_MODULE,
+               .of_match_table = mmcc_apq8084_match_table,
+       },
+};
+module_platform_driver(mmcc_apq8084_driver);
+
+MODULE_DESCRIPTION("QCOM MMCC APQ8084 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:mmcc-apq8084");
index 12f3c0b..2e80a21 100644
@@ -37,6 +37,9 @@
 #define P_PLL8 1
 #define P_PLL2 2
 #define P_PLL3 3
+#define P_PLL15        3
+
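+/*
+ * F_MN() fills freq_tbl entries by field name; rows with n == 0 (the
+ * 27 MHz PXO entries) bypass the M/N counter entirely.
+ */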
+#define F_MN(f, s, _m, _n) { .freq = f, .src = s, .m = _m, .n = _n }
 
 static u8 mmcc_pxo_pll8_pll2_map[] = {
        [P_PXO]         = 0,
@@ -57,10 +60,24 @@ static u8 mmcc_pxo_pll8_pll2_pll3_map[] = {
        [P_PLL3]        = 3,
 };
 
-static const char *mmcc_pxo_pll8_pll2_pll3[] = {
+static const char *mmcc_pxo_pll8_pll2_pll15[] = {
        "pxo",
+       "pll8_vote",
        "pll2",
+       "pll15",
+};
+
+static u8 mmcc_pxo_pll8_pll2_pll15_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL8]        = 2,
+       [P_PLL2]        = 1,
+       [P_PLL15]       = 3,
+};
+
+static const char *mmcc_pxo_pll8_pll2_pll3[] = {
+       "pxo",
        "pll8_vote",
+       "pll2",
        "pll3",
 };
 
@@ -80,6 +97,36 @@ static struct clk_pll pll2 = {
        },
 };
 
+static struct clk_pll pll15 = {
+       .l_reg = 0x33c,
+       .m_reg = 0x340,
+       .n_reg = 0x344,
+       .config_reg = 0x348,
+       .mode_reg = 0x338,
+       .status_reg = 0x350,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll15",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
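+/*
+ * With the 27 MHz PXO reference, l = 33, m = 1, n = 3 puts pll15 at
+ * 27 * (33 + 1/3) = 900 MHz, which the 8064 gfx3d table taps at
+ * 450 MHz via m/n = 1/2.
+ */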
+static const struct pll_config pll15_config = {
+       .l = 33,
+       .m = 1,
+       .n = 3,
+       .vco_val = 0x2 << 16,
+       .vco_mask = 0x3 << 16,
+       .pre_div_val = 0x0,
+       .pre_div_mask = BIT(19),
+       .post_div_val = 0x0,
+       .post_div_mask = 0x3 << 20,
+       .mn_ena_mask = BIT(22),
+       .main_output_mask = BIT(23),
+};
+
 static struct freq_tbl clk_tbl_cam[] = {
        {   6000000, P_PLL8, 4, 1, 16 },
        {   8000000, P_PLL8, 4, 1, 12 },
@@ -710,18 +757,18 @@ static struct clk_branch csiphy2_timer_clk = {
 };
 
 static struct freq_tbl clk_tbl_gfx2d[] = {
-       {  27000000, P_PXO,  1,  0 },
-       {  48000000, P_PLL8, 1,  8 },
-       {  54857000, P_PLL8, 1,  7 },
-       {  64000000, P_PLL8, 1,  6 },
-       {  76800000, P_PLL8, 1,  5 },
-       {  96000000, P_PLL8, 1,  4 },
-       { 128000000, P_PLL8, 1,  3 },
-       { 145455000, P_PLL2, 2, 11 },
-       { 160000000, P_PLL2, 1,  5 },
-       { 177778000, P_PLL2, 2,  9 },
-       { 200000000, P_PLL2, 1,  4 },
-       { 228571000, P_PLL2, 2,  7 },
+       F_MN( 27000000, P_PXO,  1,  0),
+       F_MN( 48000000, P_PLL8, 1,  8),
+       F_MN( 54857000, P_PLL8, 1,  7),
+       F_MN( 64000000, P_PLL8, 1,  6),
+       F_MN( 76800000, P_PLL8, 1,  5),
+       F_MN( 96000000, P_PLL8, 1,  4),
+       F_MN(128000000, P_PLL8, 1,  3),
+       F_MN(145455000, P_PLL2, 2, 11),
+       F_MN(160000000, P_PLL2, 1,  5),
+       F_MN(177778000, P_PLL2, 2,  9),
+       F_MN(200000000, P_PLL2, 1,  4),
+       F_MN(228571000, P_PLL2, 2,  7),
        { }
 };
 
@@ -842,22 +889,43 @@ static struct clk_branch gfx2d1_clk = {
 };
 
 static struct freq_tbl clk_tbl_gfx3d[] = {
-       {  27000000, P_PXO,  1,  0 },
-       {  48000000, P_PLL8, 1,  8 },
-       {  54857000, P_PLL8, 1,  7 },
-       {  64000000, P_PLL8, 1,  6 },
-       {  76800000, P_PLL8, 1,  5 },
-       {  96000000, P_PLL8, 1,  4 },
-       { 128000000, P_PLL8, 1,  3 },
-       { 145455000, P_PLL2, 2, 11 },
-       { 160000000, P_PLL2, 1,  5 },
-       { 177778000, P_PLL2, 2,  9 },
-       { 200000000, P_PLL2, 1,  4 },
-       { 228571000, P_PLL2, 2,  7 },
-       { 266667000, P_PLL2, 1,  3 },
-       { 300000000, P_PLL3, 1,  4 },
-       { 320000000, P_PLL2, 2,  5 },
-       { 400000000, P_PLL2, 1,  2 },
+       F_MN( 27000000, P_PXO,  1,  0),
+       F_MN( 48000000, P_PLL8, 1,  8),
+       F_MN( 54857000, P_PLL8, 1,  7),
+       F_MN( 64000000, P_PLL8, 1,  6),
+       F_MN( 76800000, P_PLL8, 1,  5),
+       F_MN( 96000000, P_PLL8, 1,  4),
+       F_MN(128000000, P_PLL8, 1,  3),
+       F_MN(145455000, P_PLL2, 2, 11),
+       F_MN(160000000, P_PLL2, 1,  5),
+       F_MN(177778000, P_PLL2, 2,  9),
+       F_MN(200000000, P_PLL2, 1,  4),
+       F_MN(228571000, P_PLL2, 2,  7),
+       F_MN(266667000, P_PLL2, 1,  3),
+       F_MN(300000000, P_PLL3, 1,  4),
+       F_MN(320000000, P_PLL2, 2,  5),
+       F_MN(400000000, P_PLL2, 1,  2),
+       { }
+};
+
+static struct freq_tbl clk_tbl_gfx3d_8064[] = {
+       F_MN( 27000000, P_PXO,   0,  0),
+       F_MN( 48000000, P_PLL8,  1,  8),
+       F_MN( 54857000, P_PLL8,  1,  7),
+       F_MN( 64000000, P_PLL8,  1,  6),
+       F_MN( 76800000, P_PLL8,  1,  5),
+       F_MN( 96000000, P_PLL8,  1,  4),
+       F_MN(128000000, P_PLL8,  1,  3),
+       F_MN(145455000, P_PLL2,  2, 11),
+       F_MN(160000000, P_PLL2,  1,  5),
+       F_MN(177778000, P_PLL2,  2,  9),
+       F_MN(192000000, P_PLL8,  1,  2),
+       F_MN(200000000, P_PLL2,  1,  4),
+       F_MN(228571000, P_PLL2,  2,  7),
+       F_MN(266667000, P_PLL2,  1,  3),
+       F_MN(320000000, P_PLL2,  2,  5),
+       F_MN(400000000, P_PLL2,  1,  2),
+       F_MN(450000000, P_PLL15, 1,  2),
        { }
 };
 
@@ -897,12 +965,19 @@ static struct clk_dyn_rcg gfx3d_src = {
                .hw.init = &(struct clk_init_data){
                        .name = "gfx3d_src",
                        .parent_names = mmcc_pxo_pll8_pll2_pll3,
-                       .num_parents = 3,
+                       .num_parents = 4,
                        .ops = &clk_dyn_rcg_ops,
                },
        },
 };
 
+static const struct clk_init_data gfx3d_8064_init = {
+       .name = "gfx3d_src",
+       .parent_names = mmcc_pxo_pll8_pll2_pll15,
+       .num_parents = 4,
+       .ops = &clk_dyn_rcg_ops,
+};
+
 static struct clk_branch gfx3d_clk = {
        .halt_reg = 0x01c8,
        .halt_bit = 4,
@@ -919,6 +994,91 @@ static struct clk_branch gfx3d_clk = {
        },
 };
 
+static struct freq_tbl clk_tbl_vcap[] = {
+       F_MN( 27000000, P_PXO,  0,  0),
+       F_MN( 54860000, P_PLL8, 1,  7),
+       F_MN( 64000000, P_PLL8, 1,  6),
+       F_MN( 76800000, P_PLL8, 1,  5),
+       F_MN(128000000, P_PLL8, 1,  3),
+       F_MN(160000000, P_PLL2, 1,  5),
+       F_MN(200000000, P_PLL2, 1,  4),
+       { }
+};
+
+static struct clk_dyn_rcg vcap_src = {
+       .ns_reg = 0x021c,
+       .md_reg[0] = 0x01ec,
+       .md_reg[1] = 0x0218,
+       .mn[0] = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 23,
+               .mnctr_mode_shift = 9,
+               .n_val_shift = 18,
+               .m_val_shift = 4,
+               .width = 4,
+       },
+       .mn[1] = {
+               .mnctr_en_bit = 5,
+               .mnctr_reset_bit = 22,
+               .mnctr_mode_shift = 6,
+               .n_val_shift = 14,
+               .m_val_shift = 4,
+               .width = 4,
+       },
+       .s[0] = {
+               .src_sel_shift = 3,
+               .parent_map = mmcc_pxo_pll8_pll2_map,
+       },
+       .s[1] = {
+               .src_sel_shift = 0,
+               .parent_map = mmcc_pxo_pll8_pll2_map,
+       },
+       .mux_sel_bit = 11,
+       .freq_tbl = clk_tbl_vcap,
+       .clkr = {
+               .enable_reg = 0x0178,
+               .enable_mask = BIT(2),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vcap_src",
+                       .parent_names = mmcc_pxo_pll8_pll2,
+                       .num_parents = 3,
+                       .ops = &clk_dyn_rcg_ops,
+               },
+       },
+};
+
+static struct clk_branch vcap_clk = {
+       .halt_reg = 0x0240,
+       .halt_bit = 15,
+       .clkr = {
+               .enable_reg = 0x0178,
+               .enable_mask = BIT(0),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vcap_clk",
+                       .parent_names = (const char *[]){ "vcap_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch vcap_npl_clk = {
+       .halt_reg = 0x0240,
+       .halt_bit = 25,
+       .clkr = {
+               .enable_reg = 0x0178,
+               .enable_mask = BIT(13),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vcap_npl_clk",
+                       .parent_names = (const char *[]){ "vcap_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
 static struct freq_tbl clk_tbl_ijpeg[] = {
        {  27000000, P_PXO,  1, 0,  0 },
        {  36570000, P_PLL8, 1, 2, 21 },
@@ -995,7 +1155,7 @@ static struct clk_rcg jpegd_src = {
        .ns_reg = 0x00ac,
        .p = {
                .pre_div_shift = 12,
-               .pre_div_width = 2,
+               .pre_div_width = 4,
        },
        .s = {
                .src_sel_shift = 0,
@@ -1115,7 +1275,7 @@ static struct clk_branch mdp_lut_clk = {
                .enable_reg = 0x016c,
                .enable_mask = BIT(0),
                .hw.init = &(struct clk_init_data){
-                       .parent_names = (const char *[]){ "mdp_clk" },
+                       .parent_names = (const char *[]){ "mdp_src" },
                        .num_parents = 1,
                        .name = "mdp_lut_clk",
                        .ops = &clk_branch_ops,
@@ -1209,7 +1369,7 @@ static struct clk_branch rot_clk = {
 
 static u8 mmcc_pxo_hdmi_map[] = {
        [P_PXO]         = 0,
-       [P_HDMI_PLL]    = 2,
+       [P_HDMI_PLL]    = 3,
 };
 
 static const char *mmcc_pxo_hdmi[] = {
@@ -1218,12 +1378,7 @@ static const char *mmcc_pxo_hdmi[] = {
 };
 
 static struct freq_tbl clk_tbl_tv[] = {
-       {  25200000, P_HDMI_PLL, 1, 0, 0 },
-       {  27000000, P_HDMI_PLL, 1, 0, 0 },
-       {  27030000, P_HDMI_PLL, 1, 0, 0 },
-       {  74250000, P_HDMI_PLL, 1, 0, 0 },
-       { 108000000, P_HDMI_PLL, 1, 0, 0 },
-       { 148500000, P_HDMI_PLL, 1, 0, 0 },
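+       /*
+        * Single bypass entry: with clk_rcg_bypass_ops the rate is now
+        * programmed in the HDMI PLL itself via CLK_SET_RATE_PARENT.
+        */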
+       {  .src = P_HDMI_PLL, .pre_div = 1 },
        { }
 };
 
@@ -1254,7 +1409,7 @@ static struct clk_rcg tv_src = {
                        .name = "tv_src",
                        .parent_names = mmcc_pxo_hdmi,
                        .num_parents = 2,
-                       .ops = &clk_rcg_ops,
+                       .ops = &clk_rcg_bypass_ops,
                        .flags = CLK_SET_RATE_PARENT,
                },
        },
@@ -1326,6 +1481,38 @@ static struct clk_branch hdmi_tv_clk = {
        },
 };
 
+static struct clk_branch rgb_tv_clk = {
+       .halt_reg = 0x0240,
+       .halt_bit = 27,
+       .clkr = {
+               .enable_reg = 0x0124,
+               .enable_mask = BIT(14),
+               .hw.init = &(struct clk_init_data){
+                       .parent_names = tv_src_name,
+                       .num_parents = 1,
+                       .name = "rgb_tv_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch npl_tv_clk = {
+       .halt_reg = 0x0240,
+       .halt_bit = 26,
+       .clkr = {
+               .enable_reg = 0x0124,
+               .enable_mask = BIT(16),
+               .hw.init = &(struct clk_init_data){
+                       .parent_names = tv_src_name,
+                       .num_parents = 1,
+                       .name = "npl_tv_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
 static struct clk_branch hdmi_app_clk = {
        .halt_reg = 0x01cc,
        .halt_bit = 25,
@@ -1342,15 +1529,15 @@ static struct clk_branch hdmi_app_clk = {
 };
 
 static struct freq_tbl clk_tbl_vcodec[] = {
-       {  27000000, P_PXO,  1,  0 },
-       {  32000000, P_PLL8, 1, 12 },
-       {  48000000, P_PLL8, 1,  8 },
-       {  54860000, P_PLL8, 1,  7 },
-       {  96000000, P_PLL8, 1,  4 },
-       { 133330000, P_PLL2, 1,  6 },
-       { 200000000, P_PLL2, 1,  4 },
-       { 228570000, P_PLL2, 2,  7 },
-       { 266670000, P_PLL2, 1,  3 },
+       F_MN( 27000000, P_PXO,  1,  0),
+       F_MN( 32000000, P_PLL8, 1, 12),
+       F_MN( 48000000, P_PLL8, 1,  8),
+       F_MN( 54860000, P_PLL8, 1,  7),
+       F_MN( 96000000, P_PLL8, 1,  4),
+       F_MN(133330000, P_PLL2, 1,  6),
+       F_MN(200000000, P_PLL2, 1,  4),
+       F_MN(228570000, P_PLL2, 2,  7),
+       F_MN(266670000, P_PLL2, 1,  3),
        { }
 };
 
@@ -1701,6 +1888,22 @@ static struct clk_branch rot_axi_clk = {
        },
 };
 
+static struct clk_branch vcap_axi_clk = {
+       .halt_reg = 0x0240,
+       .halt_bit = 20,
+       .hwcg_reg = 0x0244,
+       .hwcg_bit = 11,
+       .clkr = {
+               .enable_reg = 0x0244,
+               .enable_mask = BIT(12),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vcap_axi_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
 static struct clk_branch vpe_axi_clk = {
        .hwcg_reg = 0x0020,
        .hwcg_bit = 27,
@@ -2003,6 +2206,20 @@ static struct clk_branch tv_enc_ahb_clk = {
        },
 };
 
+static struct clk_branch vcap_ahb_clk = {
+       .halt_reg = 0x0240,
+       .halt_bit = 23,
+       .clkr = {
+               .enable_reg = 0x0248,
+               .enable_mask = BIT(1),
+               .hw.init = &(struct clk_init_data){
+                       .name = "vcap_ahb_clk",
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_IS_ROOT,
+               },
+       },
+};
+
 static struct clk_branch vcodec_ahb_clk = {
        .hwcg_reg = 0x0038,
        .hwcg_bit = 26,
@@ -2215,6 +2432,175 @@ static const struct qcom_reset_map mmcc_msm8960_resets[] = {
        [CSI_RDI2_RESET] = { 0x0214 },
 };
 
+static struct clk_regmap *mmcc_apq8064_clks[] = {
+       [AMP_AHB_CLK] = &amp_ahb_clk.clkr,
+       [DSI2_S_AHB_CLK] = &dsi2_s_ahb_clk.clkr,
+       [JPEGD_AHB_CLK] = &jpegd_ahb_clk.clkr,
+       [DSI_S_AHB_CLK] = &dsi_s_ahb_clk.clkr,
+       [DSI2_M_AHB_CLK] = &dsi2_m_ahb_clk.clkr,
+       [VPE_AHB_CLK] = &vpe_ahb_clk.clkr,
+       [SMMU_AHB_CLK] = &smmu_ahb_clk.clkr,
+       [HDMI_M_AHB_CLK] = &hdmi_m_ahb_clk.clkr,
+       [VFE_AHB_CLK] = &vfe_ahb_clk.clkr,
+       [ROT_AHB_CLK] = &rot_ahb_clk.clkr,
+       [VCODEC_AHB_CLK] = &vcodec_ahb_clk.clkr,
+       [MDP_AHB_CLK] = &mdp_ahb_clk.clkr,
+       [DSI_M_AHB_CLK] = &dsi_m_ahb_clk.clkr,
+       [CSI_AHB_CLK] = &csi_ahb_clk.clkr,
+       [MMSS_IMEM_AHB_CLK] = &mmss_imem_ahb_clk.clkr,
+       [IJPEG_AHB_CLK] = &ijpeg_ahb_clk.clkr,
+       [HDMI_S_AHB_CLK] = &hdmi_s_ahb_clk.clkr,
+       [GFX3D_AHB_CLK] = &gfx3d_ahb_clk.clkr,
+       [JPEGD_AXI_CLK] = &jpegd_axi_clk.clkr,
+       [GMEM_AXI_CLK] = &gmem_axi_clk.clkr,
+       [MDP_AXI_CLK] = &mdp_axi_clk.clkr,
+       [MMSS_IMEM_AXI_CLK] = &mmss_imem_axi_clk.clkr,
+       [IJPEG_AXI_CLK] = &ijpeg_axi_clk.clkr,
+       [GFX3D_AXI_CLK] = &gfx3d_axi_clk.clkr,
+       [VCODEC_AXI_CLK] = &vcodec_axi_clk.clkr,
+       [VFE_AXI_CLK] = &vfe_axi_clk.clkr,
+       [VPE_AXI_CLK] = &vpe_axi_clk.clkr,
+       [ROT_AXI_CLK] = &rot_axi_clk.clkr,
+       [VCODEC_AXI_A_CLK] = &vcodec_axi_a_clk.clkr,
+       [VCODEC_AXI_B_CLK] = &vcodec_axi_b_clk.clkr,
+       [CSI0_SRC] = &csi0_src.clkr,
+       [CSI0_CLK] = &csi0_clk.clkr,
+       [CSI0_PHY_CLK] = &csi0_phy_clk.clkr,
+       [CSI1_SRC] = &csi1_src.clkr,
+       [CSI1_CLK] = &csi1_clk.clkr,
+       [CSI1_PHY_CLK] = &csi1_phy_clk.clkr,
+       [CSI2_SRC] = &csi2_src.clkr,
+       [CSI2_CLK] = &csi2_clk.clkr,
+       [CSI2_PHY_CLK] = &csi2_phy_clk.clkr,
+       [CSI_PIX_CLK] = &csi_pix_clk.clkr,
+       [CSI_RDI_CLK] = &csi_rdi_clk.clkr,
+       [MDP_VSYNC_CLK] = &mdp_vsync_clk.clkr,
+       [HDMI_APP_CLK] = &hdmi_app_clk.clkr,
+       [CSI_PIX1_CLK] = &csi_pix1_clk.clkr,
+       [CSI_RDI2_CLK] = &csi_rdi2_clk.clkr,
+       [CSI_RDI1_CLK] = &csi_rdi1_clk.clkr,
+       [GFX3D_SRC] = &gfx3d_src.clkr,
+       [GFX3D_CLK] = &gfx3d_clk.clkr,
+       [IJPEG_SRC] = &ijpeg_src.clkr,
+       [IJPEG_CLK] = &ijpeg_clk.clkr,
+       [JPEGD_SRC] = &jpegd_src.clkr,
+       [JPEGD_CLK] = &jpegd_clk.clkr,
+       [MDP_SRC] = &mdp_src.clkr,
+       [MDP_CLK] = &mdp_clk.clkr,
+       [MDP_LUT_CLK] = &mdp_lut_clk.clkr,
+       [ROT_SRC] = &rot_src.clkr,
+       [ROT_CLK] = &rot_clk.clkr,
+       [TV_DAC_CLK] = &tv_dac_clk.clkr,
+       [HDMI_TV_CLK] = &hdmi_tv_clk.clkr,
+       [MDP_TV_CLK] = &mdp_tv_clk.clkr,
+       [TV_SRC] = &tv_src.clkr,
+       [VCODEC_SRC] = &vcodec_src.clkr,
+       [VCODEC_CLK] = &vcodec_clk.clkr,
+       [VFE_SRC] = &vfe_src.clkr,
+       [VFE_CLK] = &vfe_clk.clkr,
+       [VFE_CSI_CLK] = &vfe_csi_clk.clkr,
+       [VPE_SRC] = &vpe_src.clkr,
+       [VPE_CLK] = &vpe_clk.clkr,
+       [CAMCLK0_SRC] = &camclk0_src.clkr,
+       [CAMCLK0_CLK] = &camclk0_clk.clkr,
+       [CAMCLK1_SRC] = &camclk1_src.clkr,
+       [CAMCLK1_CLK] = &camclk1_clk.clkr,
+       [CAMCLK2_SRC] = &camclk2_src.clkr,
+       [CAMCLK2_CLK] = &camclk2_clk.clkr,
+       [CSIPHYTIMER_SRC] = &csiphytimer_src.clkr,
+       [CSIPHY2_TIMER_CLK] = &csiphy2_timer_clk.clkr,
+       [CSIPHY1_TIMER_CLK] = &csiphy1_timer_clk.clkr,
+       [CSIPHY0_TIMER_CLK] = &csiphy0_timer_clk.clkr,
+       [PLL2] = &pll2.clkr,
+       [RGB_TV_CLK] = &rgb_tv_clk.clkr,
+       [NPL_TV_CLK] = &npl_tv_clk.clkr,
+       [VCAP_AHB_CLK] = &vcap_ahb_clk.clkr,
+       [VCAP_AXI_CLK] = &vcap_axi_clk.clkr,
+       [VCAP_SRC] = &vcap_src.clkr,
+       [VCAP_CLK] = &vcap_clk.clkr,
+       [VCAP_NPL_CLK] = &vcap_npl_clk.clkr,
+       [PLL15] = &pll15.clkr,
+};
+
+static const struct qcom_reset_map mmcc_apq8064_resets[] = {
+       [GFX3D_AXI_RESET] = { 0x0208, 17 },
+       [VCAP_AXI_RESET] = { 0x0208, 16 },
+       [VPE_AXI_RESET] = { 0x0208, 15 },
+       [IJPEG_AXI_RESET] = { 0x0208, 14 },
+       [MPD_AXI_RESET] = { 0x0208, 13 },
+       [VFE_AXI_RESET] = { 0x0208, 9 },
+       [SP_AXI_RESET] = { 0x0208, 8 },
+       [VCODEC_AXI_RESET] = { 0x0208, 7 },
+       [ROT_AXI_RESET] = { 0x0208, 6 },
+       [VCODEC_AXI_A_RESET] = { 0x0208, 5 },
+       [VCODEC_AXI_B_RESET] = { 0x0208, 4 },
+       [FAB_S3_AXI_RESET] = { 0x0208, 3 },
+       [FAB_S2_AXI_RESET] = { 0x0208, 2 },
+       [FAB_S1_AXI_RESET] = { 0x0208, 1 },
+       [FAB_S0_AXI_RESET] = { 0x0208 },
+       [SMMU_GFX3D_ABH_RESET] = { 0x020c, 31 },
+       [SMMU_VPE_AHB_RESET] = { 0x020c, 30 },
+       [SMMU_VFE_AHB_RESET] = { 0x020c, 29 },
+       [SMMU_ROT_AHB_RESET] = { 0x020c, 28 },
+       [SMMU_VCODEC_B_AHB_RESET] = { 0x020c, 27 },
+       [SMMU_VCODEC_A_AHB_RESET] = { 0x020c, 26 },
+       [SMMU_MDP1_AHB_RESET] = { 0x020c, 25 },
+       [SMMU_MDP0_AHB_RESET] = { 0x020c, 24 },
+       [SMMU_JPEGD_AHB_RESET] = { 0x020c, 23 },
+       [SMMU_IJPEG_AHB_RESET] = { 0x020c, 22 },
+       [APU_AHB_RESET] = { 0x020c, 18 },
+       [CSI_AHB_RESET] = { 0x020c, 17 },
+       [TV_ENC_AHB_RESET] = { 0x020c, 15 },
+       [VPE_AHB_RESET] = { 0x020c, 14 },
+       [FABRIC_AHB_RESET] = { 0x020c, 13 },
+       [GFX3D_AHB_RESET] = { 0x020c, 10 },
+       [HDMI_AHB_RESET] = { 0x020c, 9 },
+       [MSSS_IMEM_AHB_RESET] = { 0x020c, 8 },
+       [IJPEG_AHB_RESET] = { 0x020c, 7 },
+       [DSI_M_AHB_RESET] = { 0x020c, 6 },
+       [DSI_S_AHB_RESET] = { 0x020c, 5 },
+       [JPEGD_AHB_RESET] = { 0x020c, 4 },
+       [MDP_AHB_RESET] = { 0x020c, 3 },
+       [ROT_AHB_RESET] = { 0x020c, 2 },
+       [VCODEC_AHB_RESET] = { 0x020c, 1 },
+       [VFE_AHB_RESET] = { 0x020c, 0 },
+       [SMMU_VCAP_AHB_RESET] = { 0x0200, 3 },
+       [VCAP_AHB_RESET] = { 0x0200, 2 },
+       [DSI2_M_AHB_RESET] = { 0x0200, 1 },
+       [DSI2_S_AHB_RESET] = { 0x0200, 0 },
+       [CSIPHY2_RESET] = { 0x0210, 31 },
+       [CSI_PIX1_RESET] = { 0x0210, 30 },
+       [CSIPHY0_RESET] = { 0x0210, 29 },
+       [CSIPHY1_RESET] = { 0x0210, 28 },
+       [CSI_RDI_RESET] = { 0x0210, 27 },
+       [CSI_PIX_RESET] = { 0x0210, 26 },
+       [DSI2_RESET] = { 0x0210, 25 },
+       [VFE_CSI_RESET] = { 0x0210, 24 },
+       [MDP_RESET] = { 0x0210, 21 },
+       [AMP_RESET] = { 0x0210, 20 },
+       [JPEGD_RESET] = { 0x0210, 19 },
+       [CSI1_RESET] = { 0x0210, 18 },
+       [VPE_RESET] = { 0x0210, 17 },
+       [MMSS_FABRIC_RESET] = { 0x0210, 16 },
+       [VFE_RESET] = { 0x0210, 15 },
+       [GFX3D_RESET] = { 0x0210, 12 },
+       [HDMI_RESET] = { 0x0210, 11 },
+       [MMSS_IMEM_RESET] = { 0x0210, 10 },
+       [IJPEG_RESET] = { 0x0210, 9 },
+       [CSI0_RESET] = { 0x0210, 8 },
+       [DSI_RESET] = { 0x0210, 7 },
+       [VCODEC_RESET] = { 0x0210, 6 },
+       [MDP_TV_RESET] = { 0x0210, 4 },
+       [MDP_VSYNC_RESET] = { 0x0210, 3 },
+       [ROT_RESET] = { 0x0210, 2 },
+       [TV_HDMI_RESET] = { 0x0210, 1 },
+       [VCAP_NPL_RESET] = { 0x0214, 4 },
+       [VCAP_RESET] = { 0x0214, 3 },
+       [CSI2_RESET] = { 0x0214, 2 },
+       [CSI_RDI1_RESET] = { 0x0214, 1 },
+       [CSI_RDI2_RESET] = { 0x0214 },
+};
+
 static const struct regmap_config mmcc_msm8960_regmap_config = {
        .reg_bits       = 32,
        .reg_stride     = 4,
@@ -2223,6 +2609,14 @@ static const struct regmap_config mmcc_msm8960_regmap_config = {
        .fast_io        = true,
 };
 
+static const struct regmap_config mmcc_apq8064_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0x350,
+       .fast_io        = true,
+};
+
 static const struct qcom_cc_desc mmcc_msm8960_desc = {
        .config = &mmcc_msm8960_regmap_config,
        .clks = mmcc_msm8960_clks,
@@ -2231,15 +2625,47 @@ static const struct qcom_cc_desc mmcc_msm8960_desc = {
        .num_resets = ARRAY_SIZE(mmcc_msm8960_resets),
 };
 
+static const struct qcom_cc_desc mmcc_apq8064_desc = {
+       .config = &mmcc_apq8064_regmap_config,
+       .clks = mmcc_apq8064_clks,
+       .num_clks = ARRAY_SIZE(mmcc_apq8064_clks),
+       .resets = mmcc_apq8064_resets,
+       .num_resets = ARRAY_SIZE(mmcc_apq8064_resets),
+};
+
 static const struct of_device_id mmcc_msm8960_match_table[] = {
-       { .compatible = "qcom,mmcc-msm8960" },
+       { .compatible = "qcom,mmcc-msm8960", .data = &mmcc_msm8960_desc },
+       { .compatible = "qcom,mmcc-apq8064", .data = &mmcc_apq8064_desc },
        { }
 };
 MODULE_DEVICE_TABLE(of, mmcc_msm8960_match_table);
 
 static int mmcc_msm8960_probe(struct platform_device *pdev)
 {
-       return qcom_cc_probe(pdev, &mmcc_msm8960_desc);
+       const struct of_device_id *match;
+       struct regmap *regmap;
+       bool is_8064;
+       struct device *dev = &pdev->dev;
+
+       match = of_match_device(mmcc_msm8960_match_table, dev);
+       if (!match)
+               return -EINVAL;
+
+       is_8064 = of_device_is_compatible(dev->of_node, "qcom,mmcc-apq8064");
+       if (is_8064) {
+               gfx3d_src.freq_tbl = clk_tbl_gfx3d_8064;
+               gfx3d_src.clkr.hw.init = &gfx3d_8064_init;
+               gfx3d_src.s[0].parent_map = mmcc_pxo_pll8_pll2_pll15_map;
+               gfx3d_src.s[1].parent_map = mmcc_pxo_pll8_pll2_pll15_map;
+       }
+
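+       /*
+        * Map the registers first so pll15 can be programmed before
+        * the clocks are registered.
+        */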
+       regmap = qcom_cc_map(pdev, match->data);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       clk_pll_configure_sr(&pll15, regmap, &pll15_config, false);
+
+       return qcom_cc_really_probe(pdev, match->data, regmap);
 }
 
 static int mmcc_msm8960_remove(struct platform_device *pdev)
index c65b905..bc8f519 100644
@@ -2547,18 +2547,16 @@ MODULE_DEVICE_TABLE(of, mmcc_msm8974_match_table);
 
 static int mmcc_msm8974_probe(struct platform_device *pdev)
 {
-       int ret;
        struct regmap *regmap;
 
-       ret = qcom_cc_probe(pdev, &mmcc_msm8974_desc);
-       if (ret)
-               return ret;
+       regmap = qcom_cc_map(pdev, &mmcc_msm8974_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
 
-       regmap = dev_get_regmap(&pdev->dev, NULL);
        clk_pll_configure_sr_hpm_lp(&mmpll1, regmap, &mmpll1_config, true);
        clk_pll_configure_sr_hpm_lp(&mmpll3, regmap, &mmpll3_config, false);
 
-       return 0;
+       return qcom_cc_really_probe(pdev, &mmcc_msm8974_desc, regmap);
 }
 
 static int mmcc_msm8974_remove(struct platform_device *pdev)
index 8d3aefa..ee6b077 100644
@@ -3,3 +3,9 @@
 #
 
 obj-y  += clk-rockchip.o
+obj-y  += clk.o
+obj-y  += clk-pll.o
+obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
+
+obj-y  += clk-rk3188.o
+obj-y  += clk-rk3288.o
diff --git a/drivers/clk/rockchip/clk-pll.c b/drivers/clk/rockchip/clk-pll.c
new file mode 100644
index 0000000..f2a1c7a
--- /dev/null
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <asm/div64.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+#include "clk.h"
+
+#define PLL_MODE_MASK          0x3
+#define PLL_MODE_SLOW          0x0
+#define PLL_MODE_NORM          0x1
+#define PLL_MODE_DEEP          0x2
+
+struct rockchip_clk_pll {
+       struct clk_hw           hw;
+
+       struct clk_mux          pll_mux;
+       const struct clk_ops    *pll_mux_ops;
+
+       struct notifier_block   clk_nb;
+       bool                    rate_change_remuxed;
+
+       void __iomem            *reg_base;
+       int                     lock_offset;
+       unsigned int            lock_shift;
+       enum rockchip_pll_type  type;
+       const struct rockchip_pll_rate_table *rate_table;
+       unsigned int            rate_count;
+       spinlock_t              *lock;
+};
+
+#define to_rockchip_clk_pll(_hw) container_of(_hw, struct rockchip_clk_pll, hw)
+#define to_rockchip_clk_pll_nb(nb) \
+                       container_of(nb, struct rockchip_clk_pll, clk_nb)
+
+static const struct rockchip_pll_rate_table *rockchip_get_pll_settings(
+                           struct rockchip_clk_pll *pll, unsigned long rate)
+{
+       const struct rockchip_pll_rate_table  *rate_table = pll->rate_table;
+       int i;
+
+       for (i = 0; i < pll->rate_count; i++) {
+               if (rate == rate_table[i].rate)
+                       return &rate_table[i];
+       }
+
+       return NULL;
+}
+
+static long rockchip_pll_round_rate(struct clk_hw *hw,
+                           unsigned long drate, unsigned long *prate)
+{
+       struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+       const struct rockchip_pll_rate_table *rate_table = pll->rate_table;
+       int i;
+
+       /* Assuming rate_table is in descending order */
+       for (i = 0; i < pll->rate_count; i++) {
+               if (drate >= rate_table[i].rate)
+                       return rate_table[i].rate;
+       }
+
+       /*
+        * Fall back to the minimum supported rate; plls registered
+        * without a rate table use the norate ops and never get here.
+        */
+       return rate_table[i - 1].rate;
+}
+
+/*
+ * Wait for the pll to reach the locked state.
+ * The calling set_rate function is responsible for making sure the
+ * grf regmap is available.
+ */
+static int rockchip_pll_wait_lock(struct rockchip_clk_pll *pll)
+{
+       struct regmap *grf = rockchip_clk_get_grf();
+       unsigned int val;
+       int delay = 24000000, ret;
+
+       while (delay > 0) {
+               ret = regmap_read(grf, pll->lock_offset, &val);
+               if (ret) {
+                       pr_err("%s: failed to read pll lock status: %d\n",
+                              __func__, ret);
+                       return ret;
+               }
+
+               if (val & BIT(pll->lock_shift))
+                       return 0;
+               delay--;
+       }
+
+       pr_err("%s: timeout waiting for pll to lock\n", __func__);
+       return -ETIMEDOUT;
+}
+
+/*
+ * Set pll mux when changing the pll rate.
+ * This makes sure to move the pll mux away from the actual pll before
+ * changing its rate and back to the original parent after the change.
+ */
+static int rockchip_pll_notifier_cb(struct notifier_block *nb,
+                                       unsigned long event, void *data)
+{
+       struct rockchip_clk_pll *pll = to_rockchip_clk_pll_nb(nb);
+       struct clk_mux *pll_mux = &pll->pll_mux;
+       const struct clk_ops *pll_mux_ops = pll->pll_mux_ops;
+       int cur_parent;
+
+       switch (event) {
+       case PRE_RATE_CHANGE:
+               cur_parent = pll_mux_ops->get_parent(&pll_mux->hw);
+               if (cur_parent == PLL_MODE_NORM) {
+                       pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_SLOW);
+                       pll->rate_change_remuxed = 1;
+               }
+               break;
+       case POST_RATE_CHANGE:
+               if (pll->rate_change_remuxed) {
+                       pll_mux_ops->set_parent(&pll_mux->hw, PLL_MODE_NORM);
+                       pll->rate_change_remuxed = 0;
+               }
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+/*
+ * PLL used in RK3066, RK3188 and RK3288
+ */
+
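+/* ~500 cycles of the NR-divided 24 MHz reference, expressed in us */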
+#define RK3066_PLL_RESET_DELAY(nr)     (((nr) * 500) / 24 + 1)
+
+#define RK3066_PLLCON(i)               ((i) * 0x4)
+#define RK3066_PLLCON0_OD_MASK         0xf
+#define RK3066_PLLCON0_OD_SHIFT                0
+#define RK3066_PLLCON0_NR_MASK         0x3f
+#define RK3066_PLLCON0_NR_SHIFT                8
+#define RK3066_PLLCON1_NF_MASK         0x1fff
+#define RK3066_PLLCON1_NF_SHIFT                0
+#define RK3066_PLLCON2_BWADJ_MASK      0xfff
+#define RK3066_PLLCON2_BWADJ_SHIFT     0
+#define RK3066_PLLCON3_RESET           (1 << 5)
+#define RK3066_PLLCON3_PWRDOWN         (1 << 1)
+#define RK3066_PLLCON3_BYPASS          (1 << 0)
+
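+/*
+ * Fout = Fref * (NF + 1) / ((NR + 1) * (NO + 1)); the registers hold
+ * each value minus one, while rockchip_pll_rate_table entries carry
+ * the raw NR/NF/NO numbers (set_rate subtracts the one).
+ */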
+static unsigned long rockchip_rk3066_pll_recalc_rate(struct clk_hw *hw,
+                                                    unsigned long prate)
+{
+       struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+       u64 nf, nr, no, rate64 = prate;
+       u32 pllcon;
+
+       pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(3));
+       if (pllcon & RK3066_PLLCON3_BYPASS) {
+               pr_debug("%s: pll %s is bypassed\n", __func__,
+                       __clk_get_name(hw->clk));
+               return prate;
+       }
+
+       pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
+       nf = (pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK;
+
+       pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
+       nr = (pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK;
+       no = (pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK;
+
+       rate64 *= (nf + 1);
+       do_div(rate64, nr + 1);
+       do_div(rate64, no + 1);
+
+       return (unsigned long)rate64;
+}
+
+static int rockchip_rk3066_pll_set_rate(struct clk_hw *hw, unsigned long drate,
+                                       unsigned long prate)
+{
+       struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+       const struct rockchip_pll_rate_table *rate;
+       unsigned long old_rate = rockchip_rk3066_pll_recalc_rate(hw, prate);
+       struct regmap *grf = rockchip_clk_get_grf();
+       int ret;
+
+       if (IS_ERR(grf)) {
+               pr_debug("%s: grf regmap not available, aborting rate change\n",
+                        __func__);
+               return PTR_ERR(grf);
+       }
+
+       pr_debug("%s: changing %s from %lu to %lu with a parent rate of %lu\n",
+                __func__, __clk_get_name(hw->clk), old_rate, drate, prate);
+
+       /* Get required rate settings from table */
+       rate = rockchip_get_pll_settings(pll, drate);
+       if (!rate) {
+               pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+                       drate, __clk_get_name(hw->clk));
+               return -EINVAL;
+       }
+
+       pr_debug("%s: rate settings for %lu (nr, no, nf): (%d, %d, %d)\n",
+                __func__, rate->rate, rate->nr, rate->no, rate->nf);
+
+       /* enter reset mode */
+       writel(HIWORD_UPDATE(RK3066_PLLCON3_RESET, RK3066_PLLCON3_RESET, 0),
+              pll->reg_base + RK3066_PLLCON(3));
+
+       /* update pll values */
+       writel(HIWORD_UPDATE(rate->nr - 1, RK3066_PLLCON0_NR_MASK,
+                                          RK3066_PLLCON0_NR_SHIFT) |
+              HIWORD_UPDATE(rate->no - 1, RK3066_PLLCON0_OD_MASK,
+                                          RK3066_PLLCON0_OD_SHIFT),
+              pll->reg_base + RK3066_PLLCON(0));
+
+       writel_relaxed(HIWORD_UPDATE(rate->nf - 1, RK3066_PLLCON1_NF_MASK,
+                                                  RK3066_PLLCON1_NF_SHIFT),
+                      pll->reg_base + RK3066_PLLCON(1));
+       writel_relaxed(HIWORD_UPDATE(rate->bwadj, RK3066_PLLCON2_BWADJ_MASK,
+                                                 RK3066_PLLCON2_BWADJ_SHIFT),
+                      pll->reg_base + RK3066_PLLCON(2));
+
+       /* leave reset and wait the reset_delay */
+       writel(HIWORD_UPDATE(0, RK3066_PLLCON3_RESET, 0),
+              pll->reg_base + RK3066_PLLCON(3));
+       udelay(RK3066_PLL_RESET_DELAY(rate->nr));
+
+       /* wait for the pll to lock */
+       ret = rockchip_pll_wait_lock(pll);
+       if (ret) {
+               pr_warn("%s: pll did not lock, trying to restore old rate %lu\n",
+                       __func__, old_rate);
+               rockchip_rk3066_pll_set_rate(hw, old_rate, prate);
+       }
+
+       return ret;
+}
+
+static int rockchip_rk3066_pll_enable(struct clk_hw *hw)
+{
+       struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+       writel(HIWORD_UPDATE(0, RK3066_PLLCON3_PWRDOWN, 0),
+              pll->reg_base + RK3066_PLLCON(3));
+
+       return 0;
+}
+
+static void rockchip_rk3066_pll_disable(struct clk_hw *hw)
+{
+       struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+
+       writel(HIWORD_UPDATE(RK3066_PLLCON3_PWRDOWN,
+                            RK3066_PLLCON3_PWRDOWN, 0),
+              pll->reg_base + RK3066_PLLCON(3));
+}
+
+static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
+{
+       struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+       u32 pllcon = readl(pll->reg_base + RK3066_PLLCON(3));
+
+       return !(pllcon & RK3066_PLLCON3_PWRDOWN);
+}
+
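+/*
+ * Used when no rate table is supplied: the pll can be gated and its
+ * rate read back, but not changed.
+ */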
+static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
+       .recalc_rate = rockchip_rk3066_pll_recalc_rate,
+       .enable = rockchip_rk3066_pll_enable,
+       .disable = rockchip_rk3066_pll_disable,
+       .is_enabled = rockchip_rk3066_pll_is_enabled,
+};
+
+static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
+       .recalc_rate = rockchip_rk3066_pll_recalc_rate,
+       .round_rate = rockchip_pll_round_rate,
+       .set_rate = rockchip_rk3066_pll_set_rate,
+       .enable = rockchip_rk3066_pll_enable,
+       .disable = rockchip_rk3066_pll_disable,
+       .is_enabled = rockchip_rk3066_pll_is_enabled,
+};
+
+/*
+ * Common registering of pll clocks
+ */
+
+struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
+               const char *name, const char **parent_names, u8 num_parents,
+               void __iomem *base, int con_offset, int grf_lock_offset,
+               int lock_shift, int mode_offset, int mode_shift,
+               struct rockchip_pll_rate_table *rate_table,
+               spinlock_t *lock)
+{
+       const char *pll_parents[3];
+       struct clk_init_data init;
+       struct rockchip_clk_pll *pll;
+       struct clk_mux *pll_mux;
+       struct clk *pll_clk, *mux_clk;
+       char pll_name[20];
+       int ret;
+
+       if (num_parents != 2) {
+               pr_err("%s: needs two parent clocks\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       /* name the actual pll */
+       snprintf(pll_name, sizeof(pll_name), "pll_%s", name);
+
+       pll = kzalloc(sizeof(*pll), GFP_KERNEL);
+       if (!pll)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = pll_name;
+
+       /* keep all plls untouched for now */
+       init.flags = CLK_IGNORE_UNUSED;
+
+       init.parent_names = &parent_names[0];
+       init.num_parents = 1;
+
+       if (rate_table) {
+               int len;
+
+               /* find count of rates in rate_table */
+               for (len = 0; rate_table[len].rate != 0; )
+                       len++;
+
+               pll->rate_count = len;
+               pll->rate_table = kmemdup(rate_table,
+                                       pll->rate_count *
+                                       sizeof(struct rockchip_pll_rate_table),
+                                       GFP_KERNEL);
+               WARN(!pll->rate_table,
+                       "%s: could not allocate rate table for %s\n",
+                       __func__, name);
+       }
+
+       switch (pll_type) {
+       case pll_rk3066:
+               if (!pll->rate_table)
+                       init.ops = &rockchip_rk3066_pll_clk_norate_ops;
+               else
+                       init.ops = &rockchip_rk3066_pll_clk_ops;
+               break;
+       default:
+               pr_warn("%s: Unknown pll type for pll clk %s\n",
+                       __func__, name);
+       }
+
+       pll->hw.init = &init;
+       pll->type = pll_type;
+       pll->reg_base = base + con_offset;
+       pll->lock_offset = grf_lock_offset;
+       pll->lock_shift = lock_shift;
+       pll->lock = lock;
+       pll->clk_nb.notifier_call = rockchip_pll_notifier_cb;
+
+       pll_clk = clk_register(NULL, &pll->hw);
+       if (IS_ERR(pll_clk)) {
+               pr_err("%s: failed to register pll clock %s : %ld\n",
+                       __func__, name, PTR_ERR(pll_clk));
+               mux_clk = pll_clk;
+               goto err_pll;
+       }
+
+       ret = clk_notifier_register(pll_clk, &pll->clk_nb);
+       if (ret) {
+               pr_err("%s: failed to register clock notifier for %s : %d\n",
+                               __func__, name, ret);
+               mux_clk = ERR_PTR(ret);
+               goto err_pll_notifier;
+       }
+
+       /* create the mux on top of the real pll */
+       pll->pll_mux_ops = &clk_mux_ops;
+       pll_mux = &pll->pll_mux;
+
+       /* the actual muxing is xin24m, pll-output, xin32k */
+       pll_parents[0] = parent_names[0];
+       pll_parents[1] = pll_name;
+       pll_parents[2] = parent_names[1];
+
+       init.name = name;
+       init.flags = CLK_SET_RATE_PARENT;
+       init.ops = pll->pll_mux_ops;
+       init.parent_names = pll_parents;
+       init.num_parents = ARRAY_SIZE(pll_parents);
+
+       pll_mux->reg = base + mode_offset;
+       pll_mux->shift = mode_shift;
+       pll_mux->mask = PLL_MODE_MASK;
+       pll_mux->flags = 0;
+       pll_mux->lock = lock;
+       pll_mux->hw.init = &init;
+
+       if (pll_type == pll_rk3066)
+               pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
+       mux_clk = clk_register(NULL, &pll_mux->hw);
+       if (IS_ERR(mux_clk))
+               goto err_mux;
+
+       return mux_clk;
+
+err_mux:
+       ret = clk_notifier_unregister(pll_clk, &pll->clk_nb);
+       if (ret) {
+               pr_err("%s: could not unregister clock notifier in error path : %d\n",
+                      __func__, ret);
+               return mux_clk;
+       }
+err_pll_notifier:
+       clk_unregister(pll_clk);
+err_pll:
+       kfree(pll);
+       return mux_clk;
+}
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
new file mode 100644 (file)
index 0000000..a83a6d8
--- /dev/null
@@ -0,0 +1,693 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/rk3188-cru-common.h>
+#include "clk.h"
+
+#define RK3188_GRF_SOC_STATUS  0xac
+
+enum rk3188_plls {
+       apll, cpll, dpll, gpll,
+};
+
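+/*
+ * Entries are RK3066_PLL_RATE(rate, nr, nf, no), with
+ * rate = 24 MHz (xin24m) * nf / (nr * no).
+ */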
+static struct rockchip_pll_rate_table rk3188_pll_rates[] = {
+       RK3066_PLL_RATE(2208000000, 1, 92, 1),
+       RK3066_PLL_RATE(2184000000, 1, 91, 1),
+       RK3066_PLL_RATE(2160000000, 1, 90, 1),
+       RK3066_PLL_RATE(2136000000, 1, 89, 1),
+       RK3066_PLL_RATE(2112000000, 1, 88, 1),
+       RK3066_PLL_RATE(2088000000, 1, 87, 1),
+       RK3066_PLL_RATE(2064000000, 1, 86, 1),
+       RK3066_PLL_RATE(2040000000, 1, 85, 1),
+       RK3066_PLL_RATE(2016000000, 1, 84, 1),
+       RK3066_PLL_RATE(1992000000, 1, 83, 1),
+       RK3066_PLL_RATE(1968000000, 1, 82, 1),
+       RK3066_PLL_RATE(1944000000, 1, 81, 1),
+       RK3066_PLL_RATE(1920000000, 1, 80, 1),
+       RK3066_PLL_RATE(1896000000, 1, 79, 1),
+       RK3066_PLL_RATE(1872000000, 1, 78, 1),
+       RK3066_PLL_RATE(1848000000, 1, 77, 1),
+       RK3066_PLL_RATE(1824000000, 1, 76, 1),
+       RK3066_PLL_RATE(1800000000, 1, 75, 1),
+       RK3066_PLL_RATE(1776000000, 1, 74, 1),
+       RK3066_PLL_RATE(1752000000, 1, 73, 1),
+       RK3066_PLL_RATE(1728000000, 1, 72, 1),
+       RK3066_PLL_RATE(1704000000, 1, 71, 1),
+       RK3066_PLL_RATE(1680000000, 1, 70, 1),
+       RK3066_PLL_RATE(1656000000, 1, 69, 1),
+       RK3066_PLL_RATE(1632000000, 1, 68, 1),
+       RK3066_PLL_RATE(1608000000, 1, 67, 1),
+       RK3066_PLL_RATE(1560000000, 1, 65, 1),
+       RK3066_PLL_RATE(1512000000, 1, 63, 1),
+       RK3066_PLL_RATE(1488000000, 1, 62, 1),
+       RK3066_PLL_RATE(1464000000, 1, 61, 1),
+       RK3066_PLL_RATE(1440000000, 1, 60, 1),
+       RK3066_PLL_RATE(1416000000, 1, 59, 1),
+       RK3066_PLL_RATE(1392000000, 1, 58, 1),
+       RK3066_PLL_RATE(1368000000, 1, 57, 1),
+       RK3066_PLL_RATE(1344000000, 1, 56, 1),
+       RK3066_PLL_RATE(1320000000, 1, 55, 1),
+       RK3066_PLL_RATE(1296000000, 1, 54, 1),
+       RK3066_PLL_RATE(1272000000, 1, 53, 1),
+       RK3066_PLL_RATE(1248000000, 1, 52, 1),
+       RK3066_PLL_RATE(1224000000, 1, 51, 1),
+       RK3066_PLL_RATE(1200000000, 1, 50, 1),
+       RK3066_PLL_RATE(1188000000, 2, 99, 1),
+       RK3066_PLL_RATE(1176000000, 1, 49, 1),
+       RK3066_PLL_RATE(1128000000, 1, 47, 1),
+       RK3066_PLL_RATE(1104000000, 1, 46, 1),
+       RK3066_PLL_RATE(1008000000, 1, 84, 2),
+       RK3066_PLL_RATE( 912000000, 1, 76, 2),
+       RK3066_PLL_RATE( 891000000, 8, 594, 2),
+       RK3066_PLL_RATE( 888000000, 1, 74, 2),
+       RK3066_PLL_RATE( 816000000, 1, 68, 2),
+       RK3066_PLL_RATE( 798000000, 2, 133, 2),
+       RK3066_PLL_RATE( 792000000, 1, 66, 2),
+       RK3066_PLL_RATE( 768000000, 1, 64, 2),
+       RK3066_PLL_RATE( 742500000, 8, 495, 2),
+       RK3066_PLL_RATE( 696000000, 1, 58, 2),
+       RK3066_PLL_RATE( 600000000, 1, 50, 2),
+       RK3066_PLL_RATE( 594000000, 2, 198, 4),
+       RK3066_PLL_RATE( 552000000, 1, 46, 2),
+       RK3066_PLL_RATE( 504000000, 1, 84, 4),
+       RK3066_PLL_RATE( 456000000, 1, 76, 4),
+       RK3066_PLL_RATE( 408000000, 1, 68, 4),
+       RK3066_PLL_RATE( 384000000, 2, 128, 4),
+       RK3066_PLL_RATE( 360000000, 1, 60, 4),
+       RK3066_PLL_RATE( 312000000, 1, 52, 4),
+       RK3066_PLL_RATE( 300000000, 1, 50, 4),
+       RK3066_PLL_RATE( 297000000, 2, 198, 8),
+       RK3066_PLL_RATE( 252000000, 1, 84, 8),
+       RK3066_PLL_RATE( 216000000, 1, 72, 8),
+       RK3066_PLL_RATE( 148500000, 2, 99, 8),
+       RK3066_PLL_RATE( 126000000, 1, 84, 16),
+       RK3066_PLL_RATE(  48000000, 1, 64, 32),
+       { /* sentinel */ },
+};
+
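+/* each parent list below is indexed by the value of the corresponding mux field */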
+PNAME(mux_pll_p)               = { "xin24m", "xin32k" };
+PNAME(mux_armclk_p)            = { "apll", "gpll_armclk" };
+PNAME(mux_ddrphy_p)            = { "dpll", "gpll_ddr" };
+PNAME(mux_pll_src_gpll_cpll_p) = { "gpll", "cpll" };
+PNAME(mux_pll_src_cpll_gpll_p) = { "cpll", "gpll" };
+PNAME(mux_aclk_cpu_p)          = { "apll", "gpll" };
+PNAME(mux_sclk_cif0_p)         = { "cif0_pre", "xin24m" };
+PNAME(mux_sclk_i2s0_p)         = { "i2s0_pre", "i2s0_frac", "xin12m" };
+PNAME(mux_sclk_spdif_p)                = { "spdif_pre", "spdif_frac", "xin12m" };
+PNAME(mux_sclk_uart0_p)                = { "uart0_pre", "uart0_frac", "xin24m" };
+PNAME(mux_sclk_uart1_p)                = { "uart1_pre", "uart1_frac", "xin24m" };
+PNAME(mux_sclk_uart2_p)                = { "uart2_pre", "uart2_frac", "xin24m" };
+PNAME(mux_sclk_uart3_p)                = { "uart3_pre", "uart3_frac", "xin24m" };
+PNAME(mux_sclk_hsadc_p)                = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
+PNAME(mux_mac_p)               = { "gpll", "dpll" };
+PNAME(mux_sclk_macref_p)       = { "mac_src", "ext_rmii" };
+
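+/*
+ * PLL(type, id, name, parents, flags, con offset, mode offset, mode shift,
+ * lock shift, rate table); the lock shift indexes the pll's lock
+ * indication bit inside GRF_SOC_STATUS.
+ */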
+static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
+       [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+                    RK2928_MODE_CON, 0, 6, rk3188_pll_rates),
+       [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+                    RK2928_MODE_CON, 4, 5, NULL),
+       [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+                    RK2928_MODE_CON, 8, 7, rk3188_pll_rates),
+       [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+                    RK2928_MODE_CON, 12, 8, rk3188_pll_rates),
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
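+/*
+ * The CRU registers use their upper 16 bits as a write mask for the
+ * lower 16 bits, hence the hiword-mask flags on all muxes, dividers
+ * and gates; the gate bits disable a clock when set.
+ */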
+
+/* 2 ^ (val + 1) */
+static struct clk_div_table div_core_peri_t[] = {
+       { .val = 0, .div = 2 },
+       { .val = 1, .div = 4 },
+       { .val = 2, .div = 8 },
+       { .val = 3, .div = 16 },
+       { /* sentinel */ },
+};
+
+static struct rockchip_clk_branch common_clk_branches[] __initdata = {
+       /*
+        * Clock-Architecture Diagram 2
+        */
+
+       GATE(0, "gpll_armclk", "gpll", 0, RK2928_CLKGATE_CON(0), 1, GFLAGS),
+
+       /* these two are set by the cpuclk and should not be changed */
+       COMPOSITE_NOMUX_DIVTBL(CORE_PERI, "core_peri", "armclk", 0,
+                       RK2928_CLKSEL_CON(0), 6, 2, DFLAGS | CLK_DIVIDER_READ_ONLY,
+                       div_core_peri_t, RK2928_CLKGATE_CON(0), 0, GFLAGS),
+
+       COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(32), 7, 1, MFLAGS, 0, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 9, GFLAGS),
+       GATE(0, "hclk_vepu", "aclk_vepu", 0,
+                       RK2928_CLKGATE_CON(3), 10, GFLAGS),
+       COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(32), 15, 1, MFLAGS, 8, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 11, GFLAGS),
+       GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
+                       RK2928_CLKGATE_CON(3), 12, GFLAGS),
+
+       GATE(0, "gpll_ddr", "gpll", 0,
+                       RK2928_CLKGATE_CON(1), 7, GFLAGS),
+       COMPOSITE(0, "ddrphy", mux_ddrphy_p, 0,
+                       RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+                       RK2928_CLKGATE_CON(0), 2, GFLAGS),
+
+       GATE(0, "aclk_cpu", "aclk_cpu_pre", 0,
+                       RK2928_CLKGATE_CON(0), 3, GFLAGS),
+
+       DIV(0, "pclk_cpu_pre", "aclk_cpu_pre", 0,
+                       RK2928_CLKSEL_CON(1), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+       GATE(0, "atclk_cpu", "pclk_cpu_pre", 0,
+                       RK2928_CLKGATE_CON(0), 6, GFLAGS),
+       GATE(0, "pclk_cpu", "pclk_cpu_pre", 0,
+                       RK2928_CLKGATE_CON(0), 5, GFLAGS),
+       DIV(0, "hclk_cpu_pre", "aclk_cpu_pre", 0,
+                       RK2928_CLKSEL_CON(1), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+       COMPOSITE_NOMUX(0, "hclk_ahb2apb", "hclk_cpu_pre", 0,
+                       RK2928_CLKSEL_CON(1), 14, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+                       RK2928_CLKGATE_CON(4), 9, GFLAGS),
+       GATE(0, "hclk_cpu", "hclk_cpu_pre", 0,
+                       RK2928_CLKGATE_CON(0), 4, GFLAGS),
+
+       COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 0, GFLAGS),
+       COMPOSITE(0, "aclk_lcdc1_pre", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(31), 15, 1, MFLAGS, 8, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(1), 4, GFLAGS),
+
+       GATE(0, "aclk_peri", "aclk_peri_pre", 0,
+                       RK2928_CLKGATE_CON(2), 1, GFLAGS),
+       COMPOSITE_NOMUX(0, "hclk_peri", "aclk_peri_pre", 0,
+                       RK2928_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+                       RK2928_CLKGATE_CON(2), 2, GFLAGS),
+       COMPOSITE_NOMUX(0, "pclk_peri", "aclk_peri_pre", 0,
+                       RK2928_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+                       RK2928_CLKGATE_CON(2), 3, GFLAGS),
+
+       MUX(0, "cif_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(29), 0, 1, MFLAGS),
+       COMPOSITE_NOMUX(0, "cif0_pre", "cif_src", 0,
+                       RK2928_CLKSEL_CON(29), 1, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 7, GFLAGS),
+       MUX(SCLK_CIF0, "sclk_cif0", mux_sclk_cif0_p, 0,
+                       RK2928_CLKSEL_CON(29), 7, 1, MFLAGS),
+
+       GATE(0, "pclkin_cif0", "ext_cif0", 0,
+                       RK2928_CLKGATE_CON(3), 3, GFLAGS),
+
+       /*
+        * the 480m clocks are generated inside the usb block from these
+        * clocks, but they also act as a source for the hsicphy clock.
+        */
+       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+                       RK2928_CLKGATE_CON(1), 5, GFLAGS),
+       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+                       RK2928_CLKGATE_CON(1), 6, GFLAGS),
+
+       COMPOSITE(0, "mac_src", mux_mac_p, 0,
+                       RK2928_CLKSEL_CON(21), 0, 1, MFLAGS, 8, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 5, GFLAGS),
+       MUX(SCLK_MAC, "sclk_macref", mux_sclk_macref_p, CLK_SET_RATE_PARENT,
+                       RK2928_CLKSEL_CON(21), 4, 1, MFLAGS),
+       GATE(0, "sclk_mac_lbtest", "sclk_macref",
+                       RK2928_CLKGATE_CON(2), 12, 0, GFLAGS),
+
+       COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0,
+                       RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 6, GFLAGS),
+       COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src",
+                       RK2928_CLKSEL_CON(23), 0,
+                       RK2928_CLKGATE_CON(2), 7, 0, GFLAGS),
+       MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
+                       RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
+
+       COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
+                       RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 8, GFLAGS),
+
+       /*
+        * Clock-Architecture Diagram 4
+        */
+
+       GATE(SCLK_SMC, "sclk_smc", "hclk_peri", 0,
+                       RK2928_CLKGATE_CON(2), 4, GFLAGS),
+
+       COMPOSITE_NOMUX(SCLK_SPI0, "sclk_spi0", "pclk_peri", 0,
+                       RK2928_CLKSEL_CON(25), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 9, GFLAGS),
+       COMPOSITE_NOMUX(SCLK_SPI1, "sclk_spi1", "pclk_peri", 0,
+                       RK2928_CLKSEL_CON(25), 8, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 10, GFLAGS),
+
+       COMPOSITE_NOMUX(SCLK_SDMMC, "sclk_sdmmc", "hclk_peri", 0,
+                       RK2928_CLKSEL_CON(11), 0, 6, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 11, GFLAGS),
+       COMPOSITE_NOMUX(SCLK_SDIO, "sclk_sdio", "hclk_peri", 0,
+                       RK2928_CLKSEL_CON(12), 0, 6, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 13, GFLAGS),
+       COMPOSITE_NOMUX(SCLK_EMMC, "sclk_emmc", "hclk_peri", 0,
+                       RK2928_CLKSEL_CON(12), 8, 6, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 14, GFLAGS),
+
+       MUX(0, "uart_src", mux_pll_src_gpll_cpll_p, 0,
+                       RK2928_CLKSEL_CON(12), 15, 1, MFLAGS),
+       COMPOSITE_NOMUX(0, "uart0_pre", "uart_src", 0,
+                       RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(1), 8, GFLAGS),
+       COMPOSITE_FRAC(0, "uart0_frac", "uart0_pre", 0,
+                       RK2928_CLKSEL_CON(17), 0,
+                       RK2928_CLKGATE_CON(1), 9, GFLAGS),
+       MUX(SCLK_UART0, "sclk_uart0", mux_sclk_uart0_p, 0,
+                       RK2928_CLKSEL_CON(13), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "uart1_pre", "uart_src", 0,
+                       RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(1), 10, GFLAGS),
+       COMPOSITE_FRAC(0, "uart1_frac", "uart1_pre", 0,
+                       RK2928_CLKSEL_CON(18), 0,
+                       RK2928_CLKGATE_CON(1), 11, GFLAGS),
+       MUX(SCLK_UART1, "sclk_uart1", mux_sclk_uart1_p, 0,
+                       RK2928_CLKSEL_CON(14), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "uart2_pre", "uart_src", 0,
+                       RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(1), 12, GFLAGS),
+       COMPOSITE_FRAC(0, "uart2_frac", "uart2_pre", 0,
+                       RK2928_CLKSEL_CON(19), 0,
+                       RK2928_CLKGATE_CON(1), 13, GFLAGS),
+       MUX(SCLK_UART2, "sclk_uart2", mux_sclk_uart2_p, 0,
+                       RK2928_CLKSEL_CON(15), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "uart3_pre", "uart_src", 0,
+                       RK2928_CLKSEL_CON(16), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(1), 14, GFLAGS),
+       COMPOSITE_FRAC(0, "uart3_frac", "uart3_pre", 0,
+                       RK2928_CLKSEL_CON(20), 0,
+                       RK2928_CLKGATE_CON(1), 15, GFLAGS),
+       MUX(SCLK_UART3, "sclk_uart3", mux_sclk_uart3_p, 0,
+                       RK2928_CLKSEL_CON(16), 8, 2, MFLAGS),
+
+       GATE(SCLK_JTAG, "jtag", "ext_jtag", 0, RK2928_CLKGATE_CON(1), 3, GFLAGS),
+
+       GATE(SCLK_TIMER0, "timer0", "xin24m", 0, RK2928_CLKGATE_CON(1), 0, GFLAGS),
+       GATE(SCLK_TIMER1, "timer1", "xin24m", 0, RK2928_CLKGATE_CON(1), 1, GFLAGS),
+
+       /* clk_core_pre gates */
+       GATE(0, "core_dbg", "armclk", 0, RK2928_CLKGATE_CON(9), 0, GFLAGS),
+
+       /* aclk_cpu gates */
+       GATE(ACLK_DMA1, "aclk_dma1", "aclk_cpu", 0, RK2928_CLKGATE_CON(5), 0, GFLAGS),
+       GATE(0, "aclk_intmem", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 12, GFLAGS),
+       GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 10, GFLAGS),
+
+       /* hclk_cpu gates */
+       GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS),
+       GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 2, GFLAGS),
+       GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 1, GFLAGS),
+       GATE(0, "hclk_cpubus", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 8, GFLAGS),
+       /* hclk_ahb2apb is part of a clk branch */
+       GATE(0, "hclk_vio_bus", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
+       GATE(HCLK_LCDC0, "hclk_lcdc0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 1, GFLAGS),
+       GATE(HCLK_LCDC1, "hclk_lcdc1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
+       GATE(HCLK_CIF0, "hclk_cif0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 4, GFLAGS),
+       GATE(HCLK_IPP, "hclk_ipp", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 9, GFLAGS),
+       GATE(HCLK_RGA, "hclk_rga", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 10, GFLAGS),
+
+       /* hclk_peri gates */
+       GATE(0, "hclk_peri_axi_matrix", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 0, GFLAGS),
+       GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 6, GFLAGS),
+       GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
+       GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
+       GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
+       GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS),
+       GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+       GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS),
+       GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS),
+       GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
+       GATE(HCLK_SDIO, "hclk_sdio", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 11, GFLAGS),
+       GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 12, GFLAGS),
+
+       /* aclk_lcdc0_pre gates */
+       GATE(0, "aclk_vio0", "aclk_lcdc0_pre", 0, RK2928_CLKGATE_CON(6), 13, GFLAGS),
+       GATE(ACLK_LCDC0, "aclk_lcdc0", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 0, GFLAGS),
+       GATE(ACLK_CIF0, "aclk_cif0", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 5, GFLAGS),
+       GATE(ACLK_IPP, "aclk_ipp", "aclk_vio0", 0, RK2928_CLKGATE_CON(6), 8, GFLAGS),
+
+       /* aclk_lcdc1_pre gates */
+       GATE(0, "aclk_vio1", "aclk_lcdc1_pre", 0, RK2928_CLKGATE_CON(9), 5, GFLAGS),
+       GATE(ACLK_LCDC1, "aclk_lcdc1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 3, GFLAGS),
+       GATE(ACLK_RGA, "aclk_rga", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 11, GFLAGS),
+
+       /* atclk_cpu gates */
+       GATE(0, "atclk", "atclk_cpu", 0, RK2928_CLKGATE_CON(9), 3, GFLAGS),
+       GATE(0, "trace", "atclk_cpu", 0, RK2928_CLKGATE_CON(9), 2, GFLAGS),
+
+       /* pclk_cpu gates */
+       GATE(PCLK_PWM01, "pclk_pwm01", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 10, GFLAGS),
+       GATE(PCLK_TIMER0, "pclk_timer0", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 7, GFLAGS),
+       GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 4, GFLAGS),
+       GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 5, GFLAGS),
+       GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 9, GFLAGS),
+       GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 10, GFLAGS),
+       GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 11, GFLAGS),
+       GATE(PCLK_EFUSE, "pclk_efuse", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 2, GFLAGS),
+       GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 3, GFLAGS),
+       GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS),
+       GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
+       GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS),
+       GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 4, GFLAGS),
+       GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 5, GFLAGS),
+
+       /* aclk_peri */
+       GATE(ACLK_DMA2, "aclk_dma2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS),
+       GATE(ACLK_SMC, "aclk_smc", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 8, GFLAGS),
+       GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 4, GFLAGS),
+       GATE(0, "aclk_cpu_peri", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 2, GFLAGS),
+       GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 3, GFLAGS),
+
+       /* pclk_peri gates */
+       GATE(0, "pclk_peri_axi_matrix", "pclk_peri", 0, RK2928_CLKGATE_CON(4), 1, GFLAGS),
+       GATE(PCLK_PWM23, "pclk_pwm23", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 11, GFLAGS),
+       GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
+       GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS),
+       GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 13, GFLAGS),
+       GATE(PCLK_UART2, "pclk_uart2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 2, GFLAGS),
+       GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 3, GFLAGS),
+       GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 6, GFLAGS),
+       GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 7, GFLAGS),
+       GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 8, GFLAGS),
+       GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 12, GFLAGS),
+       GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 14, GFLAGS),
+};
+
+PNAME(mux_rk3066_lcdc0_p)      = { "dclk_lcdc0_src", "xin27m" };
+PNAME(mux_rk3066_lcdc1_p)      = { "dclk_lcdc1_src", "xin27m" };
+PNAME(mux_sclk_cif1_p)         = { "cif1_pre", "xin24m" };
+PNAME(mux_sclk_i2s1_p)         = { "i2s1_pre", "i2s1_frac", "xin12m" };
+PNAME(mux_sclk_i2s2_p)         = { "i2s2_pre", "i2s2_frac", "xin12m" };
+
+static struct clk_div_table div_aclk_cpu_t[] = {
+       { .val = 0, .div = 1 },
+       { .val = 1, .div = 2 },
+       { .val = 2, .div = 3 },
+       { .val = 3, .div = 4 },
+       { .val = 4, .div = 8 },
+       { /* sentinel */ },
+};
+
+static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
+       COMPOSITE_NOGATE(0, "armclk", mux_armclk_p, 0,
+                       RK2928_CLKSEL_CON(0), 8, 1, MFLAGS, 0, 5, DFLAGS),
+       DIVTBL(0, "aclk_cpu_pre", "armclk", 0,
+                       RK2928_CLKSEL_CON(1), 0, 3, DFLAGS, div_aclk_cpu_t),
+
+       GATE(CORE_L2C, "core_l2c", "aclk_cpu", 0,
+                       RK2928_CLKGATE_CON(9), 4, GFLAGS),
+
+       COMPOSITE(0, "aclk_peri_pre", mux_pll_src_gpll_cpll_p, 0,
+                       RK2928_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 0, GFLAGS),
+
+       COMPOSITE(0, "dclk_lcdc0_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(27), 0, 1, MFLAGS, 8, 8, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 1, GFLAGS),
+       MUX(DCLK_LCDC0, "dclk_lcdc0", mux_rk3066_lcdc0_p, 0,
+                       RK2928_CLKSEL_CON(27), 4, 1, MFLAGS),
+       COMPOSITE(0, "dclk_lcdc1_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(28), 0, 1, MFLAGS, 8, 8, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 2, GFLAGS),
+       MUX(DCLK_LCDC1, "dclk_lcdc1", mux_rk3066_lcdc1_p, 0,
+                       RK2928_CLKSEL_CON(28), 4, 1, MFLAGS),
+
+       COMPOSITE_NOMUX(0, "cif1_pre", "cif_src", 0,
+                       RK2928_CLKSEL_CON(29), 8, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 8, GFLAGS),
+       MUX(SCLK_CIF1, "sclk_cif1", mux_sclk_cif1_p, 0,
+                       RK2928_CLKSEL_CON(29), 15, 1, MFLAGS),
+
+       GATE(0, "pclkin_cif1", "ext_cif1", 0,
+                       RK2928_CLKGATE_CON(3), 4, GFLAGS),
+
+       COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(33), 8, 1, MFLAGS, 0, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 13, GFLAGS),
+       GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_src", 0,
+                       RK2928_CLKGATE_CON(5), 15, GFLAGS),
+
+       GATE(SCLK_TIMER2, "timer2", "xin24m", 0,
+                       RK2928_CLKGATE_CON(3), 2, GFLAGS),
+
+       COMPOSITE_NOMUX(0, "sclk_tsadc", "xin24m", 0,
+                       RK2928_CLKSEL_CON(34), 0, 16, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 15, GFLAGS),
+
+       MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0,
+                       RK2928_CLKSEL_CON(2), 15, 1, MFLAGS),
+       COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
+                       RK2928_CLKSEL_CON(2), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(0), 7, GFLAGS),
+       COMPOSITE_FRAC(0, "i2s0_frac", "i2s0_pre", 0,
+                       RK2928_CLKSEL_CON(6), 0,
+                       RK2928_CLKGATE_CON(0), 8, GFLAGS),
+       MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+                       RK2928_CLKSEL_CON(2), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "i2s1_pre", "i2s_src", 0,
+                       RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(0), 9, GFLAGS),
+       COMPOSITE_FRAC(0, "i2s1_frac", "i2s1_pre", 0,
+                       RK2928_CLKSEL_CON(7), 0,
+                       RK2928_CLKGATE_CON(0), 10, GFLAGS),
+       MUX(SCLK_I2S1, "sclk_i2s1", mux_sclk_i2s1_p, 0,
+                       RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "i2s2_pre", "i2s_src", 0,
+                       RK2928_CLKSEL_CON(4), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(0), 11, GFLAGS),
+       COMPOSITE_FRAC(0, "i2s2_frac", "i2s2_pre", 0,
+                       RK2928_CLKSEL_CON(8), 0,
+                       RK2928_CLKGATE_CON(0), 12, GFLAGS),
+       MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0,
+                       RK2928_CLKSEL_CON(4), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
+                       RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(0), 13, GFLAGS),
+       COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
+                       RK2928_CLKSEL_CON(9), 0,
+                       RK2928_CLKGATE_CON(0), 14, GFLAGS),
+       MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+                       RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
+
+       GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+       GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+       GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
+       GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
+
+       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
+
+       GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
+
+       GATE(PCLK_TIMER1, "pclk_timer1", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 8, GFLAGS),
+       GATE(PCLK_TIMER2, "pclk_timer2", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
+       GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 15, GFLAGS),
+       GATE(PCLK_UART0, "pclk_uart0", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 0, GFLAGS),
+       GATE(PCLK_UART1, "pclk_uart1", "pclk_cpu", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
+
+       GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_peri", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
+       GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK2928_CLKGATE_CON(4), 13, GFLAGS),
+};
+
+static struct clk_div_table div_rk3188_aclk_core_t[] = {
+       { .val = 0, .div = 1 },
+       { .val = 1, .div = 2 },
+       { .val = 2, .div = 3 },
+       { .val = 3, .div = 4 },
+       { .val = 4, .div = 8 },
+       { /* sentinel */ },
+};
+
+PNAME(mux_hsicphy_p)           = { "sclk_otgphy0", "sclk_otgphy1",
+                                   "gpll", "cpll" };
+
+static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
+       COMPOSITE_NOGATE(0, "armclk", mux_armclk_p, 0,
+                       RK2928_CLKSEL_CON(0), 8, 1, MFLAGS, 9, 5, DFLAGS),
+       COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", 0,
+                       RK2928_CLKSEL_CON(1), 3, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
+                       div_rk3188_aclk_core_t, RK2928_CLKGATE_CON(0), 7, GFLAGS),
+
+       /* do not source aclk_cpu_pre from the apll, to keep complexity down */
+       COMPOSITE_NOGATE(0, "aclk_cpu_pre", mux_aclk_cpu_p, CLK_SET_RATE_NO_REPARENT,
+                       RK2928_CLKSEL_CON(0), 5, 1, MFLAGS, 0, 5, DFLAGS),
+
+       GATE(CORE_L2C, "core_l2c", "armclk", 0,
+                       RK2928_CLKGATE_CON(9), 4, GFLAGS),
+
+       COMPOSITE(0, "aclk_peri_pre", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(2), 0, GFLAGS),
+
+       COMPOSITE(DCLK_LCDC0, "dclk_lcdc0", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(27), 0, 1, MFLAGS, 8, 8, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 1, GFLAGS),
+       COMPOSITE(DCLK_LCDC1, "dclk_lcdc1", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(28), 0, 1, MFLAGS, 8, 8, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 2, GFLAGS),
+
+       COMPOSITE(0, "aclk_gpu_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK2928_CLKSEL_CON(34), 7, 1, MFLAGS, 0, 5, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 15, GFLAGS),
+       GATE(ACLK_GPU, "aclk_gpu", "aclk_gpu_src", 0,
+                       RK2928_CLKGATE_CON(9), 7, GFLAGS),
+
+       GATE(SCLK_TIMER2, "timer2", "xin24m", 0, RK2928_CLKGATE_CON(3), 4, GFLAGS),
+       GATE(SCLK_TIMER3, "timer3", "xin24m", 0, RK2928_CLKGATE_CON(1), 2, GFLAGS),
+       GATE(SCLK_TIMER4, "timer4", "xin24m", 0, RK2928_CLKGATE_CON(3), 5, GFLAGS),
+       GATE(SCLK_TIMER5, "timer5", "xin24m", 0, RK2928_CLKGATE_CON(3), 8, GFLAGS),
+       GATE(SCLK_TIMER6, "timer6", "xin24m", 0, RK2928_CLKGATE_CON(3), 14, GFLAGS),
+
+       COMPOSITE_NODIV(0, "sclk_hsicphy_480m", mux_hsicphy_p, 0,
+                       RK2928_CLKSEL_CON(30), 0, 2, DFLAGS,
+                       RK2928_CLKGATE_CON(3), 6, GFLAGS),
+       DIV(0, "sclk_hsicphy_12m", "sclk_hsicphy_480m", 0,
+                       RK2928_CLKGATE_CON(11), 8, 6, DFLAGS),
+
+       MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0,
+                       RK2928_CLKSEL_CON(2), 15, 1, MFLAGS),
+       COMPOSITE_NOMUX(0, "i2s0_pre", "i2s_src", 0,
+                       RK2928_CLKSEL_CON(3), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(0), 9, GFLAGS),
+       COMPOSITE_FRAC(0, "i2s0_frac", "i2s0_pre", 0,
+                       RK2928_CLKSEL_CON(7), 0,
+                       RK2928_CLKGATE_CON(0), 10, GFLAGS),
+       MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
+                       RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
+                       RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(13), 13, GFLAGS),
+       COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
+                       RK2928_CLKSEL_CON(9), 0,
+                       RK2928_CLKGATE_CON(0), 14, GFLAGS),
+       MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+                       RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
+
+       GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
+       GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
+
+       GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+       GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
+
+       GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
+
+       GATE(PCLK_UART0, "pclk_uart0", "hclk_ahb2apb", 0, RK2928_CLKGATE_CON(8), 0, GFLAGS),
+       GATE(PCLK_UART1, "pclk_uart1", "hclk_ahb2apb", 0, RK2928_CLKGATE_CON(8), 1, GFLAGS),
+
+       GATE(ACLK_GPS, "aclk_gps", "aclk_peri", 0, RK2928_CLKGATE_CON(8), 13, GFLAGS),
+};
+
+static void __init rk3188_common_clk_init(struct device_node *np)
+{
+       void __iomem *reg_base;
+       struct clk *clk;
+
+       reg_base = of_iomap(np, 0);
+       if (!reg_base) {
+               pr_err("%s: could not map cru region\n", __func__);
+               return;
+       }
+
+       rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+
+       /* xin12m is created by a cru-internal divider */
+       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
+       if (IS_ERR(clk))
+               pr_warn("%s: could not register clock xin12m: %ld\n",
+                       __func__, PTR_ERR(clk));
+
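+       /* usb480m: modelled as a fixed 24 MHz * 20 = 480 MHz factor of xin24m */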
+       clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
+       if (IS_ERR(clk))
+               pr_warn("%s: could not register clock usb480m: %ld\n",
+                       __func__, PTR_ERR(clk));
+
+       rockchip_clk_register_plls(rk3188_pll_clks,
+                                  ARRAY_SIZE(rk3188_pll_clks),
+                                  RK3188_GRF_SOC_STATUS);
+       rockchip_clk_register_branches(common_clk_branches,
+                                 ARRAY_SIZE(common_clk_branches));
+
+       rockchip_register_softrst(np, 9, reg_base + RK2928_SOFTRST_CON(0),
+                                 ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
+
+static void __init rk3066a_clk_init(struct device_node *np)
+{
+       rk3188_common_clk_init(np);
+       rockchip_clk_register_branches(rk3066a_clk_branches,
+                                 ARRAY_SIZE(rk3066a_clk_branches));
+}
+CLK_OF_DECLARE(rk3066a_cru, "rockchip,rk3066a-cru", rk3066a_clk_init);
+
+static void __init rk3188a_clk_init(struct device_node *np)
+{
+       rk3188_common_clk_init(np);
+       rockchip_clk_register_branches(rk3188_clk_branches,
+                                 ARRAY_SIZE(rk3188_clk_branches));
+}
+CLK_OF_DECLARE(rk3188a_cru, "rockchip,rk3188a-cru", rk3188a_clk_init);
+
+static void __init rk3188_clk_init(struct device_node *np)
+{
+       int i;
+
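+       /*
+        * The plain rk3188 presumably cannot use the bwadj values that
+        * the shared rate table carries for the rk3188a, so zero them
+        * out before reusing the rk3188a setup.
+        */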
+       for (i = 0; i < ARRAY_SIZE(rk3188_pll_clks); i++) {
+               struct rockchip_pll_clock *pll = &rk3188_pll_clks[i];
+               struct rockchip_pll_rate_table *rate;
+
+               if (!pll->rate_table)
+                       continue;
+
+               rate = pll->rate_table;
+               while (rate->rate > 0) {
+                       rate->bwadj = 0;
+                       rate++;
+               }
+       }
+
+       rk3188a_clk_init(np);
+}
+CLK_OF_DECLARE(rk3188_cru, "rockchip,rk3188-cru", rk3188_clk_init);
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
new file mode 100644 (file)
index 0000000..0d8c6c5
--- /dev/null
@@ -0,0 +1,718 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/rk3288-cru.h>
+#include "clk.h"
+
+#define RK3288_GRF_SOC_CON(x)  (0x244 + (x) * 4)
+#define RK3288_GRF_SOC_STATUS  0x280
+
+enum rk3288_plls {
+       apll, dpll, cpll, gpll, npll,
+};
+
+static struct rockchip_pll_rate_table rk3288_pll_rates[] = {
+       RK3066_PLL_RATE(2208000000, 1, 92, 1),
+       RK3066_PLL_RATE(2184000000, 1, 91, 1),
+       RK3066_PLL_RATE(2160000000, 1, 90, 1),
+       RK3066_PLL_RATE(2136000000, 1, 89, 1),
+       RK3066_PLL_RATE(2112000000, 1, 88, 1),
+       RK3066_PLL_RATE(2088000000, 1, 87, 1),
+       RK3066_PLL_RATE(2064000000, 1, 86, 1),
+       RK3066_PLL_RATE(2040000000, 1, 85, 1),
+       RK3066_PLL_RATE(2016000000, 1, 84, 1),
+       RK3066_PLL_RATE(1992000000, 1, 83, 1),
+       RK3066_PLL_RATE(1968000000, 1, 82, 1),
+       RK3066_PLL_RATE(1944000000, 1, 81, 1),
+       RK3066_PLL_RATE(1920000000, 1, 80, 1),
+       RK3066_PLL_RATE(1896000000, 1, 79, 1),
+       RK3066_PLL_RATE(1872000000, 1, 78, 1),
+       RK3066_PLL_RATE(1848000000, 1, 77, 1),
+       RK3066_PLL_RATE(1824000000, 1, 76, 1),
+       RK3066_PLL_RATE(1800000000, 1, 75, 1),
+       RK3066_PLL_RATE(1776000000, 1, 74, 1),
+       RK3066_PLL_RATE(1752000000, 1, 73, 1),
+       RK3066_PLL_RATE(1728000000, 1, 72, 1),
+       RK3066_PLL_RATE(1704000000, 1, 71, 1),
+       RK3066_PLL_RATE(1680000000, 1, 70, 1),
+       RK3066_PLL_RATE(1656000000, 1, 69, 1),
+       RK3066_PLL_RATE(1632000000, 1, 68, 1),
+       RK3066_PLL_RATE(1608000000, 1, 67, 1),
+       RK3066_PLL_RATE(1560000000, 1, 65, 1),
+       RK3066_PLL_RATE(1512000000, 1, 63, 1),
+       RK3066_PLL_RATE(1488000000, 1, 62, 1),
+       RK3066_PLL_RATE(1464000000, 1, 61, 1),
+       RK3066_PLL_RATE(1440000000, 1, 60, 1),
+       RK3066_PLL_RATE(1416000000, 1, 59, 1),
+       RK3066_PLL_RATE(1392000000, 1, 58, 1),
+       RK3066_PLL_RATE(1368000000, 1, 57, 1),
+       RK3066_PLL_RATE(1344000000, 1, 56, 1),
+       RK3066_PLL_RATE(1320000000, 1, 55, 1),
+       RK3066_PLL_RATE(1296000000, 1, 54, 1),
+       RK3066_PLL_RATE(1272000000, 1, 53, 1),
+       RK3066_PLL_RATE(1248000000, 1, 52, 1),
+       RK3066_PLL_RATE(1224000000, 1, 51, 1),
+       RK3066_PLL_RATE(1200000000, 1, 50, 1),
+       RK3066_PLL_RATE(1188000000, 2, 99, 1),
+       RK3066_PLL_RATE(1176000000, 1, 49, 1),
+       RK3066_PLL_RATE(1128000000, 1, 47, 1),
+       RK3066_PLL_RATE(1104000000, 1, 46, 1),
+       RK3066_PLL_RATE(1008000000, 1, 84, 2),
+       RK3066_PLL_RATE( 912000000, 1, 76, 2),
+       RK3066_PLL_RATE( 891000000, 8, 594, 2),
+       RK3066_PLL_RATE( 888000000, 1, 74, 2),
+       RK3066_PLL_RATE( 816000000, 1, 68, 2),
+       RK3066_PLL_RATE( 798000000, 2, 133, 2),
+       RK3066_PLL_RATE( 792000000, 1, 66, 2),
+       RK3066_PLL_RATE( 768000000, 1, 64, 2),
+       RK3066_PLL_RATE( 742500000, 8, 495, 2),
+       RK3066_PLL_RATE( 696000000, 1, 58, 2),
+       RK3066_PLL_RATE( 600000000, 1, 50, 2),
+       RK3066_PLL_RATE( 594000000, 2, 198, 4),
+       RK3066_PLL_RATE( 552000000, 1, 46, 2),
+       RK3066_PLL_RATE( 504000000, 1, 84, 4),
+       RK3066_PLL_RATE( 456000000, 1, 76, 4),
+       RK3066_PLL_RATE( 408000000, 1, 68, 4),
+       RK3066_PLL_RATE( 384000000, 2, 128, 4),
+       RK3066_PLL_RATE( 360000000, 1, 60, 4),
+       RK3066_PLL_RATE( 312000000, 1, 52, 4),
+       RK3066_PLL_RATE( 300000000, 1, 50, 4),
+       RK3066_PLL_RATE( 297000000, 2, 198, 8),
+       RK3066_PLL_RATE( 252000000, 1, 84, 8),
+       RK3066_PLL_RATE( 216000000, 1, 72, 8),
+       RK3066_PLL_RATE( 148500000, 2, 99, 8),
+       RK3066_PLL_RATE( 126000000, 1, 84, 16),
+       RK3066_PLL_RATE(  48000000, 1, 64, 32),
+       { /* sentinel */ },
+};
+
+PNAME(mux_pll_p)               = { "xin24m", "xin32k" };
+PNAME(mux_armclk_p)            = { "apll_core", "gpll_core" };
+PNAME(mux_ddrphy_p)            = { "dpll_ddr", "gpll_ddr" };
+PNAME(mux_aclk_cpu_src_p)      = { "cpll_aclk_cpu", "gpll_aclk_cpu" };
+
+PNAME(mux_pll_src_cpll_gpll_p)         = { "cpll", "gpll" };
+PNAME(mux_pll_src_npll_cpll_gpll_p)    = { "npll", "cpll", "gpll" };
+PNAME(mux_pll_src_cpll_gpll_npll_p)    = { "cpll", "gpll", "npll" };
+PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usb480m" };
+
+PNAME(mux_mmc_src_p)   = { "cpll", "gpll", "xin24m", "xin24m" };
+PNAME(mux_i2s_pre_p)   = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
+PNAME(mux_i2s_clkout_p)        = { "i2s_pre", "xin12m" };
+PNAME(mux_spdif_p)     = { "spdif_pre", "spdif_frac", "xin12m" };
+PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac", "xin12m" };
+PNAME(mux_uart0_pll_p) = { "cpll", "gpll", "usbphy_480m_src", "npll" };
+PNAME(mux_uart0_p)     = { "uart0_src", "uart0_frac", "xin24m" };
+PNAME(mux_uart1_p)     = { "uart1_src", "uart1_frac", "xin24m" };
+PNAME(mux_uart2_p)     = { "uart2_src", "uart2_frac", "xin24m" };
+PNAME(mux_uart3_p)     = { "uart3_src", "uart3_frac", "xin24m" };
+PNAME(mux_uart4_p)     = { "uart4_src", "uart4_frac", "xin24m" };
+PNAME(mux_cif_out_p)   = { "cif_src", "xin24m" };
+PNAME(mux_macref_p)    = { "mac_src", "ext_gmac" };
+PNAME(mux_hsadcout_p)  = { "hsadc_src", "ext_hsadc" };
+PNAME(mux_edp_24m_p)   = { "ext_edp_24m", "xin24m" };
+PNAME(mux_tspout_p)    = { "cpll", "gpll", "npll", "xin27m" };
+
+PNAME(mux_usbphy480m_p)                = { "sclk_otgphy0", "sclk_otgphy1",
+                                   "sclk_otgphy2" };
+PNAME(mux_hsicphy480m_p)       = { "cpll", "gpll", "usbphy480m_src" };
+PNAME(mux_hsicphy12m_p)                = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
+
+static struct rockchip_pll_clock rk3288_pll_clks[] __initdata = {
+       [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK3288_PLL_CON(0),
+                    RK3288_MODE_CON, 0, 6, rk3288_pll_rates),
+       [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3288_PLL_CON(4),
+                    RK3288_MODE_CON, 4, 5, NULL),
+       [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3288_PLL_CON(8),
+                    RK3288_MODE_CON, 8, 7, rk3288_pll_rates),
+       [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3288_PLL_CON(12),
+                    RK3288_MODE_CON, 12, 8, rk3288_pll_rates),
+       [npll] = PLL(pll_rk3066, PLL_NPLL, "npll",  mux_pll_p, 0, RK3288_PLL_CON(16),
+                    RK3288_MODE_CON, 14, 9, NULL),
+};
+
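+/* the hclk_cpu divider field seemingly has no /3 setting (val 2 unused) */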
+static struct clk_div_table div_hclk_cpu_t[] = {
+       { .val = 0, .div = 1 },
+       { .val = 1, .div = 2 },
+       { .val = 3, .div = 4 },
+       { /* sentinel */ },
+};
+
+#define MFLAGS CLK_MUX_HIWORD_MASK
+#define DFLAGS CLK_DIVIDER_HIWORD_MASK
+#define GFLAGS (CLK_GATE_HIWORD_MASK | CLK_GATE_SET_TO_DISABLE)
+
+static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
+       /*
+        * Clock-Architecture Diagram 1
+        */
+
+       GATE(0, "apll_core", "apll", 0,
+                       RK3288_CLKGATE_CON(0), 1, GFLAGS),
+       GATE(0, "gpll_core", "gpll", 0,
+                       RK3288_CLKGATE_CON(0), 2, GFLAGS),
+       COMPOSITE_NOGATE(0, "armclk", mux_armclk_p, 0,
+                       RK3288_CLKSEL_CON(0), 15, 1, MFLAGS, 8, 5, DFLAGS),
+
+       COMPOSITE_NOMUX(0, "armcore0", "armclk", 0,
+                       RK3288_CLKSEL_CON(36), 0, 3, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 0, GFLAGS),
+       COMPOSITE_NOMUX(0, "armcore1", "armclk", 0,
+                       RK3288_CLKSEL_CON(36), 4, 3, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 1, GFLAGS),
+       COMPOSITE_NOMUX(0, "armcore2", "armclk", 0,
+                       RK3288_CLKSEL_CON(36), 8, 3, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 2, GFLAGS),
+       COMPOSITE_NOMUX(0, "armcore3", "armclk", 0,
+                       RK3288_CLKSEL_CON(36), 12, 3, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 3, GFLAGS),
+       COMPOSITE_NOMUX(0, "l2ram", "armclk", 0,
+                       RK3288_CLKSEL_CON(37), 0, 3, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 4, GFLAGS),
+       COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", 0,
+                       RK3288_CLKSEL_CON(0), 0, 4, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 5, GFLAGS),
+       COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", 0,
+                       RK3288_CLKSEL_CON(0), 4, 4, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 6, GFLAGS),
+       COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
+                       RK3288_CLKSEL_CON(37), 4, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 7, GFLAGS),
+       COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", 0,
+                       RK3288_CLKSEL_CON(37), 9, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(12), 8, GFLAGS),
+       GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
+                       RK3288_CLKGATE_CON(12), 9, GFLAGS),
+       GATE(0, "cs_dbg", "pclk_dbg_pre", 0,
+                       RK3288_CLKGATE_CON(12), 10, GFLAGS),
+       GATE(0, "pclk_core_niu", "pclk_dbg_pre", 0,
+                       RK3288_CLKGATE_CON(12), 11, GFLAGS),
+
+       GATE(0, "dpll_ddr", "dpll", 0,
+                       RK3288_CLKGATE_CON(0), 8, GFLAGS),
+       GATE(0, "gpll_ddr", "gpll", 0,
+                       RK3288_CLKGATE_CON(0), 9, GFLAGS),
+       COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, 0,
+                       RK3288_CLKSEL_CON(26), 2, 1, MFLAGS, 0, 2,
+                                       DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
+
+       GATE(0, "gpll_aclk_cpu", "gpll", 0,
+                       RK3288_CLKGATE_CON(0), 10, GFLAGS),
+       GATE(0, "cpll_aclk_cpu", "cpll", 0,
+                       RK3288_CLKGATE_CON(0), 11, GFLAGS),
+       COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, 0,
+                       RK3288_CLKSEL_CON(1), 15, 1, MFLAGS, 3, 5, DFLAGS),
+       DIV(0, "aclk_cpu_pre", "aclk_cpu_src", 0,
+                       RK3288_CLKSEL_CON(1), 0, 3, DFLAGS),
+       GATE(0, "aclk_cpu", "aclk_cpu_pre", 0,
+                       RK3288_CLKGATE_CON(0), 3, GFLAGS),
+       COMPOSITE_NOMUX(0, "pclk_cpu", "aclk_cpu_pre", 0,
+                       RK3288_CLKSEL_CON(1), 12, 3, DFLAGS,
+                       RK3288_CLKGATE_CON(0), 5, GFLAGS),
+       COMPOSITE_NOMUX_DIVTBL(0, "hclk_cpu", "aclk_cpu_pre", 0,
+                       RK3288_CLKSEL_CON(1), 8, 2, DFLAGS, div_hclk_cpu_t,
+                       RK3288_CLKGATE_CON(0), 4, GFLAGS),
+       GATE(0, "c2c_host", "aclk_cpu_src", 0,
+                       RK3288_CLKGATE_CON(13), 8, GFLAGS),
+       COMPOSITE_NOMUX(0, "crypto", "aclk_cpu_pre", 0,
+                       RK3288_CLKSEL_CON(26), 6, 2, DFLAGS,
+                       RK3288_CLKGATE_CON(5), 4, GFLAGS),
+       GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", 0,
+                       RK3288_CLKGATE_CON(0), 7, GFLAGS),
+
+       COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(4), 15, 1, MFLAGS, 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(4), 1, GFLAGS),
+       COMPOSITE_FRAC(0, "i2s_frac", "i2s_src", 0,
+                       RK3288_CLKSEL_CON(8), 0,
+                       RK3288_CLKGATE_CON(4), 2, GFLAGS),
+       MUX(0, "i2s_pre", mux_i2s_pre_p, 0,
+                       RK3288_CLKSEL_CON(4), 8, 2, MFLAGS),
+       COMPOSITE_NODIV(0, "i2s0_clkout", mux_i2s_clkout_p, 0,
+                       RK3288_CLKSEL_CON(4), 12, 1, MFLAGS,
+                       RK3288_CLKGATE_CON(4), 0, GFLAGS),
+       GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", 0,
+                       RK3288_CLKGATE_CON(4), 3, GFLAGS),
+
+       MUX(0, "spdif_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(5), 15, 1, MFLAGS),
+       COMPOSITE_NOMUX(0, "spdif_pre", "spdif_src", 0,
+                       RK3288_CLKSEL_CON(5), 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(4), 4, GFLAGS),
+       COMPOSITE_FRAC(0, "spdif_frac", "spdif_src", 0,
+                       RK3288_CLKSEL_CON(9), 0,
+                       RK3288_CLKGATE_CON(4), 5, GFLAGS),
+       COMPOSITE_NODIV(SCLK_SPDIF, "sclk_spdif", mux_spdif_p, 0,
+                       RK3288_CLKSEL_CON(5), 8, 2, MFLAGS,
+                       RK3288_CLKGATE_CON(4), 6, GFLAGS),
+       COMPOSITE_NOMUX(0, "spdif_8ch_pre", "spdif_src", 0,
+                       RK3288_CLKSEL_CON(40), 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(4), 7, GFLAGS),
+       COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", 0,
+                       RK3288_CLKSEL_CON(41), 0,
+                       RK3288_CLKGATE_CON(4), 8, GFLAGS),
+       COMPOSITE_NODIV(SCLK_SPDIF8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
+                       RK3288_CLKSEL_CON(40), 8, 2, MFLAGS,
+                       RK3288_CLKGATE_CON(4), 9, GFLAGS),
+
+       GATE(0, "sclk_acc_efuse", "xin24m", 0,
+                       RK3288_CLKGATE_CON(0), 12, GFLAGS),
+
+       GATE(SCLK_TIMER0, "sclk_timer0", "xin24m", 0,
+                       RK3288_CLKGATE_CON(1), 0, GFLAGS),
+       GATE(SCLK_TIMER1, "sclk_timer1", "xin24m", 0,
+                       RK3288_CLKGATE_CON(1), 1, GFLAGS),
+       GATE(SCLK_TIMER2, "sclk_timer2", "xin24m", 0,
+                       RK3288_CLKGATE_CON(1), 2, GFLAGS),
+       GATE(SCLK_TIMER3, "sclk_timer3", "xin24m", 0,
+                       RK3288_CLKGATE_CON(1), 3, GFLAGS),
+       GATE(SCLK_TIMER4, "sclk_timer4", "xin24m", 0,
+                       RK3288_CLKGATE_CON(1), 4, GFLAGS),
+       GATE(SCLK_TIMER5, "sclk_timer5", "xin24m", 0,
+                       RK3288_CLKGATE_CON(1), 5, GFLAGS),
+
+       /*
+        * Clock-Architecture Diagram 2
+        */
+
+       COMPOSITE(0, "aclk_vepu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+                       RK3288_CLKSEL_CON(32), 6, 2, MFLAGS, 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 9, GFLAGS),
+       COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+                       RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 11, GFLAGS),
+
+       COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, 0,
+                       RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 0, GFLAGS),
+       DIV(0, "hclk_vio", "aclk_vio0", 0,
+                       RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
+       COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, 0,
+                       RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 2, GFLAGS),
+
+       COMPOSITE(0, "aclk_rga_pre", mux_pll_src_cpll_gpll_usb480m_p, 0,
+                       RK3288_CLKSEL_CON(30), 6, 2, MFLAGS, 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 5, GFLAGS),
+       COMPOSITE(0, "sclk_rga", mux_pll_src_cpll_gpll_usb480m_p, 0,
+                       RK3288_CLKSEL_CON(30), 14, 2, MFLAGS, 8, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 4, GFLAGS),
+
+       COMPOSITE(DCLK_VOP0, "dclk_vop0", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(27), 0, 2, MFLAGS, 8, 8, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 1, GFLAGS),
+       COMPOSITE(DCLK_VOP1, "dclk_vop1", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(29), 6, 2, MFLAGS, 8, 8, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 3, GFLAGS),
+
+       COMPOSITE_NODIV(0, "sclk_edp_24m", mux_edp_24m_p, 0,
+                       RK3288_CLKSEL_CON(28), 15, 1, MFLAGS,
+                       RK3288_CLKGATE_CON(3), 12, GFLAGS),
+       COMPOSITE(0, "sclk_edp", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(28), 6, 2, MFLAGS, 0, 6, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 13, GFLAGS),
+
+       COMPOSITE(0, "sclk_isp", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(6), 6, 2, MFLAGS, 0, 6, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 14, GFLAGS),
+       COMPOSITE(0, "sclk_isp_jpe", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(6), 14, 2, MFLAGS, 8, 6, DFLAGS,
+                       RK3288_CLKGATE_CON(3), 15, GFLAGS),
+
+       GATE(0, "sclk_hdmi_hdcp", "xin24m", 0,
+                       RK3288_CLKGATE_CON(5), 12, GFLAGS),
+       GATE(0, "sclk_hdmi_cec", "xin32k", 0,
+                       RK3288_CLKGATE_CON(5), 11, GFLAGS),
+
+       COMPOSITE(0, "aclk_hevc", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(39), 14, 2, MFLAGS, 8, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(13), 13, GFLAGS),
+       DIV(0, "hclk_hevc", "aclk_hevc", 0,
+                       RK3288_CLKSEL_CON(40), 12, 2, DFLAGS),
+
+       COMPOSITE(0, "sclk_hevc_cabac", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(42), 6, 2, MFLAGS, 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(13), 14, GFLAGS),
+       COMPOSITE(0, "sclk_hevc_core", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(42), 14, 2, MFLAGS, 8, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(13), 15, GFLAGS),
+
+       COMPOSITE_NODIV(0, "vip_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(26), 8, 1, MFLAGS,
+                       RK3288_CLKGATE_CON(3), 7, GFLAGS),
+       COMPOSITE_NOGATE(0, "sclk_vip_out", mux_cif_out_p, 0,
+                       RK3288_CLKSEL_CON(26), 15, 1, MFLAGS, 9, 5, DFLAGS),
+
+       DIV(0, "pclk_pd_alive", "gpll", 0,
+                       RK3288_CLKSEL_CON(33), 8, 5, DFLAGS),
+       COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", 0,
+                       RK3288_CLKSEL_CON(33), 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(5), 8, GFLAGS),
+
+       COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+                       RK3288_CLKSEL_CON(34), 6, 2, MFLAGS, 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(5), 7, GFLAGS),
+
+       COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 0, GFLAGS),
+       COMPOSITE_NOMUX(0, "pclk_peri", "aclk_peri_src", 0,
+                       RK3288_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+                       RK3288_CLKGATE_CON(2), 3, GFLAGS),
+       COMPOSITE_NOMUX(0, "hclk_peri", "aclk_peri_src", 0,
+                       RK3288_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
+                       RK3288_CLKGATE_CON(2), 2, GFLAGS),
+       GATE(0, "aclk_peri", "aclk_peri_src", 0,
+                       RK3288_CLKGATE_CON(2), 1, GFLAGS),
+
+       /*
+        * Clock-Architecture Diagram 3
+        */
+
+       COMPOSITE(SCLK_SPI0, "sclk_spi0", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(25), 7, 1, MFLAGS, 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 9, GFLAGS),
+       COMPOSITE(SCLK_SPI1, "sclk_spi1", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(25), 15, 1, MFLAGS, 8, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 10, GFLAGS),
+       COMPOSITE(SCLK_SPI2, "sclk_spi2", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(39), 7, 1, MFLAGS, 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 11, GFLAGS),
+
+       COMPOSITE(SCLK_SDMMC, "sclk_sdmmc", mux_mmc_src_p, 0,
+                       RK3288_CLKSEL_CON(11), 6, 2, MFLAGS, 0, 6, DFLAGS,
+                       RK3288_CLKGATE_CON(13), 0, GFLAGS),
+       COMPOSITE(SCLK_SDIO0, "sclk_sdio0", mux_mmc_src_p, 0,
+                       RK3288_CLKSEL_CON(12), 6, 2, MFLAGS, 0, 6, DFLAGS,
+                       RK3288_CLKGATE_CON(13), 1, GFLAGS),
+       COMPOSITE(SCLK_SDIO1, "sclk_sdio1", mux_mmc_src_p, 0,
+                       RK3288_CLKSEL_CON(34), 14, 2, MFLAGS, 8, 6, DFLAGS,
+                       RK3288_CLKGATE_CON(13), 2, GFLAGS),
+       COMPOSITE(SCLK_EMMC, "sclk_emmc", mux_mmc_src_p, 0,
+                       RK3288_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS,
+                       RK3288_CLKGATE_CON(13), 3, GFLAGS),
+
+       COMPOSITE(0, "sclk_tspout", mux_tspout_p, 0,
+                       RK3288_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(4), 11, GFLAGS),
+       COMPOSITE(0, "sclk_tsp", mux_pll_src_cpll_gpll_npll_p, 0,
+                       RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(4), 10, GFLAGS),
+
+       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+                       RK3288_CLKGATE_CON(13), 4, GFLAGS),
+       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+                       RK3288_CLKGATE_CON(13), 5, GFLAGS),
+       GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", 0,
+                       RK3288_CLKGATE_CON(13), 6, GFLAGS),
+       GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", 0,
+                       RK3288_CLKGATE_CON(13), 7, GFLAGS),
+
+       COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0,
+                       RK3288_CLKSEL_CON(2), 0, 6, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 7, GFLAGS),
+
+       COMPOSITE_NOMUX(SCLK_SARADC, "sclk_saradc", "xin24m", 0,
+                       RK3288_CLKSEL_CON(24), 8, 8, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 8, GFLAGS),
+
+       GATE(SCLK_PS2C, "sclk_ps2c", "xin24m", 0,
+                       RK3288_CLKGATE_CON(5), 13, GFLAGS),
+
+       COMPOSITE(SCLK_NANDC0, "sclk_nandc0", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(38), 7, 1, MFLAGS, 0, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(5), 5, GFLAGS),
+       COMPOSITE(SCLK_NANDC1, "sclk_nandc1", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(38), 15, 1, MFLAGS, 8, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(5), 6, GFLAGS),
+
+       COMPOSITE(0, "uart0_src", mux_uart0_pll_p, 0,
+                       RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(1), 8, GFLAGS),
+       COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
+                       RK3288_CLKSEL_CON(17), 0,
+                       RK3288_CLKGATE_CON(1), 9, GFLAGS),
+       MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0,
+                       RK3288_CLKSEL_CON(13), 8, 2, MFLAGS),
+       MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(13), 15, 1, MFLAGS),
+       COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
+                       RK3288_CLKSEL_CON(14), 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(1), 10, GFLAGS),
+       COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0,
+                       RK3288_CLKSEL_CON(18), 0,
+                       RK3288_CLKGATE_CON(1), 11, GFLAGS),
+       MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0,
+                       RK3288_CLKSEL_CON(14), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
+                       RK3288_CLKSEL_CON(15), 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(1), 12, GFLAGS),
+       COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0,
+                       RK3288_CLKSEL_CON(19), 0,
+                       RK3288_CLKGATE_CON(1), 13, GFLAGS),
+       MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0,
+                       RK3288_CLKSEL_CON(15), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
+                       RK3288_CLKSEL_CON(16), 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(1), 14, GFLAGS),
+       COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0,
+                       RK3288_CLKSEL_CON(20), 0,
+                       RK3288_CLKGATE_CON(1), 15, GFLAGS),
+       MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0,
+                       RK3288_CLKSEL_CON(16), 8, 2, MFLAGS),
+       COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
+                       RK3288_CLKSEL_CON(3), 0, 7, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 12, GFLAGS),
+       COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0,
+                       RK3288_CLKSEL_CON(7), 0,
+                       RK3288_CLKGATE_CON(2), 13, GFLAGS),
+       MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
+                       RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
+
+       COMPOSITE(0, "mac_src", mux_pll_src_npll_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(21), 0, 2, MFLAGS, 8, 5, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 5, GFLAGS),
+       MUX(0, "macref", mux_macref_p, 0,
+                       RK3288_CLKSEL_CON(21), 4, 1, MFLAGS),
+       GATE(0, "sclk_macref_out", "macref", 0,
+                       RK3288_CLKGATE_CON(5), 3, GFLAGS),
+       GATE(SCLK_MACREF, "sclk_macref", "macref", 0,
+                       RK3288_CLKGATE_CON(5), 2, GFLAGS),
+       GATE(SCLK_MAC_RX, "sclk_mac_rx", "macref", 0,
+                       RK3288_CLKGATE_CON(5), 0, GFLAGS),
+       GATE(SCLK_MAC_TX, "sclk_mac_tx", "macref", 0,
+                       RK3288_CLKGATE_CON(5), 1, GFLAGS),
+
+       COMPOSITE(0, "hsadc_src", mux_pll_src_cpll_gpll_p, 0,
+                       RK3288_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
+                       RK3288_CLKGATE_CON(2), 6, GFLAGS),
+       MUX(SCLK_HSADC, "sclk_hsadc_out", mux_hsadcout_p, 0,
+                       RK3288_CLKSEL_CON(22), 4, 1, MFLAGS),
+
+       GATE(0, "jtag", "ext_jtag", 0,
+                       RK3288_CLKGATE_CON(4), 14, GFLAGS),
+
+       COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
+                       RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
+                       RK3288_CLKGATE_CON(5), 15, GFLAGS),
+       COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
+                       RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
+                       RK3288_CLKGATE_CON(3), 6, GFLAGS),
+       GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
+                       RK3288_CLKGATE_CON(13), 9, GFLAGS),
+       DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
+                       RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
+       MUX(SCLK_HSICPHY12M, "sclk_hsicphy12m", mux_hsicphy12m_p, 0,
+                       RK3288_CLKSEL_CON(22), 4, 1, MFLAGS),
+
+       /*
+        * Clock-Architecture Diagram 4
+        */
+
+       /* aclk_cpu gates */
+       GATE(0, "sclk_intmem0", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 5, GFLAGS),
+       GATE(0, "sclk_intmem1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 6, GFLAGS),
+       GATE(0, "sclk_intmem2", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 7, GFLAGS),
+       GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 12, GFLAGS),
+       GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 13, GFLAGS),
+       GATE(0, "aclk_intmem", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 4, GFLAGS),
+       GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 6, GFLAGS),
+       GATE(0, "aclk_ccp", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 8, GFLAGS),
+
+       /* hclk_cpu gates */
+       GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK3288_CLKGATE_CON(11), 7, GFLAGS),
+       GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 8, GFLAGS),
+       GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 9, GFLAGS),
+       GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 10, GFLAGS),
+       GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 11, GFLAGS),
+
+       /* pclk_cpu gates */
+       GATE(PCLK_PWM, "pclk_pwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 0, GFLAGS),
+       GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
+       GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
+       GATE(PCLK_I2C1, "pclk_i2c1", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
+       GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
+       GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
+       GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
+       GATE(0, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS),
+       GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 2, GFLAGS),
+       GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
+       GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
+       GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
+       GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
+
+       /* ddrctrl [DDR Controller PHY clock] gates */
+       GATE(0, "nclk_ddrupctl0", "ddrphy", 0, RK3288_CLKGATE_CON(11), 4, GFLAGS),
+       GATE(0, "nclk_ddrupctl1", "ddrphy", 0, RK3288_CLKGATE_CON(11), 5, GFLAGS),
+
+       /* ddrphy gates */
+       GATE(0, "sclk_ddrphy0", "ddrphy", 0, RK3288_CLKGATE_CON(4), 12, GFLAGS),
+       GATE(0, "sclk_ddrphy1", "ddrphy", 0, RK3288_CLKGATE_CON(4), 13, GFLAGS),
+
+       /* aclk_peri gates */
+       GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 2, GFLAGS),
+       GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS),
+       GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK3288_CLKGATE_CON(7), 11, GFLAGS),
+       GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 12, GFLAGS),
+       GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS),
+       GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS),
+
+       /* hclk_peri gates */
+       GATE(0, "hclk_peri_matrix", "hclk_peri", 0, RK3288_CLKGATE_CON(6), 0, GFLAGS),
+       GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 4, GFLAGS),
+       GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 6, GFLAGS),
+       GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 7, GFLAGS),
+       GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 8, GFLAGS),
+       GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 9, GFLAGS),
+       GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 10, GFLAGS),
+       GATE(0, "hclk_emem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 12, GFLAGS),
+       GATE(0, "hclk_mem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 13, GFLAGS),
+       GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 14, GFLAGS),
+       GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 15, GFLAGS),
+       GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 8, GFLAGS),
+       GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 3, GFLAGS),
+       GATE(HCLK_SDIO0, "hclk_sdio0", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 4, GFLAGS),
+       GATE(HCLK_SDIO1, "hclk_sdio1", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 5, GFLAGS),
+       GATE(HCLK_EMMC, "hclk_emmc", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 6, GFLAGS),
+       GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 7, GFLAGS),
+       GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 5, GFLAGS),
+
+       /* pclk_peri gates */
+       GATE(0, "pclk_peri_matrix", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 1, GFLAGS),
+       GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 4, GFLAGS),
+       GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 5, GFLAGS),
+       GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 6, GFLAGS),
+       GATE(PCLK_PS2C, "pclk_ps2c", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 7, GFLAGS),
+       GATE(PCLK_UART0, "pclk_uart0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 8, GFLAGS),
+       GATE(PCLK_UART1, "pclk_uart1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 9, GFLAGS),
+       GATE(PCLK_I2C4, "pclk_i2c4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 15, GFLAGS),
+       GATE(PCLK_UART3, "pclk_uart3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 11, GFLAGS),
+       GATE(PCLK_UART4, "pclk_uart4", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 12, GFLAGS),
+       GATE(PCLK_I2C2, "pclk_i2c2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 13, GFLAGS),
+       GATE(PCLK_I2C3, "pclk_i2c3", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 14, GFLAGS),
+       GATE(PCLK_SARADC, "pclk_saradc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 1, GFLAGS),
+       GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 2, GFLAGS),
+       GATE(PCLK_SIM, "pclk_sim", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 3, GFLAGS),
+       GATE(PCLK_I2C5, "pclk_i2c5", "pclk_peri", 0, RK3288_CLKGATE_CON(7), 0, GFLAGS),
+       GATE(PCLK_GMAC, "pclk_gmac", "pclk_peri", 0, RK3288_CLKGATE_CON(8), 1, GFLAGS),
+
+       GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS),
+       GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS),
+       GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
+       GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
+       GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS),
+
+       /* sclk_gpu gates */
+       GATE(ACLK_GPU, "aclk_gpu", "sclk_gpu", 0, RK3288_CLKGATE_CON(18), 0, GFLAGS),
+
+       /* pclk_pd_alive gates */
+       GATE(PCLK_GPIO8, "pclk_gpio8", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 8, GFLAGS),
+       GATE(PCLK_GPIO7, "pclk_gpio7", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 7, GFLAGS),
+       GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 1, GFLAGS),
+       GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 2, GFLAGS),
+       GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 3, GFLAGS),
+       GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 4, GFLAGS),
+       GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS),
+       GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS),
+       GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 11, GFLAGS),
+       GATE(0, "pclk_alive_niu", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 12, GFLAGS),
+
+       /* pclk_pd_pmu gates */
+       GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 0, GFLAGS),
+       GATE(0, "pclk_intmem1", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 1, GFLAGS),
+       GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 2, GFLAGS),
+       GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 3, GFLAGS),
+       GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS),
+
+       /* hclk_vio gates */
+       GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 1, GFLAGS),
+       GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS),
+       GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS),
+       GATE(0, "hclk_vio_ahb_arbi", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 9, GFLAGS),
+       GATE(0, "hclk_vio_niu", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 10, GFLAGS),
+       GATE(0, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS),
+       GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS),
+       GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS),
+       GATE(0, "hclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 10, GFLAGS),
+       GATE(0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 4, GFLAGS),
+       GATE(0, "pclk_mipi_dsi1", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 5, GFLAGS),
+       GATE(0, "pclk_mipi_csi", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 6, GFLAGS),
+       GATE(0, "pclk_lvds_phy", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 7, GFLAGS),
+       GATE(0, "pclk_edp_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 8, GFLAGS),
+       GATE(0, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 9, GFLAGS),
+       GATE(0, "pclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 11, GFLAGS),
+
+       /* aclk_vio0 gates */
+       GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS),
+       GATE(0, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS),
+       GATE(0, "aclk_vio0_niu", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 11, GFLAGS),
+       GATE(0, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS),
+
+       /* aclk_vio1 gates */
+       GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS),
+       GATE(0, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS),
+       GATE(0, "aclk_vio1_niu", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 12, GFLAGS),
+
+       /* aclk_rga_pre gates */
+       GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS),
+       GATE(0, "aclk_rga_niu", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 13, GFLAGS),
+
+       /*
+        * Other ungrouped clocks.
+        */
+
+       GATE(0, "pclk_vip_in", "ext_vip", 0, RK3288_CLKGATE_CON(16), 0, GFLAGS),
+       GATE(0, "pclk_isp_in", "ext_isp", 0, RK3288_CLKGATE_CON(16), 3, GFLAGS),
+};
+
+static void __init rk3288_clk_init(struct device_node *np)
+{
+       void __iomem *reg_base;
+       struct clk *clk;
+
+       reg_base = of_iomap(np, 0);
+       if (!reg_base) {
+               pr_err("%s: could not map cru region\n", __func__);
+               return;
+       }
+
+       rockchip_clk_init(np, reg_base, CLK_NR_CLKS);
+
+       /* xin12m is created by a cru-internal divider */
+       clk = clk_register_fixed_factor(NULL, "xin12m", "xin24m", 0, 1, 2);
+       if (IS_ERR(clk))
+               pr_warn("%s: could not register clock xin12m: %ld\n",
+                       __func__, PTR_ERR(clk));
+
+       /* usb480m: 480 MHz placeholder derived from xin24m (24 MHz x 20) */
+       clk = clk_register_fixed_factor(NULL, "usb480m", "xin24m", 0, 20, 1);
+       if (IS_ERR(clk))
+               pr_warn("%s: could not register clock usb480m: %ld\n",
+                       __func__, PTR_ERR(clk));
+
+       rockchip_clk_register_plls(rk3288_pll_clks,
+                                  ARRAY_SIZE(rk3288_pll_clks),
+                                  RK3288_GRF_SOC_STATUS);
+       rockchip_clk_register_branches(rk3288_clk_branches,
+                                 ARRAY_SIZE(rk3288_clk_branches));
+
+       rockchip_register_softrst(np, 9, reg_base + RK3288_SOFTRST_CON(0),
+                                 ROCKCHIP_SOFTRST_HIWORD_MASK);
+}
+CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
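
For illustration, a hypothetical consumer of the clocks registered above
might look as follows. This sketch is not part of the patch: the device
node, the "baudclk" name and foo_probe() are invented for the example,
while the SCLK_UART0 id and the clk API calls are the standard ones.

	/*
	 * Hypothetical consumer sketch: a platform driver picking up one
	 * of the rk3288 clocks via the common clock framework, assuming
	 * its device tree node carries
	 *   clocks = <&cru SCLK_UART0>; clock-names = "baudclk";
	 */
	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int foo_probe(struct platform_device *pdev)
	{
		struct clk *baud;

		baud = devm_clk_get(&pdev->dev, "baudclk");
		if (IS_ERR(baud))
			return PTR_ERR(baud);

		/* gates the whole sclk_uart0 branch on and off */
		return clk_prepare_enable(baud);
	}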
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
new file mode 100644 (file)
index 0000000..278cf9d
--- /dev/null
@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on
+ *
+ * samsung/clk.c
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include "clk.h"
+
+/*
+ * Register a clock branch.
+ * Most clock branches have a form like
+ *
+ * src1 --|--\
+ *        |M |--[GATE]-[DIV]-
+ * src2 --|--/
+ *
+ * sometimes without one of those components.
+ */
+struct clk *rockchip_clk_register_branch(const char *name,
+               const char **parent_names, u8 num_parents, void __iomem *base,
+               int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
+               u8 div_shift, u8 div_width, u8 div_flags,
+               struct clk_div_table *div_table, int gate_offset,
+               u8 gate_shift, u8 gate_flags, unsigned long flags,
+               spinlock_t *lock)
+{
+       struct clk *clk;
+       struct clk_mux *mux = NULL;
+       struct clk_gate *gate = NULL;
+       struct clk_divider *div = NULL;
+       const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
+                            *gate_ops = NULL;
+
+       if (num_parents > 1) {
+               mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+               if (!mux)
+                       return ERR_PTR(-ENOMEM);
+
+               mux->reg = base + muxdiv_offset;
+               mux->shift = mux_shift;
+               mux->mask = BIT(mux_width) - 1;
+               mux->flags = mux_flags;
+               mux->lock = lock;
+               mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
+                                                       : &clk_mux_ops;
+       }
+
+       if (gate_offset >= 0) {
+               gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+               if (!gate) {
+                       kfree(mux);
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               gate->flags = gate_flags;
+               gate->reg = base + gate_offset;
+               gate->bit_idx = gate_shift;
+               gate->lock = lock;
+               gate_ops = &clk_gate_ops;
+       }
+
+       if (div_width > 0) {
+               div = kzalloc(sizeof(*div), GFP_KERNEL);
+               if (!div) {
+                       kfree(gate);
+                       kfree(mux);
+                       return ERR_PTR(-ENOMEM);
+               }
+
+               div->flags = div_flags;
+               div->reg = base + muxdiv_offset;
+               div->shift = div_shift;
+               div->width = div_width;
+               div->lock = lock;
+               div->table = div_table;
+               div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
+                                               ? &clk_divider_ro_ops
+                                               : &clk_divider_ops;
+       }
+
+       clk = clk_register_composite(NULL, name, parent_names, num_parents,
+                                    mux ? &mux->hw : NULL, mux_ops,
+                                    div ? &div->hw : NULL, div_ops,
+                                    gate ? &gate->hw : NULL, gate_ops,
+                                    flags);
+
+       return clk;
+}
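
To make the parameter list above concrete: a COMPOSITE() table entry from
clk-rk3288.c ends up calling this helper with its fields spread out. The
sketch below is illustrative only, assuming the mux_pll_src_cpll_gpll_p
parent array holds the two entries "cpll" and "gpll"; it mirrors the
"sclk_spi0" entry, with CLK_IGNORE_UNUSED being what the branch
registration loop adds for composites.

	static struct clk *example_register_spi0(void __iomem *reg_base,
						 spinlock_t *lock)
	{
		static const char *parents[] = { "cpll", "gpll" };

		/* mux at CLKSEL_CON(25) bit 7, 7-bit divider at bit 0,
		 * gate at CLKGATE_CON(2) bit 9 -- as in the table entry */
		return rockchip_clk_register_branch("sclk_spi0", parents,
				ARRAY_SIZE(parents), reg_base,
				RK3288_CLKSEL_CON(25), 7, 1, MFLAGS,
				0, 7, DFLAGS, NULL,
				RK3288_CLKGATE_CON(2), 9, GFLAGS,
				CLK_IGNORE_UNUSED, lock);
	}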
+
+static DEFINE_SPINLOCK(clk_lock);
+static struct clk **clk_table;
+static void __iomem *reg_base;
+static struct clk_onecell_data clk_data;
+static struct device_node *cru_node;
+static struct regmap *grf;
+
+void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
+                             unsigned long nr_clks)
+{
+       reg_base = base;
+       cru_node = np;
+       grf = ERR_PTR(-EPROBE_DEFER);
+
+       clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
+       if (!clk_table)
+               pr_err("%s: could not allocate clock lookup table\n", __func__);
+
+       clk_data.clks = clk_table;
+       clk_data.clk_num = nr_clks;
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+}
+
+struct regmap *rockchip_clk_get_grf(void)
+{
+       if (IS_ERR(grf))
+               grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
+       return grf;
+}
+
+void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
+{
+       if (clk_table && id)
+               clk_table[id] = clk;
+}
+
+void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
+                               unsigned int nr_pll, int grf_lock_offset)
+{
+       struct clk *clk;
+       int idx;
+
+       for (idx = 0; idx < nr_pll; idx++, list++) {
+               clk = rockchip_clk_register_pll(list->type, list->name,
+                               list->parent_names, list->num_parents,
+                               reg_base, list->con_offset, grf_lock_offset,
+                               list->lock_shift, list->mode_offset,
+                               list->mode_shift, list->rate_table, &clk_lock);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n", __func__,
+                               list->name);
+                       continue;
+               }
+
+               rockchip_clk_add_lookup(clk, list->id);
+       }
+}
+
+void __init rockchip_clk_register_branches(
+                                     struct rockchip_clk_branch *list,
+                                     unsigned int nr_clk)
+{
+       struct clk *clk = NULL;
+       unsigned int idx;
+       unsigned long flags;
+
+       for (idx = 0; idx < nr_clk; idx++, list++) {
+               clk = NULL;
+               flags = list->flags;
+
+               /* catch simple muxes */
+               switch (list->branch_type) {
+               case branch_mux:
+                       clk = clk_register_mux(NULL, list->name,
+                               list->parent_names, list->num_parents,
+                               flags, reg_base + list->muxdiv_offset,
+                               list->mux_shift, list->mux_width,
+                               list->mux_flags, &clk_lock);
+                       break;
+               case branch_divider:
+                       if (list->div_table)
+                               clk = clk_register_divider_table(NULL,
+                                       list->name, list->parent_names[0],
+                                       flags, reg_base + list->muxdiv_offset,
+                                       list->div_shift, list->div_width,
+                                       list->div_flags, list->div_table,
+                                       &clk_lock);
+                       else
+                               clk = clk_register_divider(NULL, list->name,
+                                       list->parent_names[0], flags,
+                                       reg_base + list->muxdiv_offset,
+                                       list->div_shift, list->div_width,
+                                       list->div_flags, &clk_lock);
+                       break;
+               case branch_fraction_divider:
+                       /* unimplemented */
+                       continue;
+               case branch_gate:
+                       flags |= CLK_SET_RATE_PARENT;
+
+                       /* keep all gates untouched for now */
+                       flags |= CLK_IGNORE_UNUSED;
+
+                       clk = clk_register_gate(NULL, list->name,
+                               list->parent_names[0], flags,
+                               reg_base + list->gate_offset,
+                               list->gate_shift, list->gate_flags, &clk_lock);
+                       break;
+               case branch_composite:
+                       /* keep all gates untouched for now */
+                       flags |= CLK_IGNORE_UNUSED;
+
+                       clk = rockchip_clk_register_branch(list->name,
+                               list->parent_names, list->num_parents,
+                               reg_base, list->muxdiv_offset, list->mux_shift,
+                               list->mux_width, list->mux_flags,
+                               list->div_shift, list->div_width,
+                               list->div_flags, list->div_table,
+                               list->gate_offset, list->gate_shift,
+                               list->gate_flags, flags, &clk_lock);
+                       break;
+               }
+
+               /* none of the cases above matched */
+               if (!clk) {
+                       pr_err("%s: unknown clock type %d\n",
+                              __func__, list->branch_type);
+                       continue;
+               }
+
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s: %ld\n",
+                              __func__, list->name, PTR_ERR(clk));
+                       continue;
+               }
+
+               rockchip_clk_add_lookup(clk, list->id);
+       }
+}
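
Each id passed to rockchip_clk_add_lookup() doubles as the device-tree
cell value a consumer uses, e.g. clocks = <&cru SCLK_UART0>. The core's
of_clk_src_onecell_get() helper, reproduced here approximately for
orientation, just bounds-checks the cell and indexes the table populated
above:

	struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec,
					   void *data)
	{
		struct clk_onecell_data *clk_data = data;
		unsigned int idx = clkspec->args[0];

		if (idx >= clk_data->clk_num) {
			pr_err("%s: invalid clock index %u\n", __func__, idx);
			return ERR_PTR(-EINVAL);
		}

		return clk_data->clks[idx];
	}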
diff --git a/drivers/clk/rockchip/clk.h b/drivers/clk/rockchip/clk.h
new file mode 100644 (file)
index 0000000..887cbde
--- /dev/null
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on
+ *
+ * samsung/clk.h
+ * Copyright (c) 2013 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2013 Linaro Ltd.
+ * Author: Thomas Abraham <thomas.ab@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CLK_ROCKCHIP_CLK_H
+#define CLK_ROCKCHIP_CLK_H
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#define HIWORD_UPDATE(val, mask, shift) \
+               ((val) << (shift) | (mask) << ((shift) + 16))
+
+/* register positions shared by RK2928, RK3066 and RK3188 */
+#define RK2928_PLL_CON(x)              ((x) * 0x4)
+#define RK2928_MODE_CON                        0x40
+#define RK2928_CLKSEL_CON(x)           ((x) * 0x4 + 0x44)
+#define RK2928_CLKGATE_CON(x)          ((x) * 0x4 + 0xd0)
+#define RK2928_GLB_SRST_FST            0x100
+#define RK2928_GLB_SRST_SND            0x104
+#define RK2928_SOFTRST_CON(x)          ((x) * 0x4 + 0x110)
+#define RK2928_MISC_CON                        0x134
+
+#define RK3288_PLL_CON(x)              RK2928_PLL_CON(x)
+#define RK3288_MODE_CON                        0x50
+#define RK3288_CLKSEL_CON(x)           ((x) * 0x4 + 0x60)
+#define RK3288_CLKGATE_CON(x)          ((x) * 0x4 + 0x160)
+#define RK3288_GLB_SRST_FST            0x1b0
+#define RK3288_GLB_SRST_SND            0x1b4
+#define RK3288_SOFTRST_CON(x)          ((x) * 0x4 + 0x1b8)
+#define RK3288_MISC_CON                        0x1e8
+
+enum rockchip_pll_type {
+       pll_rk3066,
+};
+
+#define RK3066_PLL_RATE(_rate, _nr, _nf, _no)  \
+{                                              \
+       .rate   = _rate##U,                     \
+       .nr = _nr,                              \
+       .nf = _nf,                              \
+       .no = _no,                              \
+       .bwadj = (_nf >> 1),                    \
+}
+
+struct rockchip_pll_rate_table {
+       unsigned long rate;
+       unsigned int nr;
+       unsigned int nf;
+       unsigned int no;
+       unsigned int bwadj;
+};
+
+/**
+ * struct rockchip_pll_clock: information about pll clock
+ * @id: platform specific id of the clock.
+ * @name: name of this pll clock.
+ * @parent_name: name of the parent clock.
+ * @flags: optional flags for basic clock.
+ * @con_offset: offset of the register for configuring the PLL.
+ * @mode_offset: offset of the register for configuring the PLL-mode.
+ * @mode_shift: offset inside the mode-register for the mode of this pll.
+ * @lock_shift: offset inside the lock register for the lock status.
+ * @type: Type of PLL to be registered.
+ * @rate_table: Table of usable pll rates
+ */
+struct rockchip_pll_clock {
+       unsigned int            id;
+       const char              *name;
+       const char              **parent_names;
+       u8                      num_parents;
+       unsigned long           flags;
+       int                     con_offset;
+       int                     mode_offset;
+       int                     mode_shift;
+       int                     lock_shift;
+       enum rockchip_pll_type  type;
+       struct rockchip_pll_rate_table *rate_table;
+};
+
+#define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift,  \
+               _lshift, _rtable)                                       \
+       {                                                               \
+               .id             = _id,                                  \
+               .type           = _type,                                \
+               .name           = _name,                                \
+               .parent_names   = _pnames,                              \
+               .num_parents    = ARRAY_SIZE(_pnames),                  \
+               .flags          = CLK_GET_RATE_NOCACHE | _flags,        \
+               .con_offset     = _con,                                 \
+               .mode_offset    = _mode,                                \
+               .mode_shift     = _mshift,                              \
+               .lock_shift     = _lshift,                              \
+               .rate_table     = _rtable,                              \
+       }
+
+struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
+               const char *name, const char **parent_names, u8 num_parents,
+               void __iomem *base, int con_offset, int grf_lock_offset,
+               int lock_shift, int reg_mode, int mode_shift,
+               struct rockchip_pll_rate_table *rate_table,
+               spinlock_t *lock);
+
+#define PNAME(x) static const char *x[] __initconst
+
+enum rockchip_clk_branch_type {
+       branch_composite,
+       branch_mux,
+       branch_divider,
+       branch_fraction_divider,
+       branch_gate,
+};
+
+struct rockchip_clk_branch {
+       unsigned int                    id;
+       enum rockchip_clk_branch_type   branch_type;
+       const char                      *name;
+       const char                      **parent_names;
+       u8                              num_parents;
+       unsigned long                   flags;
+       int                             muxdiv_offset;
+       u8                              mux_shift;
+       u8                              mux_width;
+       u8                              mux_flags;
+       u8                              div_shift;
+       u8                              div_width;
+       u8                              div_flags;
+       struct clk_div_table            *div_table;
+       int                             gate_offset;
+       u8                              gate_shift;
+       u8                              gate_flags;
+};
+
+#define COMPOSITE(_id, cname, pnames, f, mo, ms, mw, mf, ds, dw,\
+                 df, go, gs, gf)                               \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_composite,             \
+               .name           = cname,                        \
+               .parent_names   = pnames,                       \
+               .num_parents    = ARRAY_SIZE(pnames),           \
+               .flags          = f,                            \
+               .muxdiv_offset  = mo,                           \
+               .mux_shift      = ms,                           \
+               .mux_width      = mw,                           \
+               .mux_flags      = mf,                           \
+               .div_shift      = ds,                           \
+               .div_width      = dw,                           \
+               .div_flags      = df,                           \
+               .gate_offset    = go,                           \
+               .gate_shift     = gs,                           \
+               .gate_flags     = gf,                           \
+       }
+
+#define COMPOSITE_NOMUX(_id, cname, pname, f, mo, ds, dw, df,  \
+                       go, gs, gf)                             \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_composite,             \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .muxdiv_offset  = mo,                           \
+               .div_shift      = ds,                           \
+               .div_width      = dw,                           \
+               .div_flags      = df,                           \
+               .gate_offset    = go,                           \
+               .gate_shift     = gs,                           \
+               .gate_flags     = gf,                           \
+       }
+
+#define COMPOSITE_NOMUX_DIVTBL(_id, cname, pname, f, mo, ds, dw,\
+                              df, dt, go, gs, gf)              \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_composite,             \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .muxdiv_offset  = mo,                           \
+               .div_shift      = ds,                           \
+               .div_width      = dw,                           \
+               .div_flags      = df,                           \
+               .div_table      = dt,                           \
+               .gate_offset    = go,                           \
+               .gate_shift     = gs,                           \
+               .gate_flags     = gf,                           \
+       }
+
+#define COMPOSITE_NODIV(_id, cname, pnames, f, mo, ms, mw, mf, \
+                       go, gs, gf)                             \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_composite,             \
+               .name           = cname,                        \
+               .parent_names   = pnames,                       \
+               .num_parents    = ARRAY_SIZE(pnames),           \
+               .flags          = f,                            \
+               .muxdiv_offset  = mo,                           \
+               .mux_shift      = ms,                           \
+               .mux_width      = mw,                           \
+               .mux_flags      = mf,                           \
+               .gate_offset    = go,                           \
+               .gate_shift     = gs,                           \
+               .gate_flags     = gf,                           \
+       }
+
+#define COMPOSITE_NOGATE(_id, cname, pnames, f, mo, ms, mw, mf,        \
+                        ds, dw, df)                            \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_composite,             \
+               .name           = cname,                        \
+               .parent_names   = pnames,                       \
+               .num_parents    = ARRAY_SIZE(pnames),           \
+               .flags          = f,                            \
+               .muxdiv_offset  = mo,                           \
+               .mux_shift      = ms,                           \
+               .mux_width      = mw,                           \
+               .mux_flags      = mf,                           \
+               .div_shift      = ds,                           \
+               .div_width      = dw,                           \
+               .div_flags      = df,                           \
+               .gate_offset    = -1,                           \
+       }
+
+#define COMPOSITE_FRAC(_id, cname, pname, f, mo, df, go, gs, gf)\
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_fraction_divider,      \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .muxdiv_offset  = mo,                           \
+               .div_shift      = 16,                           \
+               .div_width      = 16,                           \
+               .div_flags      = df,                           \
+               .gate_offset    = go,                           \
+               .gate_shift     = gs,                           \
+               .gate_flags     = gf,                           \
+       }
+
+#define MUX(_id, cname, pnames, f, o, s, w, mf)                        \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_mux,                   \
+               .name           = cname,                        \
+               .parent_names   = pnames,                       \
+               .num_parents    = ARRAY_SIZE(pnames),           \
+               .flags          = f,                            \
+               .muxdiv_offset  = o,                            \
+               .mux_shift      = s,                            \
+               .mux_width      = w,                            \
+               .mux_flags      = mf,                           \
+               .gate_offset    = -1,                           \
+       }
+
+#define DIV(_id, cname, pname, f, o, s, w, df)                 \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_divider,               \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .muxdiv_offset  = o,                            \
+               .div_shift      = s,                            \
+               .div_width      = w,                            \
+               .div_flags      = df,                           \
+               .gate_offset    = -1,                           \
+       }
+
+#define DIVTBL(_id, cname, pname, f, o, s, w, df, dt)          \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_divider,               \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .muxdiv_offset  = o,                            \
+               .div_shift      = s,                            \
+               .div_width      = w,                            \
+               .div_flags      = df,                           \
+               .div_table      = dt,                           \
+       }
+
+#define GATE(_id, cname, pname, f, o, b, gf)                   \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_gate,                  \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .flags          = f,                            \
+               .gate_offset    = o,                            \
+               .gate_shift     = b,                            \
+               .gate_flags     = gf,                           \
+       }
+
+
+void rockchip_clk_init(struct device_node *np, void __iomem *base,
+                      unsigned long nr_clks);
+struct regmap *rockchip_clk_get_grf(void);
+void rockchip_clk_add_lookup(struct clk *clk, unsigned int id);
+void rockchip_clk_register_branches(struct rockchip_clk_branch *clk_list,
+                                   unsigned int nr_clk);
+void rockchip_clk_register_plls(struct rockchip_pll_clock *pll_list,
+                               unsigned int nr_pll, int grf_lock_offset);
+
+#define ROCKCHIP_SOFTRST_HIWORD_MASK   BIT(0)
+
+#ifdef CONFIG_RESET_CONTROLLER
+void rockchip_register_softrst(struct device_node *np,
+                              unsigned int num_regs,
+                              void __iomem *base, u8 flags);
+#else
+static inline void rockchip_register_softrst(struct device_node *np,
+                              unsigned int num_regs,
+                              void __iomem *base, u8 flags)
+{
+}
+#endif
+
+#endif
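
A note on HIWORD_UPDATE(): the Rockchip CRU implements a write-mask in
the upper halfword of most registers, so a field can be changed without a
read-modify-write cycle (and without taking a lock). A minimal sketch,
with made-up values:

	static void example_hiword_write(void __iomem *reg_base)
	{
		/*
		 * Set the 2-bit field at bit 8 of CLKSEL_CON(1) to 2.
		 * Bits [31:16] of the written word say which of bits
		 * [15:0] take effect; everything else is left untouched.
		 */
		writel(HIWORD_UPDATE(2, 0x3, 8),
		       reg_base + RK3288_CLKSEL_CON(1));
	}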
diff --git a/drivers/clk/rockchip/softrst.c b/drivers/clk/rockchip/softrst.c
new file mode 100644 (file)
index 0000000..552f7bb
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+#include "clk.h"
+
+struct rockchip_softrst {
+       struct reset_controller_dev     rcdev;
+       void __iomem                    *reg_base;
+       int                             num_regs;
+       int                             num_per_reg;
+       u8                              flags;
+       spinlock_t                      lock;
+};
+
+static int rockchip_softrst_assert(struct reset_controller_dev *rcdev,
+                             unsigned long id)
+{
+       struct rockchip_softrst *softrst = container_of(rcdev,
+                                                    struct rockchip_softrst,
+                                                    rcdev);
+       int bank = id / softrst->num_per_reg;
+       int offset = id % softrst->num_per_reg;
+
+       if (softrst->flags & ROCKCHIP_SOFTRST_HIWORD_MASK) {
+               writel(BIT(offset) | (BIT(offset) << 16),
+                      softrst->reg_base + (bank * 4));
+       } else {
+               unsigned long flags;
+               u32 reg;
+
+               spin_lock_irqsave(&softrst->lock, flags);
+
+               reg = readl(softrst->reg_base + (bank * 4));
+               writel(reg | BIT(offset), softrst->reg_base + (bank * 4));
+
+               spin_unlock_irqrestore(&softrst->lock, flags);
+       }
+
+       return 0;
+}
+
+static int rockchip_softrst_deassert(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       struct rockchip_softrst *softrst = container_of(rcdev,
+                                                    struct rockchip_softrst,
+                                                    rcdev);
+       int bank = id / softrst->num_per_reg;
+       int offset = id % softrst->num_per_reg;
+
+       if (softrst->flags & ROCKCHIP_SOFTRST_HIWORD_MASK) {
+               writel((BIT(offset) << 16), softrst->reg_base + (bank * 4));
+       } else {
+               unsigned long flags;
+               u32 reg;
+
+               spin_lock_irqsave(&softrst->lock, flags);
+
+               reg = readl(softrst->reg_base + (bank * 4));
+               writel(reg & ~BIT(offset), softrst->reg_base + (bank * 4));
+
+               spin_unlock_irqrestore(&softrst->lock, flags);
+       }
+
+       return 0;
+}
+
+static struct reset_control_ops rockchip_softrst_ops = {
+       .assert         = rockchip_softrst_assert,
+       .deassert       = rockchip_softrst_deassert,
+};
+
+void __init rockchip_register_softrst(struct device_node *np,
+                                     unsigned int num_regs,
+                                     void __iomem *base, u8 flags)
+{
+       struct rockchip_softrst *softrst;
+       int ret;
+
+       softrst = kzalloc(sizeof(*softrst), GFP_KERNEL);
+       if (!softrst)
+               return;
+
+       spin_lock_init(&softrst->lock);
+
+       softrst->reg_base = base;
+       softrst->flags = flags;
+       softrst->num_regs = num_regs;
+       softrst->num_per_reg = (flags & ROCKCHIP_SOFTRST_HIWORD_MASK) ? 16
+                                                                     : 32;
+
+       softrst->rcdev.owner = THIS_MODULE;
+       softrst->rcdev.nr_resets = num_regs * softrst->num_per_reg;
+       softrst->rcdev.ops = &rockchip_softrst_ops;
+       softrst->rcdev.of_node = np;
+       ret = reset_controller_register(&softrst->rcdev);
+       if (ret) {
+               pr_err("%s: could not register reset controller, %d\n",
+                      __func__, ret);
+               kfree(softrst);
+       }
+}
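
A hypothetical consumer of one of these soft-reset lines, for
illustration only (the device, the "axi" name and foo_reset_block() are
invented; the reset_control API calls are the standard ones):

	#include <linux/delay.h>
	#include <linux/reset.h>

	static int foo_reset_block(struct device *dev)
	{
		struct reset_control *rst;

		/* DT assumed: resets = <&cru 123>; reset-names = "axi"; */
		rst = devm_reset_control_get(dev, "axi");
		if (IS_ERR(rst))
			return PTR_ERR(rst);

		reset_control_assert(rst);
		usleep_range(10, 20);
		return reset_control_deassert(rst);
	}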
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 69e8177..2949a55 100644 (file)
@@ -11,6 +11,7 @@ obj-$(CONFIG_SOC_EXYNOS5410)  += clk-exynos5410.o
 obj-$(CONFIG_SOC_EXYNOS5420)   += clk-exynos5420.o
 obj-$(CONFIG_SOC_EXYNOS5440)   += clk-exynos5440.o
 obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos-audss.o
+obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos-clkout.o
 obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
 obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
 obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-exynos-clkout.c b/drivers/clk/samsung/clk-exynos-clkout.c
new file mode 100644 (file)
index 0000000..3a7cb25
--- /dev/null
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Tomasz Figa <t.figa@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Clock driver for Exynos clock output
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/syscore_ops.h>
+
+#define EXYNOS_CLKOUT_NR_CLKS          1
+#define EXYNOS_CLKOUT_PARENTS          32
+
+#define EXYNOS_PMU_DEBUG_REG           0xa00
+#define EXYNOS_CLKOUT_DISABLE_SHIFT    0
+#define EXYNOS_CLKOUT_MUX_SHIFT                8
+#define EXYNOS4_CLKOUT_MUX_MASK                0xf
+#define EXYNOS5_CLKOUT_MUX_MASK                0x1f
+
+struct exynos_clkout {
+       struct clk_gate gate;
+       struct clk_mux mux;
+       spinlock_t slock;
+       struct clk_onecell_data data;
+       struct clk *clk_table[EXYNOS_CLKOUT_NR_CLKS];
+       void __iomem *reg;
+       u32 pmu_debug_save;
+};
+
+static struct exynos_clkout *clkout;
+
+static int exynos_clkout_suspend(void)
+{
+       clkout->pmu_debug_save = readl(clkout->reg + EXYNOS_PMU_DEBUG_REG);
+
+       return 0;
+}
+
+static void exynos_clkout_resume(void)
+{
+       writel(clkout->pmu_debug_save, clkout->reg + EXYNOS_PMU_DEBUG_REG);
+}
+
+static struct syscore_ops exynos_clkout_syscore_ops = {
+       .suspend = exynos_clkout_suspend,
+       .resume = exynos_clkout_resume,
+};
+
+static void __init exynos_clkout_init(struct device_node *node, u32 mux_mask)
+{
+       const char *parent_names[EXYNOS_CLKOUT_PARENTS];
+       struct clk *parents[EXYNOS_CLKOUT_PARENTS];
+       int parent_count;
+       int ret;
+       int i;
+
+       clkout = kzalloc(sizeof(*clkout), GFP_KERNEL);
+       if (!clkout)
+               return;
+
+       spin_lock_init(&clkout->slock);
+
+       parent_count = 0;
+       for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i) {
+               char name[] = "clkoutXX";
+
+               snprintf(name, sizeof(name), "clkout%d", i);
+               parents[i] = of_clk_get_by_name(node, name);
+               if (IS_ERR(parents[i])) {
+                       parent_names[i] = "none";
+                       continue;
+               }
+
+               parent_names[i] = __clk_get_name(parents[i]);
+               parent_count = i + 1;
+       }
+
+       if (!parent_count)
+               goto free_clkout;
+
+       clkout->reg = of_iomap(node, 0);
+       if (!clkout->reg)
+               goto clks_put;
+
+       clkout->gate.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
+       clkout->gate.bit_idx = EXYNOS_CLKOUT_DISABLE_SHIFT;
+       clkout->gate.flags = CLK_GATE_SET_TO_DISABLE;
+       clkout->gate.lock = &clkout->slock;
+
+       clkout->mux.reg = clkout->reg + EXYNOS_PMU_DEBUG_REG;
+       clkout->mux.mask = mux_mask;
+       clkout->mux.shift = EXYNOS_CLKOUT_MUX_SHIFT;
+       clkout->mux.lock = &clkout->slock;
+
+       clkout->clk_table[0] = clk_register_composite(NULL, "clkout",
+                               parent_names, parent_count, &clkout->mux.hw,
+                               &clk_mux_ops, NULL, NULL, &clkout->gate.hw,
+                               &clk_gate_ops, CLK_SET_RATE_PARENT
+                               | CLK_SET_RATE_NO_REPARENT);
+       if (IS_ERR(clkout->clk_table[0]))
+               goto err_unmap;
+
+       clkout->data.clks = clkout->clk_table;
+       clkout->data.clk_num = EXYNOS_CLKOUT_NR_CLKS;
+       ret = of_clk_add_provider(node, of_clk_src_onecell_get, &clkout->data);
+       if (ret)
+               goto err_clk_unreg;
+
+       register_syscore_ops(&exynos_clkout_syscore_ops);
+
+       return;
+
+err_clk_unreg:
+       clk_unregister(clkout->clk_table[0]);
+err_unmap:
+       iounmap(clkout->reg);
+clks_put:
+       for (i = 0; i < EXYNOS_CLKOUT_PARENTS; ++i)
+               if (!IS_ERR(parents[i]))
+                       clk_put(parents[i]);
+free_clkout:
+       kfree(clkout);
+
+       pr_err("%s: failed to register clkout clock\n", __func__);
+}
+
+static void __init exynos4_clkout_init(struct device_node *node)
+{
+       exynos_clkout_init(node, EXYNOS4_CLKOUT_MUX_MASK);
+}
+CLK_OF_DECLARE(exynos4210_clkout, "samsung,exynos4210-pmu",
+               exynos4_clkout_init);
+CLK_OF_DECLARE(exynos4212_clkout, "samsung,exynos4212-pmu",
+               exynos4_clkout_init);
+CLK_OF_DECLARE(exynos4412_clkout, "samsung,exynos4412-pmu",
+               exynos4_clkout_init);
+
+static void __init exynos5_clkout_init(struct device_node *node)
+{
+       exynos_clkout_init(node, EXYNOS5_CLKOUT_MUX_MASK);
+}
+CLK_OF_DECLARE(exynos5250_clkout, "samsung,exynos5250-pmu",
+               exynos5_clkout_init);
+CLK_OF_DECLARE(exynos5420_clkout, "samsung,exynos5420-pmu",
+               exynos5_clkout_init);
diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c
index 7a17bd4..dc85f8e 100644 (file)
 #define SRC_CPU                        0x14200
 #define DIV_CPU0               0x14500
 #define DIV_CPU1               0x14504
+#define PWR_CTRL1              0x15020
+#define PWR_CTRL2              0x15024
+
+/* The definitions below are used for PWR_CTRL settings */
+#define PWR_CTRL1_CORE2_DOWN_RATIO(x)          (((x) & 0x7) << 28)
+#define PWR_CTRL1_CORE1_DOWN_RATIO(x)          (((x) & 0x7) << 16)
+#define PWR_CTRL1_DIV2_DOWN_EN                 (1 << 9)
+#define PWR_CTRL1_DIV1_DOWN_EN                 (1 << 8)
+#define PWR_CTRL1_USE_CORE3_WFE                        (1 << 7)
+#define PWR_CTRL1_USE_CORE2_WFE                        (1 << 6)
+#define PWR_CTRL1_USE_CORE1_WFE                        (1 << 5)
+#define PWR_CTRL1_USE_CORE0_WFE                        (1 << 4)
+#define PWR_CTRL1_USE_CORE3_WFI                        (1 << 3)
+#define PWR_CTRL1_USE_CORE2_WFI                        (1 << 2)
+#define PWR_CTRL1_USE_CORE1_WFI                        (1 << 1)
+#define PWR_CTRL1_USE_CORE0_WFI                        (1 << 0)
 
 /* list of PLLs to be registered */
 enum exynos3250_plls {
@@ -168,6 +184,8 @@ static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
        SRC_CPU,
        DIV_CPU0,
        DIV_CPU1,
+       PWR_CTRL1,
+       PWR_CTRL2,
 };
 
 static int exynos3250_clk_suspend(void)
@@ -748,6 +766,27 @@ static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = {
                        UPLL_LOCK, UPLL_CON0, NULL),
 };
 
+static void __init exynos3_core_down_clock(void)
+{
+       unsigned int tmp;
+
+       /*
+        * Enable arm clock down (in idle) and set arm divider
+        * ratios in WFI/WFE state.
+        */
+       tmp = (PWR_CTRL1_CORE2_DOWN_RATIO(7) | PWR_CTRL1_CORE1_DOWN_RATIO(7) |
+               PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN |
+               PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE |
+               PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI);
+       __raw_writel(tmp, reg_base + PWR_CTRL1);
+
+       /*
+        * Disable the clock up feature on Exynos4x12, in case it was
+        * enabled by bootloader.
+        */
+       __raw_writel(0x0, reg_base + PWR_CTRL2);
+}
+
 static void __init exynos3250_cmu_init(struct device_node *np)
 {
        struct samsung_clk_provider *ctx;
@@ -775,6 +814,10 @@ static void __init exynos3250_cmu_init(struct device_node *np)
        samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
        samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
 
+       exynos3_core_down_clock();
+
        exynos3250_clk_sleep_init();
+
+       samsung_clk_of_add_provider(np, ctx);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
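
For reference, the PWR_CTRL1 value composed in exynos3_core_down_clock()
above works out as follows (a worked check, not additional code in the
patch):

	/*
	 * CORE2_DOWN_RATIO(7)            = 0x70000000
	 * CORE1_DOWN_RATIO(7)            = 0x00070000
	 * DIV2_DOWN_EN | DIV1_DOWN_EN    = 0x00000300
	 * USE_CORE1_WFE | USE_CORE0_WFE  = 0x00000030
	 * USE_CORE1_WFI | USE_CORE0_WFI  = 0x00000003
	 *                                  ----------
	 * value written to PWR_CTRL1     = 0x70070333
	 */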
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4f150c9..ac163d7 100644 (file)
 #define DIV_LEFTBUS            0x4500
 #define GATE_IP_LEFTBUS                0x4800
 #define E4X12_GATE_IP_IMAGE    0x4930
+#define CLKOUT_CMU_LEFTBUS     0x4a00
 #define SRC_RIGHTBUS           0x8200
 #define DIV_RIGHTBUS           0x8500
 #define GATE_IP_RIGHTBUS       0x8800
 #define E4X12_GATE_IP_PERIR    0x8960
+#define CLKOUT_CMU_RIGHTBUS    0x8a00
 #define EPLL_LOCK              0xc010
 #define VPLL_LOCK              0xc020
 #define EPLL_CON0              0xc110
 #define GATE_IP_PERIL          0xc950
 #define E4210_GATE_IP_PERIR    0xc960
 #define GATE_BLOCK             0xc970
+#define CLKOUT_CMU_TOP         0xca00
 #define E4X12_MPLL_LOCK                0x10008
 #define E4X12_MPLL_CON0                0x10108
 #define SRC_DMC                        0x10200
 #define DIV_DMC0               0x10500
 #define DIV_DMC1               0x10504
 #define GATE_IP_DMC            0x10900
+#define CLKOUT_CMU_DMC         0x10a00
 #define APLL_LOCK              0x14000
 #define E4210_MPLL_LOCK                0x14008
 #define APLL_CON0              0x14100
 #define DIV_CPU1               0x14504
 #define GATE_SCLK_CPU          0x14800
 #define GATE_IP_CPU            0x14900
+#define CLKOUT_CMU_CPU         0x14a00
+#define PWR_CTRL1              0x15020
+#define E4X12_PWR_CTRL2                0x15024
 #define E4X12_DIV_ISP0         0x18300
 #define E4X12_DIV_ISP1         0x18304
 #define E4X12_GATE_ISP0                0x18800
 #define E4X12_GATE_ISP1                0x18804
 
+/* The definitions below are used for the PWR_CTRL settings */
+#define PWR_CTRL1_CORE2_DOWN_RATIO(x)          (((x) & 0x7) << 28)
+#define PWR_CTRL1_CORE1_DOWN_RATIO(x)          (((x) & 0x7) << 16)
+#define PWR_CTRL1_DIV2_DOWN_EN                 (1 << 9)
+#define PWR_CTRL1_DIV1_DOWN_EN                 (1 << 8)
+#define PWR_CTRL1_USE_CORE3_WFE                        (1 << 7)
+#define PWR_CTRL1_USE_CORE2_WFE                        (1 << 6)
+#define PWR_CTRL1_USE_CORE1_WFE                        (1 << 5)
+#define PWR_CTRL1_USE_CORE0_WFE                        (1 << 4)
+#define PWR_CTRL1_USE_CORE3_WFI                        (1 << 3)
+#define PWR_CTRL1_USE_CORE2_WFI                        (1 << 2)
+#define PWR_CTRL1_USE_CORE1_WFI                        (1 << 1)
+#define PWR_CTRL1_USE_CORE0_WFI                        (1 << 0)
+
 /* the exynos4 soc type */
 enum exynos4_soc {
        EXYNOS4210,
@@ -155,6 +176,7 @@ static unsigned long exynos4210_clk_save[] __initdata = {
        E4210_GATE_IP_LCD1,
        E4210_GATE_IP_PERIR,
        E4210_MPLL_CON0,
+       PWR_CTRL1,
 };
 
 static unsigned long exynos4x12_clk_save[] __initdata = {
@@ -164,6 +186,8 @@ static unsigned long exynos4x12_clk_save[] __initdata = {
        E4X12_DIV_ISP,
        E4X12_DIV_CAM1,
        E4X12_MPLL_CON0,
+       PWR_CTRL1,
+       E4X12_PWR_CTRL2,
 };
 
 static unsigned long exynos4_clk_pll_regs[] __initdata = {
@@ -242,6 +266,11 @@ static unsigned long exynos4_clk_regs[] __initdata = {
        DIV_CPU1,
        GATE_SCLK_CPU,
        GATE_IP_CPU,
+       CLKOUT_CMU_LEFTBUS,
+       CLKOUT_CMU_RIGHTBUS,
+       CLKOUT_CMU_TOP,
+       CLKOUT_CMU_DMC,
+       CLKOUT_CMU_CPU,
 };
 
 static const struct samsung_clk_reg_dump src_mask_suspend[] = {
@@ -397,10 +426,32 @@ PNAME(mout_audio2_p4210) = { "cdclk2", "none", "sclk_hdmi24m",
                                "sclk_epll", "sclk_vpll", };
 PNAME(mout_mixer_p4210)        = { "sclk_dac", "sclk_hdmi", };
 PNAME(mout_dac_p4210)  = { "sclk_vpll", "sclk_hdmiphy", };
+PNAME(mout_pwi_p4210) = { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0",
+                               "sclk_usbphy1", "sclk_hdmiphy", "none",
+                               "sclk_epll", "sclk_vpll" };
+PNAME(clkout_left_p4210) = { "sclk_mpll_div_2", "sclk_apll_div_2",
+                               "div_gdl", "div_gpl" };
+PNAME(clkout_right_p4210) = { "sclk_mpll_div_2", "sclk_apll_div_2",
+                               "div_gdr", "div_gpr" };
+PNAME(clkout_top_p4210) = { "fout_epll", "fout_vpll", "sclk_hdmi24m",
+                               "sclk_usbphy0", "sclk_usbphy1", "sclk_hdmiphy",
+                               "cdclk0", "cdclk1", "cdclk2", "spdif_extclk",
+                               "aclk160", "aclk133", "aclk200", "aclk100",
+                               "sclk_mfc", "sclk_g3d", "sclk_g2d",
+                               "cam_a_pclk", "cam_b_pclk", "s_rxbyteclkhs0_2l",
+                               "s_rxbyteclkhs0_4l" };
+PNAME(clkout_dmc_p4210) = { "div_dmcd", "div_dmcp", "div_acp_pclk", "div_dmc",
+                               "div_dphy", "none", "div_pwi" };
+PNAME(clkout_cpu_p4210) = { "fout_apll_div_2", "none", "fout_mpll_div_2",
+                               "none", "arm_clk_div_2", "div_corem0",
+                               "div_corem1", "div_corem0", "div_atb",
+                               "div_periph", "div_pclk_dbg", "div_hpm" };
 
 /* Exynos 4x12-specific parent groups */
 PNAME(mout_mpll_user_p4x12) = { "fin_pll", "sclk_mpll", };
 PNAME(mout_core_p4x12) = { "mout_apll", "mout_mpll_user_c", };
+PNAME(mout_gdl_p4x12)  = { "mout_mpll_user_l", "sclk_apll", };
+PNAME(mout_gdr_p4x12)  = { "mout_mpll_user_r", "sclk_apll", };
 PNAME(sclk_ampll_p4x12)        = { "mout_mpll_user_t", "sclk_apll", };
 PNAME(group1_p4x12)    = { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0",
                                "none", "sclk_hdmiphy", "mout_mpll_user_t",
@@ -418,6 +469,32 @@ PNAME(aclk_p4412)  = { "mout_mpll_user_t", "sclk_apll", };
 PNAME(mout_user_aclk400_mcuisp_p4x12) = {"fin_pll", "div_aclk400_mcuisp", };
 PNAME(mout_user_aclk200_p4x12) = {"fin_pll", "div_aclk200", };
 PNAME(mout_user_aclk266_gps_p4x12) = {"fin_pll", "div_aclk266_gps", };
+PNAME(mout_pwi_p4x12) = { "xxti", "xusbxti", "sclk_hdmi24m", "sclk_usbphy0",
+                               "none", "sclk_hdmiphy", "sclk_mpll",
+                               "sclk_epll", "sclk_vpll" };
+PNAME(clkout_left_p4x12) = { "sclk_mpll_user_l_div_2", "sclk_apll_div_2",
+                               "div_gdl", "div_gpl" };
+PNAME(clkout_right_p4x12) = { "sclk_mpll_user_r_div_2", "sclk_apll_div_2",
+                               "div_gdr", "div_gpr" };
+PNAME(clkout_top_p4x12) = { "fout_epll", "fout_vpll", "sclk_hdmi24m",
+                               "sclk_usbphy0", "none", "sclk_hdmiphy",
+                               "cdclk0", "cdclk1", "cdclk2", "spdif_extclk",
+                               "aclk160", "aclk133", "aclk200", "aclk100",
+                               "sclk_mfc", "sclk_g3d", "aclk400_mcuisp",
+                               "cam_a_pclk", "cam_b_pclk", "s_rxbyteclkhs0_2l",
+                               "s_rxbyteclkhs0_4l", "rx_half_byte_clk_csis0",
+                               "rx_half_byte_clk_csis1", "div_jpeg",
+                               "sclk_pwm_isp", "sclk_spi0_isp",
+                               "sclk_spi1_isp", "sclk_uart_isp",
+                               "sclk_mipihsi", "sclk_hdmi", "sclk_fimd0",
+                               "sclk_pcm0" };
+PNAME(clkout_dmc_p4x12) = { "div_dmcd", "div_dmcp", "aclk_acp", "div_acp_pclk",
+                               "div_dmc", "div_dphy", "fout_mpll_div_2",
+                               "div_pwi", "none", "div_c2c", "div_c2c_aclk" };
+PNAME(clkout_cpu_p4x12) = { "fout_apll_div_2", "none", "none", "none",
+                               "arm_clk_div_2", "div_corem0", "div_corem1",
+                               "div_cores", "div_atb", "div_periph",
+                               "div_pclk_dbg", "div_hpm" };
 
 /* fixed rate clocks generated outside the soc */
 static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata = {
@@ -436,6 +513,24 @@ static struct samsung_fixed_rate_clock exynos4210_fixed_rate_clks[] __initdata =
        FRATE(0, "sclk_usbphy1", NULL, CLK_IS_ROOT, 48000000),
 };
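+/*
+ * FFACTOR(id, name, parent, mult, div, flags) registers a fixed-factor
+ * clock whose rate is always parent_rate * mult / div; every entry in
+ * the arrays below uses mult/div = 1/2, i.e. a fixed divide-by-two
+ * (for example "arm_clk_div_2" always runs at half of "arm_clk").
+ */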
 
+static struct samsung_fixed_factor_clock exynos4_fixed_factor_clks[] __initdata = {
+       FFACTOR(0, "sclk_apll_div_2", "sclk_apll", 1, 2, 0),
+       FFACTOR(0, "fout_mpll_div_2", "fout_mpll", 1, 2, 0),
+       FFACTOR(0, "fout_apll_div_2", "fout_apll", 1, 2, 0),
+       FFACTOR(0, "arm_clk_div_2", "arm_clk", 1, 2, 0),
+};
+
+static struct samsung_fixed_factor_clock exynos4210_fixed_factor_clks[] __initdata = {
+       FFACTOR(0, "sclk_mpll_div_2", "sclk_mpll", 1, 2, 0),
+};
+
+static struct samsung_fixed_factor_clock exynos4x12_fixed_factor_clks[] __initdata = {
+       FFACTOR(0, "sclk_mpll_user_l_div_2", "mout_mpll_user_l", 1, 2, 0),
+       FFACTOR(0, "sclk_mpll_user_r_div_2", "mout_mpll_user_r", 1, 2, 0),
+       FFACTOR(0, "sclk_mpll_user_t_div_2", "mout_mpll_user_t", 1, 2, 0),
+       FFACTOR(0, "sclk_mpll_user_c_div_2", "mout_mpll_user_c", 1, 2, 0),
+};
+
 /* list of mux clocks supported in all exynos4 soc's */
 static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
        MUX_FA(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
@@ -451,6 +546,9 @@ static struct samsung_mux_clock exynos4_mux_clks[] __initdata = {
        MUX(0, "mout_onenand1", mout_onenand1_p, SRC_TOP0, 0, 1),
        MUX(CLK_SCLK_EPLL, "sclk_epll", mout_epll_p, SRC_TOP0, 4, 1),
        MUX(0, "mout_onenand", mout_onenand_p, SRC_TOP0, 28, 1),
+
+       MUX(0, "mout_dmc_bus", sclk_ampll_p4210, SRC_DMC, 4, 1),
+       MUX(0, "mout_dphy", sclk_ampll_p4210, SRC_DMC, 8, 1),
 };
 
 /* list of mux clocks supported in exynos4210 soc */
@@ -459,6 +557,14 @@ static struct samsung_mux_clock exynos4210_mux_early[] __initdata = {
 };
 
 static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
+       MUX(0, "mout_gdl", sclk_ampll_p4210, SRC_LEFTBUS, 0, 1),
+       MUX(0, "mout_clkout_leftbus", clkout_left_p4210,
+                       CLKOUT_CMU_LEFTBUS, 0, 5),
+
+       MUX(0, "mout_gdr", sclk_ampll_p4210, SRC_RIGHTBUS, 0, 1),
+       MUX(0, "mout_clkout_rightbus", clkout_right_p4210,
+                       CLKOUT_CMU_RIGHTBUS, 0, 5),
+
        MUX(0, "mout_aclk200", sclk_ampll_p4210, SRC_TOP0, 12, 1),
        MUX(0, "mout_aclk100", sclk_ampll_p4210, SRC_TOP0, 16, 1),
        MUX(0, "mout_aclk160", sclk_ampll_p4210, SRC_TOP0, 20, 1),
@@ -472,6 +578,7 @@ static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
        MUX(0, "mout_mipi1", group1_p4210, E4210_SRC_LCD1, 12, 4),
        MUX(CLK_SCLK_MPLL, "sclk_mpll", mout_mpll_p, SRC_CPU, 8, 1),
        MUX(CLK_MOUT_CORE, "mout_core", mout_core_p4210, SRC_CPU, 16, 1),
+       MUX(0, "mout_hpm", mout_core_p4210, SRC_CPU, 20, 1),
        MUX(CLK_SCLK_VPLL, "sclk_vpll", sclk_vpll_p4210, SRC_TOP0, 8, 1),
        MUX(CLK_MOUT_FIMC0, "mout_fimc0", group1_p4210, SRC_CAM, 0, 4),
        MUX(CLK_MOUT_FIMC1, "mout_fimc1", group1_p4210, SRC_CAM, 4, 4),
@@ -503,12 +610,30 @@ static struct samsung_mux_clock exynos4210_mux_clks[] __initdata = {
        MUX(0, "mout_spi0", group1_p4210, SRC_PERIL1, 16, 4),
        MUX(0, "mout_spi1", group1_p4210, SRC_PERIL1, 20, 4),
        MUX(0, "mout_spi2", group1_p4210, SRC_PERIL1, 24, 4),
+       MUX(0, "mout_clkout_top", clkout_top_p4210, CLKOUT_CMU_TOP, 0, 5),
+
+       MUX(0, "mout_pwi", mout_pwi_p4210, SRC_DMC, 16, 4),
+       MUX(0, "mout_clkout_dmc", clkout_dmc_p4210, CLKOUT_CMU_DMC, 0, 5),
+
+       MUX(0, "mout_clkout_cpu", clkout_cpu_p4210, CLKOUT_CMU_CPU, 0, 5),
 };
 
 /* list of mux clocks supported in exynos4x12 soc */
 static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
+       MUX(0, "mout_mpll_user_l", mout_mpll_p, SRC_LEFTBUS, 4, 1),
+       MUX(0, "mout_gdl", mout_gdl_p4x12, SRC_LEFTBUS, 0, 1),
+       MUX(0, "mout_clkout_leftbus", clkout_left_p4x12,
+                       CLKOUT_CMU_LEFTBUS, 0, 5),
+
+       MUX(0, "mout_mpll_user_r", mout_mpll_p, SRC_RIGHTBUS, 4, 1),
+       MUX(0, "mout_gdr", mout_gdr_p4x12, SRC_RIGHTBUS, 0, 1),
+       MUX(0, "mout_clkout_rightbus", clkout_right_p4x12,
+                       CLKOUT_CMU_RIGHTBUS, 0, 5),
+
        MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p4x12,
                        SRC_CPU, 24, 1),
+       MUX(0, "mout_clkout_cpu", clkout_cpu_p4x12, CLKOUT_CMU_CPU, 0, 5),
+
        MUX(0, "mout_aclk266_gps", aclk_p4412, SRC_TOP1, 4, 1),
        MUX(0, "mout_aclk400_mcuisp", aclk_p4412, SRC_TOP1, 8, 1),
        MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p4x12,
@@ -531,6 +656,7 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
        MUX(CLK_SCLK_MPLL, "sclk_mpll", mout_mpll_p, SRC_DMC, 12, 1),
        MUX(CLK_SCLK_VPLL, "sclk_vpll", mout_vpll_p, SRC_TOP0, 8, 1),
        MUX(CLK_MOUT_CORE, "mout_core", mout_core_p4x12, SRC_CPU, 16, 1),
+       MUX(0, "mout_hpm", mout_core_p4x12, SRC_CPU, 20, 1),
        MUX(CLK_MOUT_FIMC0, "mout_fimc0", group1_p4x12, SRC_CAM, 0, 4),
        MUX(CLK_MOUT_FIMC1, "mout_fimc1", group1_p4x12, SRC_CAM, 4, 4),
        MUX(CLK_MOUT_FIMC2, "mout_fimc2", group1_p4x12, SRC_CAM, 8, 4),
@@ -565,15 +691,39 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
        MUX(0, "mout_spi0_isp", group1_p4x12, E4X12_SRC_ISP, 4, 4),
        MUX(0, "mout_spi1_isp", group1_p4x12, E4X12_SRC_ISP, 8, 4),
        MUX(0, "mout_uart_isp", group1_p4x12, E4X12_SRC_ISP, 12, 4),
+       MUX(0, "mout_clkout_top", clkout_top_p4x12, CLKOUT_CMU_TOP, 0, 5),
+
+       MUX(0, "mout_c2c", sclk_ampll_p4210, SRC_DMC, 0, 1),
+       MUX(0, "mout_pwi", mout_pwi_p4x12, SRC_DMC, 16, 4),
        MUX(0, "mout_g2d0", sclk_ampll_p4210, SRC_DMC, 20, 1),
        MUX(0, "mout_g2d1", sclk_evpll_p, SRC_DMC, 24, 1),
        MUX(0, "mout_g2d", mout_g2d_p, SRC_DMC, 28, 1),
+       MUX(0, "mout_clkout_dmc", clkout_dmc_p4x12, CLKOUT_CMU_DMC, 0, 5),
 };
 
 /* list of divider clocks supported in all exynos4 soc's */
 static struct samsung_div_clock exynos4_div_clks[] __initdata = {
+       DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
+       DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
+       DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
+                       CLKOUT_CMU_LEFTBUS, 8, 6),
+
+       DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
+       DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
+       DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus",
+                       CLKOUT_CMU_RIGHTBUS, 8, 6),
+
        DIV(0, "div_core", "mout_core", DIV_CPU0, 0, 3),
+       DIV(0, "div_corem0", "div_core2", DIV_CPU0, 4, 3),
+       DIV(0, "div_corem1", "div_core2", DIV_CPU0, 8, 3),
+       DIV(0, "div_periph", "div_core2", DIV_CPU0, 12, 3),
+       DIV(0, "div_atb", "mout_core", DIV_CPU0, 16, 3),
+       DIV(0, "div_pclk_dbg", "div_atb", DIV_CPU0, 20, 3),
        DIV(0, "div_core2", "div_core", DIV_CPU0, 28, 3),
+       DIV(0, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
+       DIV(0, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
+       DIV(0, "div_clkout_cpu", "mout_clkout_cpu", CLKOUT_CMU_CPU, 8, 6),
+
        DIV(0, "div_fimc0", "mout_fimc0", DIV_CAM, 0, 4),
        DIV(0, "div_fimc1", "mout_fimc1", DIV_CAM, 4, 4),
        DIV(0, "div_fimc2", "mout_fimc2", DIV_CAM, 8, 4),
@@ -631,6 +781,16 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
                        CLK_SET_RATE_PARENT, 0),
        DIV_F(0, "div_mmc_pre3", "div_mmc3", DIV_FSYS2, 24, 8,
                        CLK_SET_RATE_PARENT, 0),
+       DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6),
+
+       DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
+       DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3),
+       DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3),
+       DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
+       DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3),
+       DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3),
+       DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4),
+       DIV(0, "div_clkout_dmc", "mout_clkout_dmc", CLKOUT_CMU_DMC, 8, 6),
 };
 
 /* list of divider clocks supported in exynos4210 soc */
@@ -671,6 +831,8 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
        DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
                                                8, 3, CLK_GET_RATE_NOCACHE, 0),
        DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
+       DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+       DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
 };
 
 /* list of gate clocks supported in all exynos4 soc's */
@@ -680,6 +842,8 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
         * the device name and clock alias names specified below for some
         * of the clocks can be removed.
         */
+       GATE(CLK_PPMULEFT, "ppmuleft", "aclk200", GATE_IP_LEFTBUS, 1, 0, 0),
+       GATE(CLK_PPMURIGHT, "ppmuright", "aclk200", GATE_IP_RIGHTBUS, 1, 0, 0),
        GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi", SRC_MASK_TV, 0, 0, 0),
        GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif", SRC_MASK_PERIL1, 8, 0,
                0),
@@ -695,11 +859,13 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
        GATE(CLK_SROMC, "sromc", "aclk133", GATE_IP_FSYS, 11, 0, 0),
        GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d", GATE_IP_G3D, 0,
                        CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_PPMUG3D, "ppmug3d", "aclk200", GATE_IP_G3D, 1, 0, 0),
        GATE(CLK_USB_DEVICE, "usb_device", "aclk133", GATE_IP_FSYS, 13, 0, 0),
        GATE(CLK_ONENAND, "onenand", "aclk133", GATE_IP_FSYS, 15, 0, 0),
        GATE(CLK_NFCON, "nfcon", "aclk133", GATE_IP_FSYS, 16, 0, 0),
        GATE(CLK_GPS, "gps", "aclk133", GATE_IP_GPS, 0, 0, 0),
        GATE(CLK_SMMU_GPS, "smmu_gps", "aclk133", GATE_IP_GPS, 1, 0, 0),
+       GATE(CLK_PPMUGPS, "ppmugps", "aclk200", GATE_IP_GPS, 2, 0, 0),
        GATE(CLK_SLIMBUS, "slimbus", "aclk100", GATE_IP_PERIL, 25, 0, 0),
        GATE(CLK_SCLK_CAM0, "sclk_cam0", "div_cam0", GATE_SCLK_CAM, 4,
                        CLK_SET_RATE_PARENT, 0),
@@ -781,19 +947,24 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
                        0, 0),
        GATE(CLK_SMMU_JPEG, "smmu_jpeg", "aclk160", GATE_IP_CAM, 11,
                        0, 0),
+       GATE(CLK_PPMUCAMIF, "ppmucamif", "aclk160", GATE_IP_CAM, 16, 0, 0),
        GATE(CLK_PIXELASYNCM0, "pxl_async0", "aclk160", GATE_IP_CAM, 17, 0, 0),
        GATE(CLK_PIXELASYNCM1, "pxl_async1", "aclk160", GATE_IP_CAM, 18, 0, 0),
        GATE(CLK_SMMU_TV, "smmu_tv", "aclk160", GATE_IP_TV, 4,
                        0, 0),
+       GATE(CLK_PPMUTV, "ppmutv", "aclk160", GATE_IP_TV, 5, 0, 0),
        GATE(CLK_MFC, "mfc", "aclk100", GATE_IP_MFC, 0, 0, 0),
        GATE(CLK_SMMU_MFCL, "smmu_mfcl", "aclk100", GATE_IP_MFC, 1,
                        0, 0),
        GATE(CLK_SMMU_MFCR, "smmu_mfcr", "aclk100", GATE_IP_MFC, 2,
                        0, 0),
+       GATE(CLK_PPMUMFC_L, "ppmumfc_l", "aclk100", GATE_IP_MFC, 3, 0, 0),
+       GATE(CLK_PPMUMFC_R, "ppmumfc_r", "aclk100", GATE_IP_MFC, 4, 0, 0),
        GATE(CLK_FIMD0, "fimd0", "aclk160", GATE_IP_LCD0, 0,
                        0, 0),
        GATE(CLK_SMMU_FIMD0, "smmu_fimd0", "aclk160", GATE_IP_LCD0, 4,
                        0, 0),
+       GATE(CLK_PPMULCD0, "ppmulcd0", "aclk160", GATE_IP_LCD0, 5, 0, 0),
        GATE(CLK_PDMA0, "pdma0", "aclk133", GATE_IP_FSYS, 0,
                        0, 0),
        GATE(CLK_PDMA1, "pdma1", "aclk133", GATE_IP_FSYS, 1,
@@ -806,6 +977,7 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
                        0, 0),
        GATE(CLK_SDMMC3, "sdmmc3", "aclk133", GATE_IP_FSYS, 8,
                        0, 0),
+       GATE(CLK_PPMUFILE, "ppmufile", "aclk133", GATE_IP_FSYS, 17, 0, 0),
        GATE(CLK_UART0, "uart0", "aclk100", GATE_IP_PERIL, 0,
                        0, 0),
        GATE(CLK_UART1, "uart1", "aclk100", GATE_IP_PERIL, 1,
@@ -852,6 +1024,21 @@ static struct samsung_gate_clock exynos4_gate_clks[] __initdata = {
                        0, 0),
        GATE(CLK_AC97, "ac97", "aclk100", GATE_IP_PERIL, 27,
                        0, 0),
+       GATE(CLK_PPMUDMC0, "ppmudmc0", "aclk133", GATE_IP_DMC, 8, 0, 0),
+       GATE(CLK_PPMUDMC1, "ppmudmc1", "aclk133", GATE_IP_DMC, 9, 0, 0),
+       GATE(CLK_PPMUCPU, "ppmucpu", "aclk133", GATE_IP_DMC, 10, 0, 0),
+       GATE(CLK_PPMUACP, "ppmuacp", "aclk133", GATE_IP_DMC, 16, 0, 0),
+
+       GATE(CLK_OUT_LEFTBUS, "clkout_leftbus", "div_clkout_leftbus",
+                       CLKOUT_CMU_LEFTBUS, 16, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_OUT_RIGHTBUS, "clkout_rightbus", "div_clkout_rightbus",
+                       CLKOUT_CMU_RIGHTBUS, 16, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_OUT_TOP, "clkout_top", "div_clkout_top",
+                       CLKOUT_CMU_TOP, 16, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_OUT_DMC, "clkout_dmc", "div_clkout_dmc",
+                       CLKOUT_CMU_DMC, 16, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_OUT_CPU, "clkout_cpu", "div_clkout_cpu",
+                       CLKOUT_CMU_CPU, 16, CLK_SET_RATE_PARENT, 0),
 };
 
 /* list of gate clocks supported in exynos4210 soc */
@@ -863,6 +1050,9 @@ static struct samsung_gate_clock exynos4210_gate_clks[] __initdata = {
        GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk200", E4210_GATE_IP_IMAGE, 3, 0, 0),
        GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4210_GATE_IP_IMAGE, 5, 0,
                0),
+       GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4210_GATE_IP_IMAGE, 9, 0,
+               0),
+       GATE(CLK_PPMULCD1, "ppmulcd1", "aclk160", E4210_GATE_IP_LCD1, 5, 0, 0),
        GATE(CLK_PCIE_PHY, "pcie_phy", "aclk133", GATE_IP_FSYS, 2, 0, 0),
        GATE(CLK_SATA_PHY, "sata_phy", "aclk133", GATE_IP_FSYS, 3, 0, 0),
        GATE(CLK_SATA, "sata", "aclk133", GATE_IP_FSYS, 10, 0, 0),
@@ -906,6 +1096,8 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
        GATE(CLK_MDMA, "mdma", "aclk200", E4X12_GATE_IP_IMAGE, 2, 0, 0),
        GATE(CLK_SMMU_MDMA, "smmu_mdma", "aclk200", E4X12_GATE_IP_IMAGE, 5, 0,
                0),
+       GATE(CLK_PPMUIMAGE, "ppmuimage", "aclk200", E4X12_GATE_IP_IMAGE, 9, 0,
+               0),
        GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0),
        GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0),
        GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1,
@@ -925,21 +1117,13 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
        GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
                        0, 0),
        GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
-       GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp",
-                       E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre",
-                       E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre",
-                       E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
-                       E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp",
+       GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "div_pwm_isp",
                        E4X12_GATE_IP_ISP, 0, 0, 0),
-       GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp",
+       GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "div_spi0_isp_pre",
                        E4X12_GATE_IP_ISP, 1, 0, 0),
-       GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp",
+       GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "div_spi1_isp_pre",
                        E4X12_GATE_IP_ISP, 2, 0, 0),
-       GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp",
+       GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "div_uart_isp",
                        E4X12_GATE_IP_ISP, 3, 0, 0),
        GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
        GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
@@ -1070,7 +1254,7 @@ static void __init exynos4_clk_register_finpll(struct samsung_clk_provider *ctx)
 
 }
 
-static struct of_device_id ext_clk_match[] __initdata = {
+static const struct of_device_id ext_clk_match[] __initconst = {
        { .compatible = "samsung,clock-xxti", .data = (void *)0, },
        { .compatible = "samsung,clock-xusbxti", .data = (void *)1, },
        {},
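The __initdata -> __initconst conversions in this series pair the section
annotation with the C qualifier: __initdata places the object in the
writable .init.data section, and a const object there can provoke a gcc
section type conflict, so const init-time tables belong in .init.rodata
via __initconst. A sketch of the resulting idiom (hypothetical table):

        static const struct of_device_id my_matches[] __initconst = {
                { .compatible = "vendor,xtal", .data = (void *)0, },
                { /* sentinel */ },
        };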
@@ -1172,6 +1356,32 @@ static struct samsung_pll_clock exynos4x12_plls[nr_plls] __initdata = {
                        VPLL_LOCK, VPLL_CON0, NULL),
 };
 
+static void __init exynos4_core_down_clock(enum exynos4_soc soc)
+{
+       unsigned int tmp;
+
+       /*
+        * Enable arm clock down (in idle) and set arm divider
+        * ratios in WFI/WFE state.
+        */
+       tmp = (PWR_CTRL1_CORE2_DOWN_RATIO(7) | PWR_CTRL1_CORE1_DOWN_RATIO(7) |
+               PWR_CTRL1_DIV2_DOWN_EN | PWR_CTRL1_DIV1_DOWN_EN |
+               PWR_CTRL1_USE_CORE1_WFE | PWR_CTRL1_USE_CORE0_WFE |
+               PWR_CTRL1_USE_CORE1_WFI | PWR_CTRL1_USE_CORE0_WFI);
+       /* On Exynos4412, also enable it on cores 2 and 3 */
+       if (num_possible_cpus() == 4)
+               tmp |= PWR_CTRL1_USE_CORE3_WFE | PWR_CTRL1_USE_CORE2_WFE |
+                      PWR_CTRL1_USE_CORE3_WFI | PWR_CTRL1_USE_CORE2_WFI;
+       __raw_writel(tmp, reg_base + PWR_CTRL1);
+
+       /*
+        * Disable the clock up feature on Exynos4x12, in case it was
+        * enabled by the bootloader.
+        */
+       if (exynos4_soc == EXYNOS4X12)
+               __raw_writel(0x0, reg_base + E4X12_PWR_CTRL2);
+}
+
 /* register exynos4 clocks */
 static void __init exynos4_clk_init(struct device_node *np,
                                    enum exynos4_soc soc)
@@ -1232,6 +1442,8 @@ static void __init exynos4_clk_init(struct device_node *np,
                        ARRAY_SIZE(exynos4_div_clks));
        samsung_clk_register_gate(ctx, exynos4_gate_clks,
                        ARRAY_SIZE(exynos4_gate_clks));
+       samsung_clk_register_fixed_factor(ctx, exynos4_fixed_factor_clks,
+                       ARRAY_SIZE(exynos4_fixed_factor_clks));
 
        if (exynos4_soc == EXYNOS4210) {
                samsung_clk_register_fixed_rate(ctx, exynos4210_fixed_rate_clks,
@@ -1244,6 +1456,9 @@ static void __init exynos4_clk_init(struct device_node *np,
                        ARRAY_SIZE(exynos4210_gate_clks));
                samsung_clk_register_alias(ctx, exynos4210_aliases,
                        ARRAY_SIZE(exynos4210_aliases));
+               samsung_clk_register_fixed_factor(ctx,
+                       exynos4210_fixed_factor_clks,
+                       ARRAY_SIZE(exynos4210_fixed_factor_clks));
        } else {
                samsung_clk_register_mux(ctx, exynos4x12_mux_clks,
                        ARRAY_SIZE(exynos4x12_mux_clks));
@@ -1253,13 +1468,19 @@ static void __init exynos4_clk_init(struct device_node *np,
                        ARRAY_SIZE(exynos4x12_gate_clks));
                samsung_clk_register_alias(ctx, exynos4x12_aliases,
                        ARRAY_SIZE(exynos4x12_aliases));
+               samsung_clk_register_fixed_factor(ctx,
+                       exynos4x12_fixed_factor_clks,
+                       ARRAY_SIZE(exynos4x12_fixed_factor_clks));
        }
 
        samsung_clk_register_alias(ctx, exynos4_aliases,
                        ARRAY_SIZE(exynos4_aliases));
 
+       exynos4_core_down_clock(soc);
        exynos4_clk_sleep_init();
 
+       samsung_clk_of_add_provider(np, ctx);
+
        pr_info("%s clocks: sclk_apll = %ld, sclk_mpll = %ld\n"
                "\tsclk_epll = %ld, sclk_vpll = %ld, arm_clk = %ld\n",
                exynos4_soc == EXYNOS4210 ? "Exynos4210" : "Exynos4x12",
index 1fad4c5..70ec3d2 100644
@@ -661,7 +661,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
        GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
        GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
        GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
-                       GATE_IP_DISP1, 2, 0, 0),
+                       GATE_IP_DISP1, 9, 0, 0),
        GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
                        GATE_IP_DISP1, 8, 0, 0),
        GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
@@ -748,7 +748,7 @@ static struct samsung_pll_clock exynos5250_plls[nr_plls] __initdata = {
                VPLL_LOCK, VPLL_CON0, NULL),
 };
 
-static struct of_device_id ext_clk_match[] __initdata = {
+static const struct of_device_id ext_clk_match[] __initconst = {
        { .compatible = "samsung,clock-xxti", .data = (void *)0, },
        { },
 };
@@ -820,6 +820,8 @@ static void __init exynos5250_clk_init(struct device_node *np)
 
        exynos5250_clk_sleep_init();
 
+       samsung_clk_of_add_provider(np, ctx);
+
        pr_info("Exynos5250: clock setup completed, armclk=%ld\n",
                        _get_rate("div_arm2"));
 }
index 64596ba..ce3de97 100644
@@ -206,6 +206,8 @@ void __init exynos5260_cmu_register_one(struct device_node *np,
        if (cmu->clk_regs)
                exynos5260_clk_sleep_init(reg_base, cmu->clk_regs,
                        cmu->nr_clk_regs);
+
+       samsung_clk_of_add_provider(np, ctx);
 }
 
 
index c9505ab..231475b 100644
@@ -204,6 +204,8 @@ static void __init exynos5410_clk_init(struct device_node *np)
        samsung_clk_register_gate(ctx, exynos5410_gate_clks,
                        ARRAY_SIZE(exynos5410_gate_clks));
 
+       samsung_clk_of_add_provider(np, ctx);
+
        pr_debug("Exynos5410: clock setup completed.\n");
 }
 CLK_OF_DECLARE(exynos5410_clk, "samsung,exynos5410-clock", exynos5410_clk_init);
index 9d7d7ee..848d602 100644
@@ -28,6 +28,7 @@
 #define GATE_BUS_CPU           0x700
 #define GATE_SCLK_CPU          0x800
 #define CLKOUT_CMU_CPU         0xa00
+#define SRC_MASK_CPERI         0x4300
 #define GATE_IP_G2D            0x8800
 #define CPLL_LOCK              0x10020
 #define DPLL_LOCK              0x10030
@@ -70,6 +71,8 @@
 #define SRC_TOP11              0x10284
 #define SRC_TOP12              0x10288
 #define SRC_TOP13              0x1028c /* 5800 specific */
+#define SRC_MASK_TOP0          0x10300
+#define SRC_MASK_TOP1          0x10304
 #define SRC_MASK_TOP2          0x10308
 #define SRC_MASK_TOP7          0x1031c
 #define SRC_MASK_DISP10                0x1032c
@@ -77,6 +80,7 @@
 #define SRC_MASK_FSYS          0x10340
 #define SRC_MASK_PERIC0                0x10350
 #define SRC_MASK_PERIC1                0x10354
+#define SRC_MASK_ISP           0x10370
 #define DIV_TOP0               0x10500
 #define DIV_TOP1               0x10504
 #define DIV_TOP2               0x10508
 #define DIV2_RATIO0            0x10590
 #define DIV4_RATIO             0x105a0
 #define GATE_BUS_TOP           0x10700
+#define GATE_BUS_DISP1         0x10728
 #define GATE_BUS_GEN           0x1073c
 #define GATE_BUS_FSYS0         0x10740
 #define GATE_BUS_FSYS2         0x10748
@@ -190,6 +195,10 @@ static unsigned long exynos5x_clk_regs[] __initdata = {
        SRC_MASK_FSYS,
        SRC_MASK_PERIC0,
        SRC_MASK_PERIC1,
+       SRC_MASK_TOP0,
+       SRC_MASK_TOP1,
+       SRC_MASK_MAU,
+       SRC_MASK_ISP,
        SRC_ISP,
        DIV_TOP0,
        DIV_TOP1,
@@ -208,6 +217,7 @@ static unsigned long exynos5x_clk_regs[] __initdata = {
        SCLK_DIV_ISP1,
        DIV2_RATIO0,
        DIV4_RATIO,
+       GATE_BUS_DISP1,
        GATE_BUS_TOP,
        GATE_BUS_GEN,
        GATE_BUS_FSYS0,
@@ -249,6 +259,22 @@ static unsigned long exynos5800_clk_regs[] __initdata = {
        GATE_IP_CAM,
 };
 
+static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
+       { .offset = SRC_MASK_CPERI,             .value = 0xffffffff, },
+       { .offset = SRC_MASK_TOP0,              .value = 0x11111111, },
+       { .offset = SRC_MASK_TOP1,              .value = 0x11101111, },
+       { .offset = SRC_MASK_TOP2,              .value = 0x11111110, },
+       { .offset = SRC_MASK_TOP7,              .value = 0x00111100, },
+       { .offset = SRC_MASK_DISP10,            .value = 0x11111110, },
+       { .offset = SRC_MASK_MAU,               .value = 0x10000000, },
+       { .offset = SRC_MASK_FSYS,              .value = 0x11111110, },
+       { .offset = SRC_MASK_PERIC0,            .value = 0x11111110, },
+       { .offset = SRC_MASK_PERIC1,            .value = 0x11111100, },
+       { .offset = SRC_MASK_ISP,               .value = 0x11111000, },
+       { .offset = GATE_BUS_DISP1,             .value = 0xffffffff, },
+       { .offset = GATE_IP_PERIC,              .value = 0xffffffff, },
+};
+
 static int exynos5420_clk_suspend(void)
 {
        samsung_clk_save(reg_base, exynos5x_save,
@@ -258,6 +284,9 @@ static int exynos5420_clk_suspend(void)
                samsung_clk_save(reg_base, exynos5800_save,
                                ARRAY_SIZE(exynos5800_clk_regs));
 
+       samsung_clk_restore(reg_base, exynos5420_set_clksrc,
+                               ARRAY_SIZE(exynos5420_set_clksrc));
+
        return 0;
 }
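Note the ordering above: suspend first snapshots the live registers with
samsung_clk_save(), then replays the exynos5420_set_clksrc table to park
the source masks and bus gates at known-safe values before the system
goes down; resume writes the snapshot back. Both helpers just walk
{offset, value} pairs over the register base; a hedged re-statement of
the restore loop (not the exact kernel body):

        static void my_clk_restore(void __iomem *base,
                                   const struct samsung_clk_reg_dump *rd,
                                   unsigned int num)
        {
                for (; num > 0; num--, rd++)
                        writel(rd->value, base + rd->offset);
        }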
 
@@ -631,7 +660,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
                        SRC_TOP4, 16, 1),
        MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1),
        MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1),
-       MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1),
+       MUX(CLK_MOUT_USER_ACLK333, "mout_user_aclk333", mout_user_aclk333_p,
+                       SRC_TOP4, 28, 1),
 
        MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p,
                        SRC_TOP5, 0, 1),
@@ -684,7 +714,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
                        SRC_TOP11, 12, 1),
        MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1),
        MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1),
-       MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1),
+       MUX(CLK_MOUT_SW_ACLK333, "mout_sw_aclk333", mout_sw_aclk333_p,
+                       SRC_TOP11, 28, 1),
 
        MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p,
                        SRC_TOP12, 4, 1),
@@ -890,8 +921,6 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
                        GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
                        GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
-       GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric",
-                       GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
                        GATE_BUS_TOP, 13, 0, 0),
        GATE(0, "aclk166", "mout_user_aclk166",
@@ -994,34 +1023,61 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
                        SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
 
        /* PERIC Block */
-       GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0),
-       GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0),
-       GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0),
-       GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0),
-       GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0),
-       GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0),
-       GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0),
-       GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0),
-       GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0),
-       GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0),
-       GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0),
-       GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0),
-       GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0),
-       GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0),
-       GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0),
-       GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0),
-       GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0),
-       GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0),
-       GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0),
-       GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0),
-       GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0),
-       GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_IP_PERIC, 24, 0, 0),
-       GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0),
-       GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0),
-       GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0),
-       GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0),
-
-       GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
+       GATE(CLK_UART0, "uart0", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 0, 0, 0),
+       GATE(CLK_UART1, "uart1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 1, 0, 0),
+       GATE(CLK_UART2, "uart2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 2, 0, 0),
+       GATE(CLK_UART3, "uart3", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 3, 0, 0),
+       GATE(CLK_I2C0, "i2c0", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 6, 0, 0),
+       GATE(CLK_I2C1, "i2c1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 7, 0, 0),
+       GATE(CLK_I2C2, "i2c2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 8, 0, 0),
+       GATE(CLK_I2C3, "i2c3", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 9, 0, 0),
+       GATE(CLK_USI0, "usi0", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 10, 0, 0),
+       GATE(CLK_USI1, "usi1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 11, 0, 0),
+       GATE(CLK_USI2, "usi2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 12, 0, 0),
+       GATE(CLK_USI3, "usi3", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 13, 0, 0),
+       GATE(CLK_I2C_HDMI, "i2c_hdmi", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 14, 0, 0),
+       GATE(CLK_TSADC, "tsadc", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 15, 0, 0),
+       GATE(CLK_SPI0, "spi0", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 16, 0, 0),
+       GATE(CLK_SPI1, "spi1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 17, 0, 0),
+       GATE(CLK_SPI2, "spi2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 18, 0, 0),
+       GATE(CLK_I2S1, "i2s1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 20, 0, 0),
+       GATE(CLK_I2S2, "i2s2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 21, 0, 0),
+       GATE(CLK_PCM1, "pcm1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 22, 0, 0),
+       GATE(CLK_PCM2, "pcm2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 23, 0, 0),
+       GATE(CLK_PWM, "pwm", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 24, 0, 0),
+       GATE(CLK_SPDIF, "spdif", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 26, 0, 0),
+       GATE(CLK_USI4, "usi4", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 28, 0, 0),
+       GATE(CLK_USI5, "usi5", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 30, 0, 0),
+       GATE(CLK_USI6, "usi6", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 31, 0, 0),
+
+       GATE(CLK_KEYIF, "keyif", "mout_user_aclk66_peric",
+                       GATE_BUS_PERIC, 22, 0, 0),
 
        /* PERIS Block */
        GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
@@ -1142,6 +1198,28 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
        GATE(CLK_G3D, "g3d", "mout_user_aclk_g3d", GATE_IP_G3D, 9, 0, 0),
 };
 
+static const struct samsung_pll_rate_table exynos5420_pll2550x_24mhz_tbl[] = {
+       PLL_35XX_RATE(2000000000, 250, 3, 0),
+       PLL_35XX_RATE(1900000000, 475, 6, 0),
+       PLL_35XX_RATE(1800000000, 225, 3, 0),
+       PLL_35XX_RATE(1700000000, 425, 6, 0),
+       PLL_35XX_RATE(1600000000, 200, 3, 0),
+       PLL_35XX_RATE(1500000000, 250, 4, 0),
+       PLL_35XX_RATE(1400000000, 175, 3, 0),
+       PLL_35XX_RATE(1300000000, 325, 6, 0),
+       PLL_35XX_RATE(1200000000, 200, 2, 1),
+       PLL_35XX_RATE(1100000000, 275, 3, 1),
+       PLL_35XX_RATE(1000000000, 250, 3, 1),
+       PLL_35XX_RATE(900000000,  150, 2, 1),
+       PLL_35XX_RATE(800000000,  200, 3, 1),
+       PLL_35XX_RATE(700000000,  175, 3, 1),
+       PLL_35XX_RATE(600000000,  200, 2, 2),
+       PLL_35XX_RATE(500000000,  250, 3, 2),
+       PLL_35XX_RATE(400000000,  200, 3, 2),
+       PLL_35XX_RATE(300000000,  200, 2, 3),
+       PLL_35XX_RATE(200000000,  200, 3, 3),
+};
+
 static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
        [apll] = PLL(pll_2550, CLK_FOUT_APLL, "fout_apll", "fin_pll", APLL_LOCK,
                APLL_CON0, NULL),
@@ -1167,7 +1245,7 @@ static struct samsung_pll_clock exynos5x_plls[nr_plls] __initdata = {
                KPLL_CON0, NULL),
 };
 
-static struct of_device_id ext_clk_match[] __initdata = {
+static const struct of_device_id ext_clk_match[] __initconst = {
        { .compatible = "samsung,exynos5420-oscclk", .data = (void *)0, },
        { },
 };
@@ -1195,6 +1273,12 @@ static void __init exynos5x_clk_init(struct device_node *np,
        samsung_clk_of_register_fixed_ext(ctx, exynos5x_fixed_rate_ext_clks,
                        ARRAY_SIZE(exynos5x_fixed_rate_ext_clks),
                        ext_clk_match);
+
+       if (_get_rate("fin_pll") == 24 * MHZ) {
+               exynos5x_plls[apll].rate_table = exynos5420_pll2550x_24mhz_tbl;
+               exynos5x_plls[kpll].rate_table = exynos5420_pll2550x_24mhz_tbl;
+       }
+
        samsung_clk_register_pll(ctx, exynos5x_plls, ARRAY_SIZE(exynos5x_plls),
                                        reg_base);
        samsung_clk_register_fixed_rate(ctx, exynos5x_fixed_rate_clks,
@@ -1226,6 +1310,8 @@ static void __init exynos5x_clk_init(struct device_node *np,
        }
 
        exynos5420_clk_sleep_init();
+
+       samsung_clk_of_add_provider(np, ctx);
 }
 
 static void __init exynos5420_clk_init(struct device_node *np)
index 647f144..00d1d00 100644
@@ -84,7 +84,7 @@ static struct samsung_gate_clock exynos5440_gate_clks[] __initdata = {
        GATE(CLK_CS250_O, "cs250_o", "cs250", CLKEN_OV_VAL, 19, 0, 0),
 };
 
-static struct of_device_id ext_clk_match[] __initdata = {
+static const struct of_device_id ext_clk_match[] __initconst = {
        { .compatible = "samsung,clock-xtal", .data = (void *)0, },
        {},
 };
@@ -123,6 +123,8 @@ static void __init exynos5440_clk_init(struct device_node *np)
        samsung_clk_register_gate(ctx, exynos5440_gate_clks,
                        ARRAY_SIZE(exynos5440_gate_clks));
 
+       samsung_clk_of_add_provider(np, ctx);
+
        pr_info("Exynos5440: arm_clk = %ldHz\n", _get_rate("arm_clk"));
        pr_info("exynos5440 clock initialization complete\n");
 }
index ba07168..5d2f034 100644
@@ -152,6 +152,11 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
        ALIAS(HCLK, NULL, "hclk"),
        ALIAS(MPLL, NULL, "mpll"),
        ALIAS(FCLK, NULL, "fclk"),
+       ALIAS(PCLK, NULL, "watchdog"),
+       ALIAS(PCLK_SDI, NULL, "sdi"),
+       ALIAS(HCLK_NAND, NULL, "nand"),
+       ALIAS(PCLK_I2S, NULL, "iis"),
+       ALIAS(PCLK_I2C, NULL, "i2c"),
 };
 
 /* S3C2410 specific clocks */
@@ -378,7 +383,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
        if (!np)
                s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
 
-       if (current_soc == 2410) {
+       if (current_soc == S3C2410) {
                if (_get_rate("xti") == 12 * MHZ) {
                        s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
                        s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
@@ -432,7 +437,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
                samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
                                ARRAY_SIZE(s3c2410_ffactor));
                samsung_clk_register_alias(ctx, s3c2410_aliases,
-                       ARRAY_SIZE(s3c2410_common_aliases));
+                       ARRAY_SIZE(s3c2410_aliases));
                break;
        case S3C2440:
                samsung_clk_register_mux(ctx, s3c2440_muxes,
@@ -461,6 +466,8 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
        }
 
        s3c2410_clk_sleep_init();
+
+       samsung_clk_of_add_provider(np, ctx);
 }
 
 static void __init s3c2410_clk_init(struct device_node *np)
index 23e4313..34af09f 100644
@@ -265,6 +265,8 @@ void __init s3c2412_common_clk_init(struct device_node *np, unsigned long xti_f,
                                   ARRAY_SIZE(s3c2412_aliases));
 
        s3c2412_clk_sleep_init();
+
+       samsung_clk_of_add_provider(np, ctx);
 }
 
 static void __init s3c2412_clk_init(struct device_node *np)
index c4bbdab..c92f853 100644
@@ -445,6 +445,8 @@ void __init s3c2443_common_clk_init(struct device_node *np, unsigned long xti_f,
        }
 
        s3c2443_clk_sleep_init();
+
+       samsung_clk_of_add_provider(np, ctx);
 }
 
 static void __init s3c2416_clk_init(struct device_node *np)
index efa16ee..0f590e5 100644
@@ -418,8 +418,10 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
        ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
        ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
        ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
-       ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"),
-       ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"),
+       ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi_busclk0"),
+       ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi_busclk2"),
+       ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi_busclk0"),
+       ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi_busclk2"),
        ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
        ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
        ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
@@ -516,6 +518,8 @@ void __init s3c64xx_clk_init(struct device_node *np, unsigned long xtal_f,
                                        ARRAY_SIZE(s3c64xx_clock_aliases));
        s3c64xx_clk_sleep_init();
 
+       samsung_clk_of_add_provider(np, ctx);
+
        pr_info("%s clocks: apll = %lu, mpll = %lu\n"
                "\tepll = %lu, arm_clk = %lu\n",
                is_s3c6400 ? "S3C6400" : "S3C6410",
index 49629c7..deab84d 100644
@@ -53,7 +53,6 @@ struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
 {
        struct samsung_clk_provider *ctx;
        struct clk **clk_table;
-       int ret;
        int i;
 
        ctx = kzalloc(sizeof(struct samsung_clk_provider), GFP_KERNEL);
@@ -72,17 +71,19 @@ struct samsung_clk_provider *__init samsung_clk_init(struct device_node *np,
        ctx->clk_data.clk_num = nr_clks;
        spin_lock_init(&ctx->lock);
 
-       if (!np)
-               return ctx;
-
-       ret = of_clk_add_provider(np, of_clk_src_onecell_get,
-                       &ctx->clk_data);
-       if (ret)
-               panic("could not register clock provide\n");
-
        return ctx;
 }
 
+void __init samsung_clk_of_add_provider(struct device_node *np,
+                               struct samsung_clk_provider *ctx)
+{
+       if (np) {
+               if (of_clk_add_provider(np, of_clk_src_onecell_get,
+                                       &ctx->clk_data))
+                       panic("could not register clk provider\n");
+       }
+}
+
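Splitting samsung_clk_init() from samsung_clk_of_add_provider() turns
every CMU init path in this series into a two-phase sequence: build and
populate the clock table first, then publish it to DT consumers only
once it is complete, so a concurrent of_clk_get() can never observe a
half-filled onecell table. A sketch of a caller, with the SoC-specific
names as placeholders:

        static void __init my_soc_clk_init(struct device_node *np)
        {
                struct samsung_clk_provider *ctx;

                ctx = samsung_clk_init(np, reg_base, NR_CLKS);
                /* ... register PLLs, muxes, dividers and gates into ctx ... */
                samsung_clk_of_add_provider(np, ctx);   /* publish last */
        }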
 /* add a clock instance to the clock lookup table used for dt based lookup */
 void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk,
                                unsigned int id)
@@ -284,7 +285,7 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
 void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
                        struct samsung_fixed_rate_clock *fixed_rate_clk,
                        unsigned int nr_fixed_rate_clk,
-                       struct of_device_id *clk_matches)
+                       const struct of_device_id *clk_matches)
 {
        const struct of_device_id *match;
        struct device_node *clk_np;
index 9693b80..66ab36b 100644
@@ -327,11 +327,13 @@ struct samsung_pll_clock {
 extern struct samsung_clk_provider *__init samsung_clk_init(
                        struct device_node *np, void __iomem *base,
                        unsigned long nr_clks);
+extern void __init samsung_clk_of_add_provider(struct device_node *np,
+                       struct samsung_clk_provider *ctx);
 extern void __init samsung_clk_of_register_fixed_ext(
                        struct samsung_clk_provider *ctx,
                        struct samsung_fixed_rate_clock *fixed_rate_clk,
                        unsigned int nr_fixed_rate_clk,
-                       struct of_device_id *clk_matches);
+                       const struct of_device_id *clk_matches);
 
 extern void samsung_clk_add_lookup(struct samsung_clk_provider *ctx,
                        struct clk *clk, unsigned int id);
index 65894f7..4daa597 100644
@@ -742,19 +742,19 @@ void __init spear1310_clk_init(void __iomem *misc_base, void __iomem *ras_base)
        clk = clk_register_gate(NULL, "pcie_sata_0_clk", "ahb_clk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_0_CLK_ENB,
                        0, &_lock);
-       clk_register_clkdev(clk, NULL, "dw_pcie.0");
+       clk_register_clkdev(clk, NULL, "b1000000.pcie");
        clk_register_clkdev(clk, NULL, "b1000000.ahci");
 
        clk = clk_register_gate(NULL, "pcie_sata_1_clk", "ahb_clk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_1_CLK_ENB,
                        0, &_lock);
-       clk_register_clkdev(clk, NULL, "dw_pcie.1");
+       clk_register_clkdev(clk, NULL, "b1800000.pcie");
        clk_register_clkdev(clk, NULL, "b1800000.ahci");
 
        clk = clk_register_gate(NULL, "pcie_sata_2_clk", "ahb_clk", 0,
                        SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_2_CLK_ENB,
                        0, &_lock);
-       clk_register_clkdev(clk, NULL, "dw_pcie.2");
+       clk_register_clkdev(clk, NULL, "b4000000.pcie");
        clk_register_clkdev(clk, NULL, "b4000000.ahci");
 
        clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
index fe835c1..5a5c664 100644
@@ -839,7 +839,7 @@ void __init spear1340_clk_init(void __iomem *misc_base)
        clk = clk_register_gate(NULL, "pcie_sata_clk", "ahb_clk", 0,
                        SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_PCIE_SATA_CLK_ENB,
                        0, &_lock);
-       clk_register_clkdev(clk, NULL, "dw_pcie");
+       clk_register_clkdev(clk, NULL, "b1000000.pcie");
        clk_register_clkdev(clk, NULL, "b1000000.ahci");
 
        clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
index c2d2043..bb5f387 100644
@@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { }
 /* array of all spear 320 clock lookups */
 #ifdef CONFIG_MACH_SPEAR320
 
-#define SPEAR320_CONTROL_REG           (soc_config_base + 0x0000)
+#define SPEAR320_CONTROL_REG           (soc_config_base + 0x0010)
 #define SPEAR320_EXT_CTRL_REG          (soc_config_base + 0x0018)
 
        #define SPEAR320_UARTX_PCLK_MASK                0x1
@@ -245,7 +245,8 @@ static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
        "ras_syn0_gclk", };
 static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
 
-static void __init spear320_clk_init(void __iomem *soc_config_base)
+static void __init spear320_clk_init(void __iomem *soc_config_base,
+                                    struct clk *ras_apb_clk)
 {
        struct clk *clk;
 
@@ -342,6 +343,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
                        SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "a3000000.serial");
+       /* Enforce ras_apb_clk */
+       clk_set_parent(clk, ras_apb_clk);
 
        clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
                        ARRAY_SIZE(uartx_parents),
@@ -349,6 +352,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
                        SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
                        SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "a4000000.serial");
+       /* Enforce ras_apb_clk */
+       clk_set_parent(clk, ras_apb_clk);
 
        clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
                        ARRAY_SIZE(uartx_parents),
@@ -379,12 +384,12 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
        clk_register_clkdev(clk, NULL, "60100000.serial");
 }
 #else
-static inline void spear320_clk_init(void __iomem *soc_config_base) { }
+static inline void spear320_clk_init(void __iomem *sb, struct clk *rc) { }
 #endif
 
 void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
 {
-       struct clk *clk, *clk1;
+       struct clk *clk, *clk1, *ras_apb_clk;
 
        clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
                        32000);
@@ -613,6 +618,7 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
        clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
                        RAS_APB_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, "ras_apb_clk", NULL);
+       ras_apb_clk = clk;
 
        clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
                        RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
@@ -659,5 +665,5 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
        else if (of_machine_is_compatible("st,spear310"))
                spear310_clk_init();
        else if (of_machine_is_compatible("st,spear320"))
-               spear320_clk_init(soc_config_base);
+               spear320_clk_init(soc_config_base, ras_apb_clk);
 }
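The spear320 changes thread ras_apb_clk through to the UART setup so the
init code can pin each mux's parent immediately after registering it.
The generic idiom, with placeholder names (clk_register_mux() parameters
per the common clock framework):

        struct clk *mux = clk_register_mux(NULL, "some_mux", parents,
                                           num_parents, 0, reg, shift,
                                           width, 0, &lock);
        if (!IS_ERR(mux))
                clk_set_parent(mux, preferred_parent);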
index c7455ff..ede7b2f 100644
@@ -1 +1 @@
-obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o
+obj-y += clkgen-mux.o clkgen-pll.o clkgen-fsyn.o clk-flexgen.o
diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c
new file mode 100644
index 0000000..2282cef
--- /dev/null
@@ -0,0 +1,331 @@
+/*
+ * clk-flexgen.c
+ *
+ * Copyright (C) ST-Microelectronics SA 2013
+ * Author:  Maxime Coquelin <maxime.coquelin@st.com> for ST-Microelectronics.
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+struct flexgen {
+       struct clk_hw hw;
+
+       /* Crossbar */
+       struct clk_mux mux;
+       /* Pre-divisor's gate */
+       struct clk_gate pgate;
+       /* Pre-divisor */
+       struct clk_divider pdiv;
+       /* Final divisor's gate */
+       struct clk_gate fgate;
+       /* Final divisor */
+       struct clk_divider fdiv;
+};
+
+#define to_flexgen(_hw) container_of(_hw, struct flexgen, hw)
+
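+/*
+ * Every op below follows the same delegation pattern: recover the
+ * flexgen from the generic clk_hw with container_of(), point the
+ * embedded basic clock's hw at the composite's struct clk, then call
+ * the standard clk_mux_ops/clk_gate_ops/clk_divider_ops.  The basic
+ * clocks are never registered on their own.
+ */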
+static int flexgen_enable(struct clk_hw *hw)
+{
+       struct flexgen *flexgen = to_flexgen(hw);
+       struct clk_hw *pgate_hw = &flexgen->pgate.hw;
+       struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+       pgate_hw->clk = hw->clk;
+       fgate_hw->clk = hw->clk;
+
+       clk_gate_ops.enable(pgate_hw);
+
+       clk_gate_ops.enable(fgate_hw);
+
+       pr_debug("%s: flexgen output enabled\n", __clk_get_name(hw->clk));
+       return 0;
+}
+
+static void flexgen_disable(struct clk_hw *hw)
+{
+       struct flexgen *flexgen = to_flexgen(hw);
+       struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+       /* disable only the final gate */
+       fgate_hw->clk = hw->clk;
+
+       clk_gate_ops.disable(fgate_hw);
+
+       pr_debug("%s: flexgen output disabled\n", __clk_get_name(hw->clk));
+}
+
+static int flexgen_is_enabled(struct clk_hw *hw)
+{
+       struct flexgen *flexgen = to_flexgen(hw);
+       struct clk_hw *fgate_hw = &flexgen->fgate.hw;
+
+       fgate_hw->clk = hw->clk;
+
+       if (!clk_gate_ops.is_enabled(fgate_hw))
+               return 0;
+
+       return 1;
+}
+
+static u8 flexgen_get_parent(struct clk_hw *hw)
+{
+       struct flexgen *flexgen = to_flexgen(hw);
+       struct clk_hw *mux_hw = &flexgen->mux.hw;
+
+       mux_hw->clk = hw->clk;
+
+       return clk_mux_ops.get_parent(mux_hw);
+}
+
+static int flexgen_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct flexgen *flexgen = to_flexgen(hw);
+       struct clk_hw *mux_hw = &flexgen->mux.hw;
+
+       mux_hw->clk = hw->clk;
+
+       return clk_mux_ops.set_parent(mux_hw, index);
+}
+
+static inline unsigned long
+clk_best_div(unsigned long parent_rate, unsigned long rate)
+{
+       return parent_rate / rate + ((rate > (2*(parent_rate % rate))) ? 0 : 1);
+}
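+
+/*
+ * e.g. parent_rate = 100 MHz, rate = 30 MHz: 100 / 30 = 3 remainder 10,
+ * and 30 > 2 * 10, so div = 3 (33.3 MHz, closer than 25 MHz). For
+ * rate = 28 MHz: remainder 16, and 28 <= 2 * 16, so div rounds up to 4
+ * (25 MHz, closer than 33.3 MHz).
+ */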
+
+static long flexgen_round_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long *prate)
+{
+       unsigned long div;
+
+       /* Round div according to exact prate and wished rate */
+       div = clk_best_div(*prate, rate);
+
+       if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+               *prate = rate * div;
+               return rate;
+       }
+
+       return *prate / div;
+}
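+
+/*
+ * e.g. *prate = 500 MHz, rate = 133 MHz gives div = 4: without
+ * CLK_SET_RATE_PARENT the closest reachable rate, 125 MHz, is returned;
+ * with it, *prate is raised to 532 MHz so the requested 133 MHz can be
+ * produced exactly.
+ */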
+
+static unsigned long flexgen_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct flexgen *flexgen = to_flexgen(hw);
+       struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
+       struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
+       unsigned long mid_rate;
+
+       pdiv_hw->clk = hw->clk;
+       fdiv_hw->clk = hw->clk;
+
+       mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
+
+       return clk_divider_ops.recalc_rate(fdiv_hw, mid_rate);
+}
+
+static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct flexgen *flexgen = to_flexgen(hw);
+       struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
+       struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
+       unsigned long primary_div = 0;
+       int ret = 0;
+
+       pdiv_hw->clk = hw->clk;
+       fdiv_hw->clk = hw->clk;
+
+       primary_div = clk_best_div(parent_rate, rate);
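+
+       /* park the final divider at 1 and perform the whole division
+        * in the wider (10-bit) pre-divider */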
+
+       clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
+       ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div);
+
+       return ret;
+}
+
+static const struct clk_ops flexgen_ops = {
+       .enable = flexgen_enable,
+       .disable = flexgen_disable,
+       .is_enabled = flexgen_is_enabled,
+       .get_parent = flexgen_get_parent,
+       .set_parent = flexgen_set_parent,
+       .round_rate = flexgen_round_rate,
+       .recalc_rate = flexgen_recalc_rate,
+       .set_rate = flexgen_set_rate,
+};
+
+struct clk *clk_register_flexgen(const char *name,
+                               const char **parent_names, u8 num_parents,
+                               void __iomem *reg, spinlock_t *lock, u32 idx,
+                               unsigned long flexgen_flags)
+{
+       struct flexgen *fgxbar;
+       struct clk *clk;
+       struct clk_init_data init;
+       u32  xbar_shift;
+       void __iomem *xbar_reg, *fdiv_reg;
+
+       fgxbar = kzalloc(sizeof(struct flexgen), GFP_KERNEL);
+       if (!fgxbar)
+               return ERR_PTR(-ENOMEM);
+
+       init.name = name;
+       init.ops = &flexgen_ops;
+       init.flags = CLK_IS_BASIC | flexgen_flags;
+       init.parent_names = parent_names;
+       init.num_parents = num_parents;
+
+       xbar_reg = reg + 0x18 + (idx & ~0x3);
+       xbar_shift = (idx % 4) * 0x8;
+       fdiv_reg = reg + 0x164 + idx * 4;
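+       /*
+        * e.g. for idx = 5: xbar_reg = reg + 0x1c with shift 8, the
+        * pre-divider at reg + 0x6c and fdiv/fgate at reg + 0x178.
+        */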
+
+       /* Crossbar element config */
+       fgxbar->mux.lock = lock;
+       fgxbar->mux.mask = BIT(6) - 1;
+       fgxbar->mux.reg = xbar_reg;
+       fgxbar->mux.shift = xbar_shift;
+       fgxbar->mux.table = NULL;
+
+       /* Pre-divider's gate config (in xbar register) */
+       fgxbar->pgate.lock = lock;
+       fgxbar->pgate.reg = xbar_reg;
+       fgxbar->pgate.bit_idx = xbar_shift + 6;
+
+       /* Pre-divider config */
+       fgxbar->pdiv.lock = lock;
+       fgxbar->pdiv.reg = reg + 0x58 + idx * 4;
+       fgxbar->pdiv.width = 10;
+
+       /* Final divider's gate config */
+       fgxbar->fgate.lock = lock;
+       fgxbar->fgate.reg = fdiv_reg;
+       fgxbar->fgate.bit_idx = 6;
+
+       /* Final divider config */
+       fgxbar->fdiv.lock = lock;
+       fgxbar->fdiv.reg = fdiv_reg;
+       fgxbar->fdiv.width = 6;
+
+       fgxbar->hw.init = &init;
+
+       clk = clk_register(NULL, &fgxbar->hw);
+       if (IS_ERR(clk))
+               kfree(fgxbar);
+       else
+               pr_debug("%s: parent %s rate %u\n",
+                       __clk_get_name(clk),
+                       __clk_get_name(clk_get_parent(clk)),
+                       (unsigned int)clk_get_rate(clk));
+       return clk;
+}
+
+static const char ** __init flexgen_get_parents(struct device_node *np,
+                                                      int *num_parents)
+{
+       const char **parents;
+       int nparents, i;
+
+       nparents = of_count_phandle_with_args(np, "clocks", "#clock-cells");
+       if (WARN_ON(nparents <= 0))
+               return NULL;
+
+       parents = kcalloc(nparents, sizeof(const char *), GFP_KERNEL);
+       if (!parents)
+               return NULL;
+
+       for (i = 0; i < nparents; i++)
+               parents[i] = of_clk_get_parent_name(np, i);
+
+       *num_parents = nparents;
+       return parents;
+}
+
+void __init st_of_flexgen_setup(struct device_node *np)
+{
+       struct device_node *pnode;
+       void __iomem *reg;
+       struct clk_onecell_data *clk_data;
+       const char **parents;
+       int num_parents, i;
+       spinlock_t *rlock = NULL;
+       unsigned long flex_flags = 0;
+
+       pnode = of_get_parent(np);
+       if (!pnode)
+               return;
+
+       reg = of_iomap(pnode, 0);
+       if (!reg)
+               return;
+
+       parents = flexgen_get_parents(np, &num_parents);
+       if (!parents)
+               return;
+
+       clk_data = kzalloc(sizeof(*clk_data), GFP_KERNEL);
+       if (!clk_data)
+               goto err;
+
+       clk_data->clk_num = of_property_count_strings(np,
+                       "clock-output-names");
+       if (clk_data->clk_num <= 0) {
+               pr_err("%s: Failed to get number of output clocks (%d)\n",
+                               __func__, clk_data->clk_num);
+               goto err;
+       }
+
+       clk_data->clks = kcalloc(clk_data->clk_num, sizeof(struct clk *),
+                       GFP_KERNEL);
+       if (!clk_data->clks)
+               goto err;
+
+       rlock = kzalloc(sizeof(spinlock_t), GFP_KERNEL);
+       if (!rlock)
+               goto err;
+
+       for (i = 0; i < clk_data->clk_num; i++) {
+               struct clk *clk;
+               const char *clk_name;
+
+               if (of_property_read_string_index(np, "clock-output-names",
+                                                 i, &clk_name)) {
+                       break;
+               }
+
+               /*
+                * If we read an empty clock name then the output is unused
+                */
+               if (*clk_name == '\0')
+                       continue;
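+
+               /*
+                * e.g. clock-output-names = "clk-a", "", "clk-c"
+                * registers outputs 0 and 2 and skips output 1.
+                */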
+
+               clk = clk_register_flexgen(clk_name, parents, num_parents,
+                                          reg, rlock, i, flex_flags);
+
+               if (IS_ERR(clk))
+                       goto err;
+
+               clk_data->clks[i] = clk;
+       }
+
+       kfree(parents);
+       of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+
+       return;
+
+err:
+       if (clk_data)
+               kfree(clk_data->clks);
+       kfree(clk_data);
+       kfree(parents);
+       kfree(rlock);
+}
+CLK_OF_DECLARE(flexgen, "st,flexgen", st_of_flexgen_setup);
index 4f53ee0..af94ed8 100644 (file)
@@ -41,7 +41,7 @@ struct stm_fs {
        unsigned long nsdiv;
 };
 
-static struct stm_fs fs216c65_rtbl[] = {
+static const struct stm_fs fs216c65_rtbl[] = {
        { .mdiv = 0x1f, .pe = 0x0,      .sdiv = 0x7,    .nsdiv = 0 },   /* 312.5 Khz */
        { .mdiv = 0x17, .pe = 0x25ed,   .sdiv = 0x1,    .nsdiv = 0 },   /* 27    MHz */
        { .mdiv = 0x1a, .pe = 0x7b36,   .sdiv = 0x2,    .nsdiv = 1 },   /* 36.87 MHz */
@@ -49,31 +49,86 @@ static struct stm_fs fs216c65_rtbl[] = {
        { .mdiv = 0x11, .pe = 0x1c72,   .sdiv = 0x1,    .nsdiv = 1 },   /* 108   MHz */
 };
 
-static struct stm_fs fs432c65_rtbl[] = {
-       { .mdiv = 0x1f, .pe = 0x0,      .sdiv = 0x7,    .nsdiv = 0 },   /* 625   Khz */
-       { .mdiv = 0x11, .pe = 0x1c72,   .sdiv = 0x2,    .nsdiv = 1 },   /* 108   MHz */
-       { .mdiv = 0x19, .pe = 0x121a,   .sdiv = 0x0,    .nsdiv = 1 },   /* 297   MHz */
+static const struct stm_fs fs432c65_rtbl[] = {
+       { .mdiv = 0x1f, .pe = 0x0,      .sdiv = 0x7,    .nsdiv = 0 },   /* 625     kHz */
+       { .mdiv = 0x13, .pe = 0x777c,   .sdiv = 0x4,    .nsdiv = 1 },   /* 25.175  MHz */
+       { .mdiv = 0x19, .pe = 0x4d35,   .sdiv = 0x2,    .nsdiv = 0 },   /* 25.200  MHz */
+       { .mdiv = 0x11, .pe = 0x1c72,   .sdiv = 0x4,    .nsdiv = 1 },   /* 27.000  MHz */
+       { .mdiv = 0x17, .pe = 0x28f5,   .sdiv = 0x2,    .nsdiv = 0 },   /* 27.027  MHz */
+       { .mdiv = 0x16, .pe = 0x3359,   .sdiv = 0x2,    .nsdiv = 0 },   /* 28.320  MHz */
+       { .mdiv = 0x1f, .pe = 0x2083,   .sdiv = 0x3,    .nsdiv = 1 },   /* 30.240  MHz */
+       { .mdiv = 0x1e, .pe = 0x430d,   .sdiv = 0x3,    .nsdiv = 1 },   /* 31.500  MHz */
+       { .mdiv = 0x17, .pe = 0x0,      .sdiv = 0x3,    .nsdiv = 1 },   /* 40.000  MHz */
+       { .mdiv = 0x19, .pe = 0x121a,   .sdiv = 0x1,    .nsdiv = 0 },   /* 49.500  MHz */
+       { .mdiv = 0x13, .pe = 0x6667,   .sdiv = 0x3,    .nsdiv = 1 },   /* 50.000  MHz */
+       { .mdiv = 0x10, .pe = 0x1ee6,   .sdiv = 0x3,    .nsdiv = 1 },   /* 57.284  MHz */
+       { .mdiv = 0x1d, .pe = 0x3b14,   .sdiv = 0x2,    .nsdiv = 1 },   /* 65.000  MHz */
+       { .mdiv = 0x12, .pe = 0x7c65,   .sdiv = 0x1,    .nsdiv = 0 },   /* 71.000  MHz */
+       { .mdiv = 0x19, .pe = 0xecd,    .sdiv = 0x2,    .nsdiv = 1 },   /* 74.176  MHz */
+       { .mdiv = 0x19, .pe = 0x121a,   .sdiv = 0x2,    .nsdiv = 1 },   /* 74.250  MHz */
+       { .mdiv = 0x19, .pe = 0x3334,   .sdiv = 0x2,    .nsdiv = 1 },   /* 75.000  MHz */
+       { .mdiv = 0x18, .pe = 0x5138,   .sdiv = 0x2,    .nsdiv = 1 },   /* 78.800  MHz */
+       { .mdiv = 0x1d, .pe = 0x77d,    .sdiv = 0x0,    .nsdiv = 0 },   /* 85.500  MHz */
+       { .mdiv = 0x1c, .pe = 0x13d5,   .sdiv = 0x0,    .nsdiv = 0 },   /* 88.750  MHz */
+       { .mdiv = 0x11, .pe = 0x1c72,   .sdiv = 0x2,    .nsdiv = 1 },   /* 108.000 MHz */
+       { .mdiv = 0x17, .pe = 0x28f5,   .sdiv = 0x0,    .nsdiv = 0 },   /* 108.108 MHz */
+       { .mdiv = 0x10, .pe = 0x6e26,   .sdiv = 0x2,    .nsdiv = 1 },   /* 118.963 MHz */
+       { .mdiv = 0x15, .pe = 0x3e63,   .sdiv = 0x0,    .nsdiv = 0 },   /* 119.000 MHz */
+       { .mdiv = 0x1c, .pe = 0x471d,   .sdiv = 0x1,    .nsdiv = 1 },   /* 135.000 MHz */
+       { .mdiv = 0x19, .pe = 0xecd,    .sdiv = 0x1,    .nsdiv = 1 },   /* 148.352 MHz */
+       { .mdiv = 0x19, .pe = 0x121a,   .sdiv = 0x1,    .nsdiv = 1 },   /* 148.500 MHz */
+       { .mdiv = 0x19, .pe = 0x121a,   .sdiv = 0x0,    .nsdiv = 1 },   /* 297     MHz */
 };
 
-static struct stm_fs fs660c32_rtbl[] = {
-       { .mdiv = 0x01, .pe = 0x2aaa,   .sdiv = 0x8,    .nsdiv = 0 },   /* 600   KHz */
-       { .mdiv = 0x02, .pe = 0x3d33,   .sdiv = 0x0,    .nsdiv = 0 },   /* 148.5 Mhz */
-       { .mdiv = 0x13, .pe = 0x5bcc,   .sdiv = 0x0,    .nsdiv = 1 },   /* 297   Mhz */
-       { .mdiv = 0x0e, .pe = 0x1025,   .sdiv = 0x0,    .nsdiv = 1 },   /* 333   Mhz */
-       { .mdiv = 0x0b, .pe = 0x715f,   .sdiv = 0x0,    .nsdiv = 1 },   /* 350   Mhz */
+static const struct stm_fs fs660c32_rtbl[] = {
+       { .mdiv = 0x14, .pe = 0x376b,   .sdiv = 0x4,    .nsdiv = 1 },   /* 25.175  MHz */
+       { .mdiv = 0x14, .pe = 0x30c3,   .sdiv = 0x4,    .nsdiv = 1 },   /* 25.200  MHz */
+       { .mdiv = 0x10, .pe = 0x71c7,   .sdiv = 0x4,    .nsdiv = 1 },   /* 27.000  MHz */
+       { .mdiv = 0x00, .pe = 0x47af,   .sdiv = 0x3,    .nsdiv = 0 },   /* 27.027  MHz */
+       { .mdiv = 0x0e, .pe = 0x4e1a,   .sdiv = 0x4,    .nsdiv = 1 },   /* 28.320  MHz */
+       { .mdiv = 0x0b, .pe = 0x534d,   .sdiv = 0x4,    .nsdiv = 1 },   /* 30.240  MHz */
+       { .mdiv = 0x17, .pe = 0x6fbf,   .sdiv = 0x2,    .nsdiv = 0 },   /* 31.500  MHz */
+       { .mdiv = 0x01, .pe = 0x0,      .sdiv = 0x4,    .nsdiv = 1 },   /* 40.000  MHz */
+       { .mdiv = 0x15, .pe = 0x2aab,   .sdiv = 0x3,    .nsdiv = 1 },   /* 49.500  MHz */
+       { .mdiv = 0x14, .pe = 0x6666,   .sdiv = 0x3,    .nsdiv = 1 },   /* 50.000  MHz */
+       { .mdiv = 0x1d, .pe = 0x395f,   .sdiv = 0x1,    .nsdiv = 0 },   /* 57.284  MHz */
+       { .mdiv = 0x08, .pe = 0x4ec5,   .sdiv = 0x3,    .nsdiv = 1 },   /* 65.000  MHz */
+       { .mdiv = 0x05, .pe = 0x1770,   .sdiv = 0x3,    .nsdiv = 1 },   /* 71.000  MHz */
+       { .mdiv = 0x03, .pe = 0x4ba7,   .sdiv = 0x3,    .nsdiv = 1 },   /* 74.176  MHz */
+       { .mdiv = 0x0f, .pe = 0x3426,   .sdiv = 0x1,    .nsdiv = 0 },   /* 74.250  MHz */
+       { .mdiv = 0x0e, .pe = 0x7777,   .sdiv = 0x1,    .nsdiv = 0 },   /* 75.000  MHz */
+       { .mdiv = 0x01, .pe = 0x4053,   .sdiv = 0x3,    .nsdiv = 1 },   /* 78.800  MHz */
+       { .mdiv = 0x09, .pe = 0x15b5,   .sdiv = 0x1,    .nsdiv = 0 },   /* 85.500  MHz */
+       { .mdiv = 0x1b, .pe = 0x3f19,   .sdiv = 0x2,    .nsdiv = 1 },   /* 88.750  MHz */
+       { .mdiv = 0x10, .pe = 0x71c7,   .sdiv = 0x2,    .nsdiv = 1 },   /* 108.000 MHz */
+       { .mdiv = 0x00, .pe = 0x47af,   .sdiv = 0x1,    .nsdiv = 0 },   /* 108.108 MHz */
+       { .mdiv = 0x0c, .pe = 0x3118,   .sdiv = 0x2,    .nsdiv = 1 },   /* 118.963 MHz */
+       { .mdiv = 0x0c, .pe = 0x2f54,   .sdiv = 0x2,    .nsdiv = 1 },   /* 119.000 MHz */
+       { .mdiv = 0x07, .pe = 0xe39,    .sdiv = 0x2,    .nsdiv = 1 },   /* 135.000 MHz */
+       { .mdiv = 0x03, .pe = 0x4ba7,   .sdiv = 0x2,    .nsdiv = 1 },   /* 148.352 MHz */
+       { .mdiv = 0x0f, .pe = 0x3426,   .sdiv = 0x0,    .nsdiv = 0 },   /* 148.500 MHz */
+       { .mdiv = 0x03, .pe = 0x4ba7,   .sdiv = 0x1,    .nsdiv = 1 },   /* 296.704 MHz */
+       { .mdiv = 0x03, .pe = 0x471c,   .sdiv = 0x1,    .nsdiv = 1 },   /* 297.000 MHz */
+       { .mdiv = 0x00, .pe = 0x295f,   .sdiv = 0x1,    .nsdiv = 1 },   /* 326.700 MHz */
+       { .mdiv = 0x1f, .pe = 0x3633,   .sdiv = 0x0,    .nsdiv = 1 },   /* 333.000 MHz */
+       { .mdiv = 0x1c, .pe = 0x0,      .sdiv = 0x0,    .nsdiv = 1 },   /* 352.000 MHz */
 };
 
 struct clkgen_quadfs_data {
        bool reset_present;
        bool bwfilter_present;
        bool lockstatus_present;
+       bool powerup_polarity;
+       bool standby_polarity;
        bool nsdiv_present;
+       bool nrst_present;
        struct clkgen_field ndiv;
        struct clkgen_field ref_bw;
        struct clkgen_field nreset;
        struct clkgen_field npda;
        struct clkgen_field lock_status;
 
+       struct clkgen_field nrst[QUADFS_MAX_CHAN];
        struct clkgen_field nsb[QUADFS_MAX_CHAN];
        struct clkgen_field en[QUADFS_MAX_CHAN];
        struct clkgen_field mdiv[QUADFS_MAX_CHAN];
@@ -82,9 +137,9 @@ struct clkgen_quadfs_data {
        struct clkgen_field nsdiv[QUADFS_MAX_CHAN];
 
        const struct clk_ops *pll_ops;
-       struct stm_fs *rtbl;
+       const struct stm_fs *rtbl;
        u8 rtbl_cnt;
-       int  (*get_rate)(unsigned long , struct stm_fs *,
+       int  (*get_rate)(unsigned long, const struct stm_fs *,
                        unsigned long *);
 };
 
@@ -94,11 +149,11 @@ static const struct clk_ops st_quadfs_fs216c65_ops;
 static const struct clk_ops st_quadfs_fs432c65_ops;
 static const struct clk_ops st_quadfs_fs660c32_ops;
 
-static int clk_fs216c65_get_rate(unsigned long, struct stm_fs *,
+static int clk_fs216c65_get_rate(unsigned long, const struct stm_fs *,
                unsigned long *);
-static int clk_fs432c65_get_rate(unsigned long, struct stm_fs *,
+static int clk_fs432c65_get_rate(unsigned long, const struct stm_fs *,
                unsigned long *);
-static int clk_fs660c32_dig_get_rate(unsigned long, struct stm_fs *,
+static int clk_fs660c32_dig_get_rate(unsigned long, const struct stm_fs *,
                unsigned long *);
 /*
  * Values for all of the standalone instances of this clock
@@ -106,7 +161,7 @@ static int clk_fs660c32_dig_get_rate(unsigned long, struct stm_fs *,
  * that the individual channel standby control bits (nsb) are in the
  * first register along with the PLL control bits.
  */
-static struct clkgen_quadfs_data st_fs216c65_416 = {
+static const struct clkgen_quadfs_data st_fs216c65_416 = {
        /* 416 specific */
        .npda   = CLKGEN_FIELD(0x0, 0x1, 14),
        .nsb    = { CLKGEN_FIELD(0x0, 0x1, 10),
@@ -143,7 +198,7 @@ static struct clkgen_quadfs_data st_fs216c65_416 = {
        .get_rate       = clk_fs216c65_get_rate,
 };
 
-static struct clkgen_quadfs_data st_fs432c65_416 = {
+static const struct clkgen_quadfs_data st_fs432c65_416 = {
        .npda   = CLKGEN_FIELD(0x0, 0x1, 14),
        .nsb    = { CLKGEN_FIELD(0x0, 0x1, 10),
                    CLKGEN_FIELD(0x0, 0x1, 11),
@@ -179,7 +234,7 @@ static struct clkgen_quadfs_data st_fs432c65_416 = {
        .get_rate       = clk_fs432c65_get_rate,
 };
 
-static struct clkgen_quadfs_data st_fs660c32_E_416 = {
+static const struct clkgen_quadfs_data st_fs660c32_E_416 = {
        .npda   = CLKGEN_FIELD(0x0, 0x1, 14),
        .nsb    = { CLKGEN_FIELD(0x0, 0x1, 10),
                    CLKGEN_FIELD(0x0, 0x1, 11),
@@ -215,7 +270,7 @@ static struct clkgen_quadfs_data st_fs660c32_E_416 = {
        .get_rate       = clk_fs660c32_dig_get_rate,
 };
 
-static struct clkgen_quadfs_data st_fs660c32_F_416 = {
+static const struct clkgen_quadfs_data st_fs660c32_F_416 = {
        .npda   = CLKGEN_FIELD(0x0, 0x1, 14),
        .nsb    = { CLKGEN_FIELD(0x0, 0x1, 10),
                    CLKGEN_FIELD(0x0, 0x1, 11),
@@ -251,6 +306,91 @@ static struct clkgen_quadfs_data st_fs660c32_F_416 = {
        .get_rate       = clk_fs660c32_dig_get_rate,
 };
 
+static const struct clkgen_quadfs_data st_fs660c32_C_407 = {
+       .nrst_present = true,
+       .nrst   = { CLKGEN_FIELD(0x2f0, 0x1, 0),
+                   CLKGEN_FIELD(0x2f0, 0x1, 1),
+                   CLKGEN_FIELD(0x2f0, 0x1, 2),
+                   CLKGEN_FIELD(0x2f0, 0x1, 3) },
+       .npda   = CLKGEN_FIELD(0x2f0, 0x1, 12),
+       .nsb    = { CLKGEN_FIELD(0x2f0, 0x1, 8),
+                   CLKGEN_FIELD(0x2f0, 0x1, 9),
+                   CLKGEN_FIELD(0x2f0, 0x1, 10),
+                   CLKGEN_FIELD(0x2f0, 0x1, 11) },
+       .nsdiv_present = true,
+       .nsdiv  = { CLKGEN_FIELD(0x304, 0x1, 24),
+                   CLKGEN_FIELD(0x308, 0x1, 24),
+                   CLKGEN_FIELD(0x30c, 0x1, 24),
+                   CLKGEN_FIELD(0x310, 0x1, 24) },
+       .mdiv   = { CLKGEN_FIELD(0x304, 0x1f, 15),
+                   CLKGEN_FIELD(0x308, 0x1f, 15),
+                   CLKGEN_FIELD(0x30c, 0x1f, 15),
+                   CLKGEN_FIELD(0x310, 0x1f, 15) },
+       .en     = { CLKGEN_FIELD(0x2fc, 0x1, 0),
+                   CLKGEN_FIELD(0x2fc, 0x1, 1),
+                   CLKGEN_FIELD(0x2fc, 0x1, 2),
+                   CLKGEN_FIELD(0x2fc, 0x1, 3) },
+       .ndiv   = CLKGEN_FIELD(0x2f4, 0x7, 16),
+       .pe     = { CLKGEN_FIELD(0x304, 0x7fff, 0),
+                   CLKGEN_FIELD(0x308, 0x7fff, 0),
+                   CLKGEN_FIELD(0x30c, 0x7fff, 0),
+                   CLKGEN_FIELD(0x310, 0x7fff, 0) },
+       .sdiv   = { CLKGEN_FIELD(0x304, 0xf, 20),
+                   CLKGEN_FIELD(0x308, 0xf, 20),
+                   CLKGEN_FIELD(0x30c, 0xf, 20),
+                   CLKGEN_FIELD(0x310, 0xf, 20) },
+       .lockstatus_present = true,
+       .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+       .powerup_polarity = 1,
+       .standby_polarity = 1,
+       .pll_ops        = &st_quadfs_pll_c32_ops,
+       .rtbl           = fs660c32_rtbl,
+       .rtbl_cnt       = ARRAY_SIZE(fs660c32_rtbl),
+       .get_rate       = clk_fs660c32_dig_get_rate,
+};
+
+static const struct clkgen_quadfs_data st_fs660c32_D_407 = {
+       .nrst_present = true,
+       .nrst   = { CLKGEN_FIELD(0x2a0, 0x1, 0),
+                   CLKGEN_FIELD(0x2a0, 0x1, 1),
+                   CLKGEN_FIELD(0x2a0, 0x1, 2),
+                   CLKGEN_FIELD(0x2a0, 0x1, 3) },
+       .ndiv   = CLKGEN_FIELD(0x2a4, 0x7, 16),
+       .pe     = { CLKGEN_FIELD(0x2b4, 0x7fff, 0),
+                   CLKGEN_FIELD(0x2b8, 0x7fff, 0),
+                   CLKGEN_FIELD(0x2bc, 0x7fff, 0),
+                   CLKGEN_FIELD(0x2c0, 0x7fff, 0) },
+       .sdiv   = { CLKGEN_FIELD(0x2b4, 0xf, 20),
+                   CLKGEN_FIELD(0x2b8, 0xf, 20),
+                   CLKGEN_FIELD(0x2bc, 0xf, 20),
+                   CLKGEN_FIELD(0x2c0, 0xf, 20) },
+       .npda   = CLKGEN_FIELD(0x2a0, 0x1, 12),
+       .nsb    = { CLKGEN_FIELD(0x2a0, 0x1, 8),
+                   CLKGEN_FIELD(0x2a0, 0x1, 9),
+                   CLKGEN_FIELD(0x2a0, 0x1, 10),
+                   CLKGEN_FIELD(0x2a0, 0x1, 11) },
+       .nsdiv_present = true,
+       .nsdiv  = { CLKGEN_FIELD(0x2b4, 0x1, 24),
+                   CLKGEN_FIELD(0x2b8, 0x1, 24),
+                   CLKGEN_FIELD(0x2bc, 0x1, 24),
+                   CLKGEN_FIELD(0x2c0, 0x1, 24) },
+       .mdiv   = { CLKGEN_FIELD(0x2b4, 0x1f, 15),
+                   CLKGEN_FIELD(0x2b8, 0x1f, 15),
+                   CLKGEN_FIELD(0x2bc, 0x1f, 15),
+                   CLKGEN_FIELD(0x2c0, 0x1f, 15) },
+       .en     = { CLKGEN_FIELD(0x2ac, 0x1, 0),
+                   CLKGEN_FIELD(0x2ac, 0x1, 1),
+                   CLKGEN_FIELD(0x2ac, 0x1, 2),
+                   CLKGEN_FIELD(0x2ac, 0x1, 3) },
+       .lockstatus_present = true,
+       .lock_status = CLKGEN_FIELD(0x2A0, 0x1, 24),
+       .powerup_polarity = 1,
+       .standby_polarity = 1,
+       .pll_ops        = &st_quadfs_pll_c32_ops,
+       .rtbl           = fs660c32_rtbl,
+       .rtbl_cnt       = ARRAY_SIZE(fs660c32_rtbl),
+       .get_rate       = clk_fs660c32_dig_get_rate,
+};
+
 /**
  * DOC: A Frequency Synthesizer that multiplies its input clock by a fixed factor
  *
@@ -308,7 +448,7 @@ static int quadfs_pll_enable(struct clk_hw *hw)
        /*
         * Power up the PLL
         */
-       CLKGEN_WRITE(pll, npda, 1);
+       CLKGEN_WRITE(pll, npda, !pll->data->powerup_polarity);
 
        if (pll->lock)
                spin_unlock_irqrestore(pll->lock, flags);
@@ -335,7 +475,7 @@ static void quadfs_pll_disable(struct clk_hw *hw)
         * Powerdown the PLL and then put block into soft reset if we have
         * reset control.
         */
-       CLKGEN_WRITE(pll, npda, 0);
+       CLKGEN_WRITE(pll, npda, pll->data->powerup_polarity);
 
        if (pll->data->reset_present)
                CLKGEN_WRITE(pll, nreset, 0);
@@ -611,7 +751,10 @@ static int quadfs_fsynth_enable(struct clk_hw *hw)
        if (fs->lock)
                spin_lock_irqsave(fs->lock, flags);
 
-       CLKGEN_WRITE(fs, nsb[fs->chan], 1);
+       CLKGEN_WRITE(fs, nsb[fs->chan], !fs->data->standby_polarity);
+
+       if (fs->data->nrst_present)
+               CLKGEN_WRITE(fs, nrst[fs->chan], 0);
 
        if (fs->lock)
                spin_unlock_irqrestore(fs->lock, flags);
@@ -631,7 +774,7 @@ static void quadfs_fsynth_disable(struct clk_hw *hw)
        if (fs->lock)
                spin_lock_irqsave(fs->lock, flags);
 
-       CLKGEN_WRITE(fs, nsb[fs->chan], 0);
+       CLKGEN_WRITE(fs, nsb[fs->chan], fs->data->standby_polarity);
 
        if (fs->lock)
                spin_unlock_irqrestore(fs->lock, flags);
@@ -645,12 +788,12 @@ static int quadfs_fsynth_is_enabled(struct clk_hw *hw)
        pr_debug("%s: %s enable bit = 0x%x\n",
                 __func__, __clk_get_name(hw->clk), nsb);
 
-       return !!nsb;
+       return fs->data->standby_polarity ? !nsb : !!nsb;
 }
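+
+/*
+ * Note: when standby_polarity is set (the stih407 tables above), nsb is
+ * an active-high standby request: enable writes !standby_polarity,
+ * disable writes standby_polarity, and the read-back above is inverted.
+ */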
 
 #define P15                    (uint64_t)(1 << 15)
 
-static int clk_fs216c65_get_rate(unsigned long input, struct stm_fs *fs,
+static int clk_fs216c65_get_rate(unsigned long input, const struct stm_fs *fs,
                unsigned long *rate)
 {
        uint64_t res;
@@ -670,7 +813,7 @@ static int clk_fs216c65_get_rate(unsigned long input, struct stm_fs *fs,
        return 0;
 }
 
-static int clk_fs432c65_get_rate(unsigned long input, struct stm_fs *fs,
+static int clk_fs432c65_get_rate(unsigned long input, const struct stm_fs *fs,
                unsigned long *rate)
 {
        uint64_t res;
@@ -693,7 +836,7 @@ static int clk_fs432c65_get_rate(unsigned long input, struct stm_fs *fs,
 #define P20            (uint64_t)(1 << 20)
 
 static int clk_fs660c32_dig_get_rate(unsigned long input,
-                               struct stm_fs *fs, unsigned long *rate)
+                               const struct stm_fs *fs, unsigned long *rate)
 {
        unsigned long s = (1 << fs->sdiv);
        unsigned long ns;
@@ -749,7 +892,7 @@ static long quadfs_find_best_rate(struct clk_hw *hw, unsigned long drate,
 {
        struct st_clk_quadfs_fsynth *fs = to_quadfs_fsynth(hw);
        int (*clk_fs_get_rate)(unsigned long ,
-                               struct stm_fs *, unsigned long *);
+                               const struct stm_fs *, unsigned long *);
        struct stm_fs prev_params;
        unsigned long prev_rate, rate = 0;
        unsigned long diff_rate, prev_diff_rate = ~0;
@@ -793,7 +936,7 @@ static unsigned long quadfs_recalc_rate(struct clk_hw *hw,
        unsigned long rate = 0;
        struct stm_fs params;
        int (*clk_fs_get_rate)(unsigned long ,
-                               struct stm_fs *, unsigned long *);
+                               const struct stm_fs *, unsigned long *);
 
        clk_fs_get_rate = fs->data->get_rate;
 
@@ -917,19 +1060,31 @@ static struct clk * __init st_clk_register_quadfs_fsynth(
 static struct of_device_id quadfs_of_match[] = {
        {
                .compatible = "st,stih416-quadfs216",
-               .data = (void *)&st_fs216c65_416
+               .data = &st_fs216c65_416
        },
        {
                .compatible = "st,stih416-quadfs432",
-               .data = (void *)&st_fs432c65_416
+               .data = &st_fs432c65_416
        },
        {
                .compatible = "st,stih416-quadfs660-E",
-               .data = (void *)&st_fs660c32_E_416
+               .data = &st_fs660c32_E_416
        },
        {
                .compatible = "st,stih416-quadfs660-F",
-               .data = (void *)&st_fs660c32_F_416
+               .data = &st_fs660c32_F_416
+       },
+       {
+               .compatible = "st,stih407-quadfs660-C",
+               .data = &st_fs660c32_C_407
+       },
+       {
+               .compatible = "st,stih407-quadfs660-D",
+               .data = &st_fs660c32_D_407
        },
        {}
 };
index a329906..79dc40b 100644 (file)
@@ -580,6 +580,11 @@ static struct clkgen_mux_data stih416_a9_mux_data = {
        .shift = 0,
        .width = 2,
 };
+
+static struct clkgen_mux_data stih407_a9_mux_data = {
+       .offset = 0x1a4,
+       .shift = 1,
+       .width = 2,
+};
 
 static struct of_device_id mux_of_match[] = {
        {
@@ -610,6 +615,10 @@ static struct of_device_id mux_of_match[] = {
                .compatible = "st,stih416-clkgen-a9-mux",
                .data = &stih416_a9_mux_data,
        },
+       {
+               .compatible = "st,stih407-clkgen-a9-mux",
+               .data = &stih407_a9_mux_data,
+       },
        {}
 };
 
@@ -765,7 +774,8 @@ void __init st_of_clkgen_vcc_setup(struct device_node *np)
                div->reg = reg + VCC_DIV_OFFSET;
                div->shift = 2 * i;
                div->width = 2;
-               div->flags = CLK_DIVIDER_POWER_OF_TWO;
+               div->flags = CLK_DIVIDER_POWER_OF_TWO |
+                       CLK_DIVIDER_ROUND_CLOSEST;
 
                mux->reg = reg + VCC_MUX_OFFSET;
                mux->shift = 2 * i;
index d8b9b1a..29769d7 100644 (file)
@@ -59,7 +59,7 @@ static const struct clk_ops st_pll800c65_ops;
 static const struct clk_ops stm_pll3200c32_ops;
 static const struct clk_ops st_pll1200c32_ops;
 
-static struct clkgen_pll_data st_pll1600c65_ax = {
+static const struct clkgen_pll_data st_pll1600c65_ax = {
        .pdn_status     = CLKGEN_FIELD(0x0, 0x1,                        19),
        .locked_status  = CLKGEN_FIELD(0x0, 0x1,                        31),
        .mdiv           = CLKGEN_FIELD(0x0, C65_MDIV_PLL1600_MASK,      0),
@@ -67,7 +67,7 @@ static struct clkgen_pll_data st_pll1600c65_ax = {
        .ops            = &st_pll1600c65_ops
 };
 
-static struct clkgen_pll_data st_pll800c65_ax = {
+static const struct clkgen_pll_data st_pll800c65_ax = {
        .pdn_status     = CLKGEN_FIELD(0x0,     0x1,                    19),
        .locked_status  = CLKGEN_FIELD(0x0,     0x1,                    31),
        .mdiv           = CLKGEN_FIELD(0x0,     C65_MDIV_PLL800_MASK,   0),
@@ -76,7 +76,7 @@ static struct clkgen_pll_data st_pll800c65_ax = {
        .ops            = &st_pll800c65_ops
 };
 
-static struct clkgen_pll_data st_pll3200c32_a1x_0 = {
+static const struct clkgen_pll_data st_pll3200c32_a1x_0 = {
        .pdn_status     = CLKGEN_FIELD(0x0,     0x1,                    31),
        .locked_status  = CLKGEN_FIELD(0x4,     0x1,                    31),
        .ndiv           = CLKGEN_FIELD(0x0,     C32_NDIV_MASK,          0x0),
@@ -93,7 +93,7 @@ static struct clkgen_pll_data st_pll3200c32_a1x_0 = {
        .ops            = &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll3200c32_a1x_1 = {
+static const struct clkgen_pll_data st_pll3200c32_a1x_1 = {
        .pdn_status     = CLKGEN_FIELD(0xC,     0x1,                    31),
        .locked_status  = CLKGEN_FIELD(0x10,    0x1,                    31),
        .ndiv           = CLKGEN_FIELD(0xC,     C32_NDIV_MASK,          0x0),
@@ -111,7 +111,7 @@ static struct clkgen_pll_data st_pll3200c32_a1x_1 = {
 };
 
 /* 415 specific */
-static struct clkgen_pll_data st_pll3200c32_a9_415 = {
+static const struct clkgen_pll_data st_pll3200c32_a9_415 = {
        .pdn_status     = CLKGEN_FIELD(0x0,     0x1,                    0),
        .locked_status  = CLKGEN_FIELD(0x6C,    0x1,                    0),
        .ndiv           = CLKGEN_FIELD(0x0,     C32_NDIV_MASK,          9),
@@ -122,7 +122,7 @@ static struct clkgen_pll_data st_pll3200c32_a9_415 = {
        .ops            = &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll3200c32_ddr_415 = {
+static const struct clkgen_pll_data st_pll3200c32_ddr_415 = {
        .pdn_status     = CLKGEN_FIELD(0x0,     0x1,                    0),
        .locked_status  = CLKGEN_FIELD(0x100,   0x1,                    0),
        .ndiv           = CLKGEN_FIELD(0x8,     C32_NDIV_MASK,          0),
@@ -135,7 +135,7 @@ static struct clkgen_pll_data st_pll3200c32_ddr_415 = {
        .ops            = &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll1200c32_gpu_415 = {
+static const struct clkgen_pll_data st_pll1200c32_gpu_415 = {
        .pdn_status     = CLKGEN_FIELD(0x144,   0x1,                    3),
        .locked_status  = CLKGEN_FIELD(0x168,   0x1,                    0),
        .ldf            = CLKGEN_FIELD(0x0,     C32_LDF_MASK,           3),
@@ -146,7 +146,7 @@ static struct clkgen_pll_data st_pll1200c32_gpu_415 = {
 };
 
 /* 416 specific */
-static struct clkgen_pll_data st_pll3200c32_a9_416 = {
+static const struct clkgen_pll_data st_pll3200c32_a9_416 = {
        .pdn_status     = CLKGEN_FIELD(0x0,     0x1,                    0),
        .locked_status  = CLKGEN_FIELD(0x6C,    0x1,                    0),
        .ndiv           = CLKGEN_FIELD(0x8,     C32_NDIV_MASK,          0),
@@ -157,7 +157,7 @@ static struct clkgen_pll_data st_pll3200c32_a9_416 = {
        .ops            = &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll3200c32_ddr_416 = {
+static const struct clkgen_pll_data st_pll3200c32_ddr_416 = {
        .pdn_status     = CLKGEN_FIELD(0x0,     0x1,                    0),
        .locked_status  = CLKGEN_FIELD(0x10C,   0x1,                    0),
        .ndiv           = CLKGEN_FIELD(0x8,     C32_NDIV_MASK,          0),
@@ -170,7 +170,7 @@ static struct clkgen_pll_data st_pll3200c32_ddr_416 = {
        .ops            = &stm_pll3200c32_ops,
 };
 
-static struct clkgen_pll_data st_pll1200c32_gpu_416 = {
+static const struct clkgen_pll_data st_pll1200c32_gpu_416 = {
        .pdn_status     = CLKGEN_FIELD(0x8E4,   0x1,                    3),
        .locked_status  = CLKGEN_FIELD(0x90C,   0x1,                    0),
        .ldf            = CLKGEN_FIELD(0x0,     C32_LDF_MASK,           3),
@@ -180,6 +180,54 @@ static struct clkgen_pll_data st_pll1200c32_gpu_416 = {
        .ops            = &st_pll1200c32_ops,
 };
 
+static const struct clkgen_pll_data st_pll3200c32_407_a0 = {
+       /* 407 A0 */
+       .pdn_status     = CLKGEN_FIELD(0x2a0,   0x1,                    8),
+       .locked_status  = CLKGEN_FIELD(0x2a0,   0x1,                    24),
+       .ndiv           = CLKGEN_FIELD(0x2a4,   C32_NDIV_MASK,          16),
+       .idf            = CLKGEN_FIELD(0x2a4,   C32_IDF_MASK,           0x0),
+       .num_odfs = 1,
+       .odf            = { CLKGEN_FIELD(0x2b4, C32_ODF_MASK,           0) },
+       .odf_gate       = { CLKGEN_FIELD(0x2b4, 0x1,                    6) },
+       .ops            = &stm_pll3200c32_ops,
+};
+
+static const struct clkgen_pll_data st_pll3200c32_407_c0_0 = {
+       /* 407 C0 PLL0 */
+       .pdn_status     = CLKGEN_FIELD(0x2a0,   0x1,                    8),
+       .locked_status  = CLKGEN_FIELD(0x2a0,   0x1,                    24),
+       .ndiv           = CLKGEN_FIELD(0x2a4,   C32_NDIV_MASK,          16),
+       .idf            = CLKGEN_FIELD(0x2a4,   C32_IDF_MASK,           0x0),
+       .num_odfs = 1,
+       .odf            = { CLKGEN_FIELD(0x2b4, C32_ODF_MASK,           0) },
+       .odf_gate       = { CLKGEN_FIELD(0x2b4, 0x1,                    6) },
+       .ops            = &stm_pll3200c32_ops,
+};
+
+static const struct clkgen_pll_data st_pll3200c32_407_c0_1 = {
+       /* 407 C0 PLL1 */
+       .pdn_status     = CLKGEN_FIELD(0x2c8,   0x1,                    8),
+       .locked_status  = CLKGEN_FIELD(0x2c8,   0x1,                    24),
+       .ndiv           = CLKGEN_FIELD(0x2cc,   C32_NDIV_MASK,          16),
+       .idf            = CLKGEN_FIELD(0x2cc,   C32_IDF_MASK,           0x0),
+       .num_odfs = 1,
+       .odf            = { CLKGEN_FIELD(0x2dc, C32_ODF_MASK,           0) },
+       .odf_gate       = { CLKGEN_FIELD(0x2dc, 0x1,                    6) },
+       .ops            = &stm_pll3200c32_ops,
+};
+
+static const struct clkgen_pll_data st_pll3200c32_407_a9 = {
+       /* 407 A9 */
+       .pdn_status     = CLKGEN_FIELD(0x1a8,   0x1,                    0),
+       .locked_status  = CLKGEN_FIELD(0x87c,   0x1,                    0),
+       .ndiv           = CLKGEN_FIELD(0x1b0,   C32_NDIV_MASK,          0),
+       .idf            = CLKGEN_FIELD(0x1a8,   C32_IDF_MASK,           25),
+       .num_odfs = 1,
+       .odf            = { CLKGEN_FIELD(0x1b0, C32_ODF_MASK,           8) },
+       .odf_gate       = { CLKGEN_FIELD(0x1ac, 0x1,                    28) },
+       .ops            = &stm_pll3200c32_ops,
+};
+
 /**
  * DOC: Clock Generated by PLL, rate set and enabled by bootloader
  *
@@ -450,9 +498,8 @@ static void __init clkgena_c65_pll_setup(struct device_node *np)
         * PLL0 HS (high speed) output
         */
        clk_data->clks[0] = clkgen_pll_register(parent_name,
-                                               &st_pll1600c65_ax,
-                                               reg + CLKGENAx_PLL0_OFFSET,
-                                               clk_name);
+                       (struct clkgen_pll_data *) &st_pll1600c65_ax,
+                       reg + CLKGENAx_PLL0_OFFSET, clk_name);
 
        if (IS_ERR(clk_data->clks[0]))
                goto err;
@@ -480,9 +527,8 @@ static void __init clkgena_c65_pll_setup(struct device_node *np)
         * PLL1 output
         */
        clk_data->clks[2] = clkgen_pll_register(parent_name,
-                                               &st_pll800c65_ax,
-                                               reg + CLKGENAx_PLL1_OFFSET,
-                                               clk_name);
+                       (struct clkgen_pll_data *) &st_pll800c65_ax,
+                       reg + CLKGENAx_PLL1_OFFSET, clk_name);
 
        if (IS_ERR(clk_data->clks[2]))
                goto err;
@@ -572,6 +618,22 @@ static struct of_device_id c32_pll_of_match[] = {
                .compatible = "st,stih416-plls-c32-ddr",
                .data = &st_pll3200c32_ddr_416,
        },
+       {
+               .compatible = "st,stih407-plls-c32-a0",
+               .data = &st_pll3200c32_407_a0,
+       },
+       {
+               .compatible = "st,stih407-plls-c32-c0_0",
+               .data = &st_pll3200c32_407_c0_0,
+       },
+       {
+               .compatible = "st,stih407-plls-c32-c0_1",
+               .data = &st_pll3200c32_407_c0_1,
+       },
+       {
+               .compatible = "st,stih407-plls-c32-a9",
+               .data = &st_pll3200c32_407_a9,
+       },
        {}
 };
 
index 762fd64..6850cba 100644 (file)
@@ -6,4 +6,6 @@ obj-y += clk-sunxi.o clk-factors.o
 obj-y += clk-a10-hosc.o
 obj-y += clk-a20-gmac.o
 
-obj-$(CONFIG_MFD_SUN6I_PRCM) += clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o
+obj-$(CONFIG_MFD_SUN6I_PRCM) += \
+       clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
+       clk-sun8i-apb0.o
index 633ddc4..5296fd6 100644 (file)
@@ -60,7 +60,7 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
        struct clk_gate *gate;
        const char *clk_name = node->name;
        const char *parents[SUN7I_A20_GMAC_PARENTS];
-       void *reg;
+       void __iomem *reg;
 
        if (of_property_read_string(node, "clock-output-names", &clk_name))
                return;
index 3806d97..2057c8a 100644 (file)
@@ -62,7 +62,7 @@ static unsigned long clk_factors_recalc_rate(struct clk_hw *hw,
                p = FACTOR_GET(config->pshift, config->pwidth, reg);
 
        /* Calculate the rate */
-       rate = (parent_rate * n * (k + 1) >> p) / (m + 1);
+       rate = (parent_rate * (n + config->n_start) * (k + 1) >> p) / (m + 1);
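+       /* e.g. the sun8i A23 PLL1 sets n_start = 1, making the
+        * effective multiplier (n + 1) rather than n */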
 
        return rate;
 }
index 02e1a43..d2d0efa 100644 (file)
@@ -15,6 +15,7 @@ struct clk_factors_config {
        u8 mwidth;
        u8 pshift;
        u8 pwidth;
+       u8 n_start;
 };
 
 struct clk_factors {
index 44cd27c..e10d052 100644 (file)
@@ -9,81 +9,93 @@
  */
 
 #include <linux/clk-provider.h>
+#include <linux/clkdev.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #define SUN6I_APB0_GATES_MAX_SIZE      32
 
+struct gates_data {
+       DECLARE_BITMAP(mask, SUN6I_APB0_GATES_MAX_SIZE);
+};
+
+static const struct gates_data sun6i_a31_apb0_gates __initconst = {
+       .mask = {0x7F},
+};
+
+static const struct gates_data sun8i_a23_apb0_gates __initconst = {
+       .mask = {0x5D},
+};
+
+static const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
+       { .compatible = "allwinner,sun6i-a31-apb0-gates-clk", .data = &sun6i_a31_apb0_gates },
+       { .compatible = "allwinner,sun8i-a23-apb0-gates-clk", .data = &sun8i_a23_apb0_gates },
+       { /* sentinel */ }
+};
+
 static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
 {
        struct device_node *np = pdev->dev.of_node;
        struct clk_onecell_data *clk_data;
+       const struct of_device_id *device;
+       const struct gates_data *data;
        const char *clk_parent;
        const char *clk_name;
        struct resource *r;
        void __iomem *reg;
-       int gate_id;
        int ngates;
        int i;
+       int j = 0;
+
+       if (!np)
+               return -ENODEV;
+
+       device = of_match_device(sun6i_a31_apb0_gates_clk_dt_ids, &pdev->dev);
+       if (!device)
+               return -ENODEV;
+       data = device->data;
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        reg = devm_ioremap_resource(&pdev->dev, r);
-       if (!reg)
+       if (IS_ERR(reg))
                return PTR_ERR(reg);
 
        clk_parent = of_clk_get_parent_name(np, 0);
        if (!clk_parent)
                return -EINVAL;
 
-       ngates = of_property_count_strings(np, "clock-output-names");
-       if (ngates < 0)
-               return ngates;
-
-       if (!ngates || ngates > SUN6I_APB0_GATES_MAX_SIZE)
-               return -EINVAL;
-
        clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
                                GFP_KERNEL);
        if (!clk_data)
                return -ENOMEM;
 
-       clk_data->clks = devm_kzalloc(&pdev->dev,
-                                     SUN6I_APB0_GATES_MAX_SIZE *
-                                     sizeof(struct clk *),
-                                     GFP_KERNEL);
+       /* Worst-case size approximation and memory allocation */
+       ngates = find_last_bit(data->mask, SUN6I_APB0_GATES_MAX_SIZE);
+       clk_data->clks = devm_kcalloc(&pdev->dev, (ngates + 1),
+                                     sizeof(struct clk *), GFP_KERNEL);
        if (!clk_data->clks)
                return -ENOMEM;
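+       /*
+        * e.g. the A23 mask 0x5d has bits 0, 2, 3, 4 and 6 set: seven
+        * slots are allocated and the unset indices (1 and 5) stay NULL
+        * in the onecell array.
+        */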
 
-       for (i = 0; i < ngates; i++) {
+       for_each_set_bit(i, data->mask, SUN6I_APB0_GATES_MAX_SIZE) {
                of_property_read_string_index(np, "clock-output-names",
-                                             i, &clk_name);
+                                             j, &clk_name);
 
-               gate_id = i;
-               of_property_read_u32_index(np, "clock-indices", i, &gate_id);
+               clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
+                                                     clk_parent, 0, reg, i,
+                                                     0, NULL);
+               WARN_ON(IS_ERR(clk_data->clks[i]));
+               clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
 
-               WARN_ON(gate_id >= SUN6I_APB0_GATES_MAX_SIZE);
-               if (gate_id >= SUN6I_APB0_GATES_MAX_SIZE)
-                       continue;
-
-               clk_data->clks[gate_id] = clk_register_gate(&pdev->dev,
-                                                           clk_name,
-                                                           clk_parent, 0,
-                                                           reg, gate_id,
-                                                           0, NULL);
-               WARN_ON(IS_ERR(clk_data->clks[gate_id]));
+               j++;
        }
 
-       clk_data->clk_num = ngates;
+       clk_data->clk_num = ngates + 1;
 
        return of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
 }
 
-const struct of_device_id sun6i_a31_apb0_gates_clk_dt_ids[] = {
-       { .compatible = "allwinner,sun6i-a31-apb0-gates-clk" },
-       { /* sentinel */ }
-};
-
 static struct platform_driver sun6i_a31_apb0_gates_clk_driver = {
        .driver = {
                .name = "sun6i-a31-apb0-gates-clk",
index 11f17c3..1fa2337 100644 (file)
@@ -57,7 +57,7 @@ static int sun6i_a31_apb0_clk_probe(struct platform_device *pdev)
        return of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
 
-const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = {
+static const struct of_device_id sun6i_a31_apb0_clk_dt_ids[] = {
        { .compatible = "allwinner,sun6i-a31-apb0-clk" },
        { /* sentinel */ }
 };
index f73cc05..eca8ca0 100644 (file)
@@ -160,7 +160,7 @@ static int ar100_set_rate(struct clk_hw *hw, unsigned long rate,
        return 0;
 }
 
-struct clk_ops ar100_ops = {
+static struct clk_ops ar100_ops = {
        .recalc_rate = ar100_recalc_rate,
        .determine_rate = ar100_determine_rate,
        .set_parent = ar100_set_parent,
@@ -213,7 +213,7 @@ static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
        return of_clk_add_provider(np, of_clk_src_simple_get, clk);
 }
 
-const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
+static const struct of_device_id sun6i_a31_ar100_clk_dt_ids[] = {
        { .compatible = "allwinner,sun6i-a31-ar100-clk" },
        { /* sentinel */ }
 };
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
new file mode 100644 (file)
index 0000000..1f5ba9b
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2014 Chen-Yu Tsai
+ * Author: Chen-Yu Tsai <wens@csie.org>
+ *
+ * Allwinner A23 APB0 clock driver
+ *
+ * License Terms: GNU General Public License v2
+ *
+ * Based on clk-sun6i-apb0.c
+ * Allwinner A31 APB0 clock driver
+ *
+ * Copyright (C) 2014 Free Electrons
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+static int sun8i_a23_apb0_clk_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       const char *clk_name = np->name;
+       const char *clk_parent;
+       struct resource *r;
+       void __iomem *reg;
+       struct clk *clk;
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       reg = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(reg))
+               return PTR_ERR(reg);
+
+       clk_parent = of_clk_get_parent_name(np, 0);
+       if (!clk_parent)
+               return -EINVAL;
+
+       of_property_read_string(np, "clock-output-names", &clk_name);
+
+       /* The A23 APB0 clock is a standard 2 bit wide divider clock */
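+       /* (CLK_DIVIDER_POWER_OF_TWO: field values 0-3 divide the parent
+        * by 1, 2, 4 or 8) */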
+       clk = clk_register_divider(&pdev->dev, clk_name, clk_parent, 0, reg,
+                                  0, 2, CLK_DIVIDER_POWER_OF_TWO, NULL);
+       if (IS_ERR(clk))
+               return PTR_ERR(clk);
+
+       return of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static const struct of_device_id sun8i_a23_apb0_clk_dt_ids[] = {
+       { .compatible = "allwinner,sun8i-a23-apb0-clk" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver sun8i_a23_apb0_clk_driver = {
+       .driver = {
+               .name = "sun8i-a23-apb0-clk",
+               .owner = THIS_MODULE,
+               .of_match_table = sun8i_a23_apb0_clk_dt_ids,
+       },
+       .probe = sun8i_a23_apb0_clk_probe,
+};
+module_platform_driver(sun8i_a23_apb0_clk_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner A23 APB0 clock Driver");
+MODULE_LICENSE("GPL v2");
index fb2ce84..b654b7b 100644 (file)
@@ -163,6 +163,54 @@ static void sun6i_a31_get_pll1_factors(u32 *freq, u32 parent_rate,
        }
 }
 
+/**
+ * sun8i_a23_get_pll1_factors() - calculates n, k, m, p factors for PLL1
+ * PLL1 rate is calculated as follows
+ * rate = (parent_rate * (n + 1) * (k + 1) >> p) / (m + 1);
+ * parent_rate is always 24Mhz
+ */
+
+static void sun8i_a23_get_pll1_factors(u32 *freq, u32 parent_rate,
+                                  u8 *n, u8 *k, u8 *m, u8 *p)
+{
+       u8 div;
+
+       /* Normalize value to a 6M multiple */
+       div = *freq / 6000000;
+       *freq = 6000000 * div;
+
+       /* we were called to round the frequency, we can now return */
+       if (n == NULL)
+               return;
+
+       /* m is always zero for pll1 */
+       *m = 0;
+
+       /* k is 1 only on these cases */
+       if (*freq >= 768000000 || *freq == 42000000 || *freq == 54000000)
+               *k = 1;
+       else
+               *k = 0;
+
+       /* p will be 2 for divs under 20 and odd divs under 32 */
+       if (div < 20 || (div < 32 && (div & 1)))
+               *p = 2;
+
+       /* p will be 1 for even divs under 32, divs under 40 and odd pairs
+        * of divs between 40-62 */
+       else if (div < 40 || (div < 64 && (div & 2)))
+               *p = 1;
+
+       /* any other entries have p = 0 */
+       else
+               *p = 0;
+
+       /* calculate a suitable n based on k and p */
+       div <<= *p;
+       div /= (*k + 1);
+       *n = div / 4 - 1;
+}
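+
+/*
+ * e.g. for 1008 MHz: div = 168, so k = 1, m = 0, p = 0 and
+ * n = 168 / 2 / 4 - 1 = 20, giving 24 MHz * 21 * 2 = 1008 MHz.
+ */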
+
 /**
  * sun4i_get_pll5_factors() - calculates n, k factors for PLL5
  * PLL5 rate is calculated as follows
@@ -422,6 +470,18 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
        .mwidth = 2,
 };
 
+static struct clk_factors_config sun8i_a23_pll1_config = {
+       .nshift = 8,
+       .nwidth = 5,
+       .kshift = 4,
+       .kwidth = 2,
+       .mshift = 0,
+       .mwidth = 2,
+       .pshift = 16,
+       .pwidth = 2,
+       .n_start = 1,
+};
+
 static struct clk_factors_config sun4i_pll5_config = {
        .nshift = 8,
        .nwidth = 5,
@@ -471,6 +531,12 @@ static const struct factors_data sun6i_a31_pll1_data __initconst = {
        .getter = sun6i_a31_get_pll1_factors,
 };
 
+static const struct factors_data sun8i_a23_pll1_data __initconst = {
+       .enable = 31,
+       .table = &sun8i_a23_pll1_config,
+       .getter = sun8i_a23_get_pll1_factors,
+};
+
 static const struct factors_data sun7i_a20_pll4_data __initconst = {
        .enable = 31,
        .table = &sun4i_pll5_config,
@@ -527,7 +593,7 @@ static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
        struct clk_hw *mux_hw = NULL;
        const char *clk_name = node->name;
        const char *parents[SUNXI_MAX_PARENTS];
-       void *reg;
+       void __iomem *reg;
        int i = 0;
 
        reg = of_iomap(node, 0);
@@ -632,7 +698,7 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
        struct clk *clk;
        const char *clk_name = node->name;
        const char *parents[SUNXI_MAX_PARENTS];
-       void *reg;
+       void __iomem *reg;
        int i = 0;
 
        reg = of_iomap(node, 0);
@@ -664,6 +730,7 @@ struct div_data {
        u8      shift;
        u8      pow;
        u8      width;
+       const struct clk_div_table *table;
 };
 
 static const struct div_data sun4i_axi_data __initconst = {
@@ -672,6 +739,23 @@ static const struct div_data sun4i_axi_data __initconst = {
        .width  = 2,
 };
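+
+/*
+ * The A23 AXI divider is not a plain power-of-two field: register
+ * values 4-7 all saturate at divide-by-4, hence the explicit table.
+ */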
 
+static const struct clk_div_table sun8i_a23_axi_table[] __initconst = {
+       { .val = 0, .div = 1 },
+       { .val = 1, .div = 2 },
+       { .val = 2, .div = 3 },
+       { .val = 3, .div = 4 },
+       { .val = 4, .div = 4 },
+       { .val = 5, .div = 4 },
+       { .val = 6, .div = 4 },
+       { .val = 7, .div = 4 },
+       { } /* sentinel */
+};
+
+static const struct div_data sun8i_a23_axi_data __initconst = {
+       .width  = 3,
+       .table  = sun8i_a23_axi_table,
+};
+
 static const struct div_data sun4i_ahb_data __initconst = {
        .shift  = 4,
        .pow    = 1,
@@ -696,7 +780,7 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
        struct clk *clk;
        const char *clk_name = node->name;
        const char *clk_parent;
-       void *reg;
+       void __iomem *reg;
 
        reg = of_iomap(node, 0);
 
@@ -704,10 +788,10 @@ static void __init sunxi_divider_clk_setup(struct device_node *node,
 
        of_property_read_string(node, "clock-output-names", &clk_name);
 
-       clk = clk_register_divider(NULL, clk_name, clk_parent, 0,
-                                  reg, data->shift, data->width,
-                                  data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
-                                  &clk_lock);
+       clk = clk_register_divider_table(NULL, clk_name, clk_parent, 0,
+                                        reg, data->shift, data->width,
+                                        data->pow ? CLK_DIVIDER_POWER_OF_TWO : 0,
+                                        data->table, &clk_lock);
        if (clk) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
                clk_register_clkdev(clk, clk_name, NULL);
@@ -804,6 +888,10 @@ static const struct gates_data sun7i_a20_ahb_gates_data __initconst = {
        .mask = { 0x12f77fff, 0x16ff3f },
 };
 
+static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = {
+       .mask = {0x25386742, 0x2505111},
+};
+
 static const struct gates_data sun4i_apb0_gates_data __initconst = {
        .mask = {0x4EF},
 };
@@ -836,6 +924,10 @@ static const struct gates_data sun6i_a31_apb1_gates_data __initconst = {
        .mask = {0x3031},
 };
 
+static const struct gates_data sun8i_a23_apb1_gates_data __initconst = {
+       .mask = {0x3021},
+};
+
 static const struct gates_data sun6i_a31_apb2_gates_data __initconst = {
        .mask = {0x3F000F},
 };
@@ -844,6 +936,10 @@ static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
        .mask = { 0xff80ff },
 };
 
+static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
+       .mask = {0x1F0007},
+};
+
 static const struct gates_data sun4i_a10_usb_gates_data __initconst = {
        .mask = {0x1C0},
        .reset_mask = 0x07,
@@ -866,11 +962,10 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
        struct gates_reset_data *reset_data;
        const char *clk_parent;
        const char *clk_name;
-       void *reg;
+       void __iomem *reg;
        int qty;
        int i = 0;
        int j = 0;
-       int ignore;
 
        reg = of_iomap(node, 0);
 
@@ -891,14 +986,12 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
                of_property_read_string_index(node, "clock-output-names",
                                              j, &clk_name);
 
-               /* No driver claims this clock, but it should remain gated */
-               ignore = !strcmp("ahb_sdram", clk_name) ? CLK_IGNORE_UNUSED : 0;
-
                clk_data->clks[i] = clk_register_gate(NULL, clk_name,
-                                                     clk_parent, ignore,
+                                                     clk_parent, 0,
                                                      reg + 4 * (i/32), i % 32,
                                                      0, &clk_lock);
                WARN_ON(IS_ERR(clk_data->clks[i]));
+               clk_register_clkdev(clk_data->clks[i], clk_name, NULL);
 
                j++;
        }
@@ -991,7 +1084,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
        struct clk_gate *gate = NULL;
        struct clk_fixed_factor *fix_factor;
        struct clk_divider *divider;
-       void *reg;
+       void __iomem *reg;
        int i = 0;
        int flags, clkflags;
 
@@ -1102,6 +1195,7 @@ free_clkdata:
 static const struct of_device_id clk_factors_match[] __initconst = {
        {.compatible = "allwinner,sun4i-a10-pll1-clk", .data = &sun4i_pll1_data,},
        {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
+       {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
        {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
        {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
        {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
@@ -1113,6 +1207,7 @@ static const struct of_device_id clk_factors_match[] __initconst = {
 /* Matches for divider clocks */
 static const struct of_device_id clk_div_match[] __initconst = {
        {.compatible = "allwinner,sun4i-a10-axi-clk", .data = &sun4i_axi_data,},
+       {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
        {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
        {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
        {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
@@ -1142,6 +1237,7 @@ static const struct of_device_id clk_gates_match[] __initconst = {
        {.compatible = "allwinner,sun5i-a13-ahb-gates-clk", .data = &sun5i_a13_ahb_gates_data,},
        {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
        {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
+       {.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,},
        {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
        {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
        {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
@@ -1151,7 +1247,9 @@ static const struct of_device_id clk_gates_match[] __initconst = {
        {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
        {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
        {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
+       {.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,},
        {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
+       {.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
        {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
        {.compatible = "allwinner,sun5i-a13-usb-clk", .data = &sun5i_a13_usb_gates_data,},
        {.compatible = "allwinner,sun6i-a31-usb-clk", .data = &sun6i_a31_usb_gates_data,},
@@ -1202,6 +1300,7 @@ static void __init sunxi_init_clocks(const char *clocks[], int nclocks)
 
 static const char *sun4i_a10_critical_clocks[] __initdata = {
        "pll5_ddr",
+       "ahb_sdram",
 };
 
 static void __init sun4i_a10_init_clocks(struct device_node *node)
@@ -1214,6 +1313,7 @@ CLK_OF_DECLARE(sun4i_a10_clk_init, "allwinner,sun4i-a10", sun4i_a10_init_clocks)
 static const char *sun5i_critical_clocks[] __initdata = {
        "mbus",
        "pll5_ddr",
+       "ahb_sdram",
 };
 
 static void __init sun5i_init_clocks(struct device_node *node)
@@ -1236,3 +1336,4 @@ static void __init sun6i_init_clocks(struct device_node *node)
                          ARRAY_SIZE(sun6i_critical_clocks));
 }
 CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
+CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
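
The per-gate CLK_IGNORE_UNUSED special case for ahb_sdram is dropped in favour of two mechanisms visible above: every gate is now registered with clkdev so it can be looked up by name, and "ahb_sdram" is added to each SoC's critical-clocks array, on which the init code takes a permanent reference. A minimal sketch of that consumption, assuming standard common-clock-framework calls (the helper name is illustrative):

/* Illustrative only: take a permanent reference on "critical" clocks. */
static void __init protect_critical_clocks(const char *names[], int n)
{
	struct clk *clk;
	int i;

	for (i = 0; i < n; i++) {
		clk = clk_get(NULL, names[i]);	/* resolved via clkdev */
		if (IS_ERR(clk)) {
			pr_warn("could not get critical clock %s\n",
				names[i]);
			continue;
		}
		clk_prepare_enable(clk);	/* reference is never dropped */
	}
}
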
index 637b62c..c7c6d8f 100644
 #define XUSBIO_PLL_CFG0_SEQ_ENABLE             BIT(24)
 #define XUSBIO_PLL_CFG0_SEQ_START_STATE                BIT(25)
 
+#define SATA_PLL_CFG0          0x490
+#define SATA_PLL_CFG0_PADPLL_RESET_SWCTL       BIT(0)
+#define SATA_PLL_CFG0_PADPLL_USE_LOCKDET       BIT(2)
+#define SATA_PLL_CFG0_SEQ_ENABLE               BIT(24)
+#define SATA_PLL_CFG0_SEQ_START_STATE          BIT(25)
+
 #define PLLE_MISC_PLLE_PTS     BIT(8)
 #define PLLE_MISC_IDDQ_SW_VALUE        BIT(13)
 #define PLLE_MISC_IDDQ_SW_CTRL BIT(14)
@@ -1361,6 +1367,19 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw)
        val |= XUSBIO_PLL_CFG0_SEQ_ENABLE;
        pll_writel(val, XUSBIO_PLL_CFG0, pll);
 
+       /* Enable hw control of SATA pll */
+       val = pll_readl(SATA_PLL_CFG0, pll);
+       val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL;
+       val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET;
+       val |= SATA_PLL_CFG0_SEQ_START_STATE;
+       pll_writel(val, SATA_PLL_CFG0, pll);
+
+       udelay(1);
+
+       val = pll_readl(SATA_PLL_CFG0, pll);
+       val |= SATA_PLL_CFG0_SEQ_ENABLE;
+       pll_writel(val, SATA_PLL_CFG0, pll);
+
 out:
        if (pll->lock)
                spin_unlock_irqrestore(pll->lock, flags);
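
The SATA block mirrors the XUSBIO sequence directly above it: program the start state and lock-detect routing first, wait 1 µs, and only then set SEQ_ENABLE in a second read-modify-write, so the hardware sequencer never samples a half-programmed configuration. The same two-phase shape as a hedged helper sketch (pll_readl()/pll_writel() are the file-local accessors used in the hunk; the helper itself is illustrative):

/* Illustrative only: two-phase hand-off of the SATA pad PLL. */
static void sata_padpll_enable_hw_seq(struct tegra_clk_pll *pll)
{
	u32 val;

	/* Phase 1: program everything except the enable bit. */
	val = pll_readl(SATA_PLL_CFG0, pll);
	val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL;  /* HW owns the reset */
	val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET;   /* lock detect gates it */
	val |= SATA_PLL_CFG0_SEQ_START_STATE;
	pll_writel(val, SATA_PLL_CFG0, pll);

	udelay(1);	/* let the start state latch before enabling */

	/* Phase 2: only now turn the sequencer on. */
	val = pll_readl(SATA_PLL_CFG0, pll);
	val |= SATA_PLL_CFG0_SEQ_ENABLE;
	pll_writel(val, SATA_PLL_CFG0, pll);
}
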
index adf6b81..37f32c4 100644
@@ -469,7 +469,7 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX("sata", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SATA, 124, TEGRA_PERIPH_ON_APB, tegra_clk_sata),
        MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
        MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
-       MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+       MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
        MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
        MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
        MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
@@ -487,7 +487,7 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX8("extern2", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN2, 121, 0, tegra_clk_extern2),
        MUX8("extern3", mux_plla_clk32_pllp_clkm_plle, CLK_SOURCE_EXTERN3, 122, 0, tegra_clk_extern3),
        MUX8("soc_therm", mux_pllm_pllc_pllp_plla, CLK_SOURCE_SOC_THERM, 78, TEGRA_PERIPH_ON_APB, tegra_clk_soc_therm),
-       MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor_8),
+       MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 164, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor_8),
        MUX8("isp", mux_pllm_pllc_pllp_plla_clkm_pllc4, CLK_SOURCE_ISP, 23, TEGRA_PERIPH_ON_APB, tegra_clk_isp_8),
        MUX8("entropy", mux_pllp_clkm1, CLK_SOURCE_ENTROPY, 149,  0, tegra_clk_entropy),
        MUX8("hdmi_audio", mux_pllp3_pllc_clkm, CLK_SOURCE_HDMI_AUDIO, 176, TEGRA_PERIPH_NO_RESET, tegra_clk_hdmi_audio),
index b9c8ba2..f760f31 100644
 /* Tegra CPU clock and reset control regs */
 #define CLK_RST_CONTROLLER_CPU_CMPLX_STATUS    0x470
 
+#define MUX8(_name, _parents, _offset, \
+                            _clk_num, _gate_flags, _clk_id)    \
+       TEGRA_INIT_DATA_TABLE(_name, NULL, NULL, _parents, _offset,\
+                       29, MASK(3), 0, 0, 8, 1, TEGRA_DIVIDER_ROUND_UP,\
+                       _clk_num, _gate_flags, _clk_id, _parents##_idx, 0,\
+                       NULL)
+
 #ifdef CONFIG_PM_SLEEP
 static struct cpu_clk_suspend_context {
        u32 clk_csite_src;
@@ -777,7 +784,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_spdif_in] = { .dt_id = TEGRA114_CLK_SPDIF_IN, .present = true },
        [tegra_clk_spdif_out] = { .dt_id = TEGRA114_CLK_SPDIF_OUT, .present = true },
        [tegra_clk_vi_8] = { .dt_id = TEGRA114_CLK_VI, .present = true },
-       [tegra_clk_vi_sensor_8] = { .dt_id = TEGRA114_CLK_VI_SENSOR, .present = true },
        [tegra_clk_fuse] = { .dt_id = TEGRA114_CLK_FUSE, .present = true },
        [tegra_clk_fuse_burn] = { .dt_id = TEGRA114_CLK_FUSE_BURN, .present = true },
        [tegra_clk_clk_32k] = { .dt_id = TEGRA114_CLK_CLK_32K, .present = true },
@@ -923,6 +929,13 @@ static struct tegra_devclk devclks[] __initdata = {
        { .dev_id = "timer", .dt_id = TEGRA114_CLK_TIMER },
 };
 
+static const char *mux_pllm_pllc2_c_c3_pllp_plla[] = {
+       "pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0"
+};
+static u32 mux_pllm_pllc2_c_c3_pllp_plla_idx[] = {
+       [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
+};
+
 static struct clk **clks;
 
 static unsigned long osc_freq;
@@ -1178,10 +1191,18 @@ static void __init tegra114_pll_init(void __iomem *clk_base,
        clks[TEGRA114_CLK_PLL_E_OUT0] = clk;
 }
 
+#define CLK_SOURCE_VI_SENSOR 0x1a8
+
+static struct tegra_periph_init_data tegra_periph_clk_list[] = {
+       MUX8("vi_sensor", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR, 20, TEGRA_PERIPH_NO_RESET, TEGRA114_CLK_VI_SENSOR),
+};
+
 static __init void tegra114_periph_clk_init(void __iomem *clk_base,
                                            void __iomem *pmc_base)
 {
        struct clk *clk;
+       struct tegra_periph_init_data *data;
+       int i;
 
        /* xusb_ss_div2 */
        clk = clk_register_fixed_factor(NULL, "xusb_ss_div2", "xusb_ss_src", 0,
@@ -1209,6 +1230,14 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
                               clk_base + CLK_SOURCE_EMC,
                               29, 3, 0, NULL);
 
+       for (i = 0; i < ARRAY_SIZE(tegra_periph_clk_list); i++) {
+               data = &tegra_periph_clk_list[i];
+               clk = tegra_clk_register_periph(data->name,
+                       data->p.parent_names, data->num_parents,
+                       &data->periph, clk_base, data->offset, data->flags);
+               clks[data->clk_id] = clk;
+       }
+
        tegra_periph_clk_init(clk_base, pmc_base, tegra114_clks,
                                &pll_p_params);
 }
index 80efe51..9525c68 100644
@@ -869,7 +869,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_spdif_in] = { .dt_id = TEGRA124_CLK_SPDIF_IN, .present = true },
        [tegra_clk_spdif_out] = { .dt_id = TEGRA124_CLK_SPDIF_OUT, .present = true },
        [tegra_clk_vi_9] = { .dt_id = TEGRA124_CLK_VI, .present = true },
-       [tegra_clk_vi_sensor] = { .dt_id = TEGRA124_CLK_VI_SENSOR, .present = true },
+       [tegra_clk_vi_sensor_8] = { .dt_id = TEGRA124_CLK_VI_SENSOR, .present = true },
        [tegra_clk_fuse] = { .dt_id = TEGRA124_CLK_FUSE, .present = true },
        [tegra_clk_fuse_burn] = { .dt_id = TEGRA124_CLK_FUSE_BURN, .present = true },
        [tegra_clk_clk_32k] = { .dt_id = TEGRA124_CLK_CLK_32K, .present = true },
@@ -1369,6 +1369,14 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        {TEGRA124_CLK_XUSB_HS_SRC, TEGRA124_CLK_PLL_U_60M, 60000000, 0},
        {TEGRA124_CLK_XUSB_FALCON_SRC, TEGRA124_CLK_PLL_RE_OUT, 224000000, 0},
        {TEGRA124_CLK_XUSB_HOST_SRC, TEGRA124_CLK_PLL_RE_OUT, 112000000, 0},
+       {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
+       {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
+       {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
+       {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
+       {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
+       {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
+       {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
+       {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
        /* This MUST be the last entry. */
        {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
 };
index c0a7d77..bf452b6 100644
@@ -277,6 +277,12 @@ void __init tegra_register_devclks(struct tegra_devclk *dev_clks, int num)
        for (i = 0; i < num; i++, dev_clks++)
                clk_register_clkdev(clks[dev_clks->dt_id], dev_clks->con_id,
                                dev_clks->dev_id);
+
+       for (i = 0; i < clk_num; i++) {
+               if (!IS_ERR_OR_NULL(clks[i]))
+                       clk_register_clkdev(clks[i], __clk_get_name(clks[i]),
+                               "tegra-clk-debug");
+       }
 }
 
 struct clk ** __init tegra_lookup_dt_id(int clk_id,
index 5428c9c..72d9727 100644
@@ -77,13 +77,11 @@ static int dra7_apll_enable(struct clk_hw *hw)
        if (i == MAX_APLL_WAIT_TRIES) {
                pr_warn("clock: %s failed transition to '%s'\n",
                        clk_name, (state) ? "locked" : "bypassed");
-       } else {
+               r = -EBUSY;
+       } else
                pr_debug("clock: %s transition to '%s' in %d loops\n",
                         clk_name, (state) ? "locked" : "bypassed", i);
 
-               r = 0;
-       }
-
        return r;
 }
 
@@ -338,7 +336,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
        const char *parent_name;
        u32 val;
 
-       ad = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       ad = kzalloc(sizeof(*ad), GFP_KERNEL);
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        init = kzalloc(sizeof(*init), GFP_KERNEL);
 
index e158133..62ac8f6 100644
@@ -16,8 +16,9 @@
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
-#define DRA7_DPLL_ABE_DEFFREQ                          361267200
+#define DRA7_DPLL_ABE_DEFFREQ                          180633600
 #define DRA7_DPLL_GMAC_DEFFREQ                         1000000000
+#define DRA7_DPLL_USB_DEFFREQ                          960000000
 
 
 static struct ti_dt_clk dra7xx_clks[] = {
@@ -322,10 +323,25 @@ int __init dra7xx_dt_clk_init(void)
        if (rc)
                pr_err("%s: failed to configure ABE DPLL!\n", __func__);
 
+       dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck");
+       rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2);
+       if (rc)
+               pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__);
+
        dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck");
        rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
        if (rc)
                pr_err("%s: failed to configure GMAC DPLL!\n", __func__);
 
+       dpll_ck = clk_get_sys(NULL, "dpll_usb_ck");
+       rc = clk_set_rate(dpll_ck, DRA7_DPLL_USB_DEFFREQ);
+       if (rc)
+               pr_err("%s: failed to configure USB DPLL!\n", __func__);
+
+       dpll_ck = clk_get_sys(NULL, "dpll_usb_m2_ck");
+       rc = clk_set_rate(dpll_ck, DRA7_DPLL_USB_DEFFREQ/2);
+       if (rc)
+               pr_err("%s: failed to set USB_DPLL M2 OUT\n", __func__);
+
        return rc;
 }
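
With the USB DPLL and the ABE M2X2 output added, the same get/set-rate/report sequence now appears five times in dra7xx_dt_clk_init(). A hedged sketch of how the repetition could be factored, using the same clk_get_sys()/clk_set_rate() calls (helper name illustrative; the existing code also skips the IS_ERR() check that a standalone helper should carry):

/* Illustrative only: configure one DPLL output to its default rate. */
static int __init dra7xx_set_default_rate(const char *name,
					  unsigned long rate)
{
	struct clk *clk;
	int rc;

	clk = clk_get_sys(NULL, name);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to get %s\n", __func__, name);
		return PTR_ERR(clk);
	}

	rc = clk_set_rate(clk, rate);
	if (rc)
		pr_err("%s: failed to configure %s!\n", __func__, name);

	return rc;
}

With that, the ABE M2X2 case above would read dra7xx_set_default_rate("dpll_abe_m2x2_ck", DRA7_DPLL_ABE_DEFFREQ * 2);
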
index abd956d..79791e1 100644
@@ -161,7 +161,8 @@ cleanup:
 }
 
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
-       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX)
+       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
+       defined(CONFIG_SOC_AM43XX)
 /**
  * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
  * @node: device node for this clock
@@ -322,7 +323,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
               of_ti_omap4_dpll_x2_setup);
 #endif
 
-#ifdef CONFIG_SOC_AM33XX
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
 {
        ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
index 0197a47..e9d650e 100644
@@ -160,7 +160,7 @@ static void of_mux_clk_setup(struct device_node *node)
        u8 clk_mux_flags = 0;
        u32 mask = 0;
        u32 shift = 0;
-       u32 flags = 0;
+       u32 flags = CLK_SET_RATE_NO_REPARENT;
 
        num_parents = of_clk_get_parent_count(node);
        if (num_parents < 2) {
index 8d64200..ab51bf2 100644
@@ -153,19 +153,16 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
 }
 
 /* Clocksource handling */
-static void exynos4_mct_frc_start(u32 hi, u32 lo)
+static void exynos4_mct_frc_start(void)
 {
        u32 reg;
 
-       exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
-       exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
-
        reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
        reg |= MCT_G_TCON_START;
        exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
 }
 
-static cycle_t exynos4_frc_read(struct clocksource *cs)
+static cycle_t notrace _exynos4_frc_read(void)
 {
        unsigned int lo, hi;
        u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
@@ -179,9 +176,14 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
        return ((cycle_t)hi << 32) | lo;
 }
 
+static cycle_t exynos4_frc_read(struct clocksource *cs)
+{
+       return _exynos4_frc_read();
+}
+
 static void exynos4_frc_resume(struct clocksource *cs)
 {
-       exynos4_mct_frc_start(0, 0);
+       exynos4_mct_frc_start();
 }
 
 struct clocksource mct_frc = {
@@ -195,12 +197,23 @@ struct clocksource mct_frc = {
 
 static u64 notrace exynos4_read_sched_clock(void)
 {
-       return exynos4_frc_read(&mct_frc);
+       return _exynos4_frc_read();
+}
+
+static struct delay_timer exynos4_delay_timer;
+
+static cycles_t exynos4_read_current_timer(void)
+{
+       return _exynos4_frc_read();
 }
 
 static void __init exynos4_clocksource_init(void)
 {
-       exynos4_mct_frc_start(0, 0);
+       exynos4_mct_frc_start();
+
+       exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
+       exynos4_delay_timer.freq = clk_rate;
+       register_current_timer_delay(&exynos4_delay_timer);
 
        if (clocksource_register_hz(&mct_frc, clk_rate))
                panic("%s: can't register clocksource\n", mct_frc.name);
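
Both the sched_clock and delay-timer paths now funnel into _exynos4_frc_read(), which samples a 64-bit free-running counter through two 32-bit registers with the classic hi/lo/hi2 retry. The algorithm in a self-contained form (volatile globals stand in for the MCT_G_CNT_U/L register pair):

#include <stdint.h>

/* Stand-ins for the EXYNOS4_MCT_G_CNT_U/L register pair. */
static volatile uint32_t cnt_hi, cnt_lo;

static uint64_t read_counter64(void)
{
	uint32_t hi, lo, hi2 = cnt_hi;

	/*
	 * Re-read until the upper word is stable across the lower-word
	 * read, so a carry from lo into hi cannot tear the sample.
	 */
	do {
		hi = hi2;
		lo = cnt_lo;
		hi2 = cnt_hi;
	} while (hi != hi2);

	return ((uint64_t)hi << 32) | lo;
}
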
index e473d65..ffe350f 100644
@@ -186,6 +186,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
 config GENERIC_CPUFREQ_CPU0
        tristate "Generic CPU0 cpufreq driver"
        depends on HAVE_CLK && OF
+       # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y:
+       depends on !CPU_THERMAL || THERMAL
        select PM_OPP
        help
          This adds a generic cpufreq driver for CPU0 frequency management.
index ebac671..7364a53 100644
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
        tristate "Freescale i.MX6 cpufreq support"
        depends on ARCH_MXC
        depends on REGULATOR_ANATOP
+       select PM_OPP
        help
          This adds cpufreq driver support for Freescale i.MX6 series SoCs.
 
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
          If in doubt, say Y.
 
 config ARM_KIRKWOOD_CPUFREQ
-       def_bool MACH_KIRKWOOD
+       def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
        help
          This adds the CPUFreq driver for Marvell Kirkwood
          SoCs.
index 738c8b7..db6d9a2 100644
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)  += arm_big_little.o
 # LITTLE drivers, so that it is probed last.
 obj-$(CONFIG_ARM_DT_BL_CPUFREQ)                += arm_big_little_dt.o
 
-obj-$(CONFIG_ARCH_DAVINCI_DA850)       += davinci-cpufreq.o
+obj-$(CONFIG_ARCH_DAVINCI)             += davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)         += dbx500-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)       += exynos-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)   += exynos4210-cpufreq.o
index ee1ae30..86beda9 100644
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                goto out_put_reg;
        }
 
-       ret = of_init_opp_table(cpu_dev);
-       if (ret) {
-               pr_err("failed to init OPP table: %d\n", ret);
-               goto out_put_clk;
-       }
+       /* OPPs might be populated at runtime, so don't check for errors here */
+       of_init_opp_table(cpu_dev);
 
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
index aed2b0c..6f02485 100644
@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         * the creation of a brand new one. So we need to perform this update
         * by invoking update_policy_cpu().
         */
-       if (recover_policy && cpu != policy->cpu)
+       if (recover_policy && cpu != policy->cpu) {
                update_policy_cpu(policy, cpu);
-       else
+               WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
+       } else {
                policy->cpu = cpu;
+       }
 
        cpumask_copy(policy->cpus, cpumask_of(cpu));
 
@@ -2242,10 +2244,8 @@ int cpufreq_update_policy(unsigned int cpu)
        struct cpufreq_policy new_policy;
        int ret;
 
-       if (!policy) {
-               ret = -ENODEV;
-               goto no_policy;
-       }
+       if (!policy)
+               return -ENODEV;
 
        down_write(&policy->rwsem);
 
@@ -2264,7 +2264,7 @@ int cpufreq_update_policy(unsigned int cpu)
                new_policy.cur = cpufreq_driver->get(cpu);
                if (WARN_ON(!new_policy.cur)) {
                        ret = -EIO;
-                       goto no_policy;
+                       goto unlock;
                }
 
                if (!policy->cur) {
@@ -2279,10 +2279,10 @@ int cpufreq_update_policy(unsigned int cpu)
 
        ret = cpufreq_set_policy(policy, &new_policy);
 
+unlock:
        up_write(&policy->rwsem);
 
        cpufreq_cpu_put(policy);
-no_policy:
        return ret;
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
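
The two hunks above straighten cpufreq_update_policy()'s error handling: a bare early return before policy->rwsem is taken, and a single unlock label once it is, so no exit path can leave the rwsem held or skip the cpufreq_cpu_put(). The skeleton, with first_step()/second_step() as hypothetical stand-ins for the real body:

/* Illustrative only: one unlock label serves every post-lock failure. */
static int first_step(struct cpufreq_policy *policy);	/* hypothetical */
static int second_step(struct cpufreq_policy *policy);	/* hypothetical */

static int update_example(struct cpufreq_policy *policy)
{
	int ret;

	if (!policy)
		return -ENODEV;		/* nothing locked yet */

	down_write(&policy->rwsem);

	ret = first_step(policy);
	if (ret)
		goto unlock;

	ret = second_step(policy);

unlock:
	up_write(&policy->rwsem);
	cpufreq_cpu_put(policy);
	return ret;
}
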
index 4e7f492..86631cb 100644
@@ -128,6 +128,7 @@ static struct pstate_funcs pstate_funcs;
 
 struct perf_limits {
        int no_turbo;
+       int turbo_disabled;
        int max_perf_pct;
        int min_perf_pct;
        int32_t max_perf;
@@ -196,10 +197,7 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
        pid->last_err = fp_error;
 
        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
-       if (result >= 0)
-               result = result + (1 << (FRAC_BITS-1));
-       else
-               result = result - (1 << (FRAC_BITS-1));
+       result = result + (1 << (FRAC_BITS-1));
        return (signed int)fp_toint(result);
 }
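
The dropped branch was special-casing negative results, but with an arithmetic right shift (which is what fp_toint() performs), unconditionally adding half of one fractional LSB before truncating already rounds to nearest for both signs. A self-contained illustration with the driver's 8 fractional bits:

#include <stdio.h>

#define FRAC_BITS 8

/*
 * Round a signed fixed-point value (8 fractional bits) to the nearest
 * integer; assumes an arithmetic right shift, as fp_toint() uses.
 */
static int fp_round(int fp)
{
	return (fp + (1 << (FRAC_BITS - 1))) >> FRAC_BITS;
}

int main(void)
{
	printf("%d\n", fp_round((3 << FRAC_BITS) | 0x80));   /*  3.50 ->  4 */
	printf("%d\n", fp_round(-(3 << FRAC_BITS) + 0x40));  /* -2.75 -> -3 */
	return 0;
}
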
 
@@ -290,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
        if (ret != 1)
                return -EINVAL;
        limits.no_turbo = clamp_t(int, input, 0 , 1);
-
+       if (limits.turbo_disabled) {
+               pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+               limits.no_turbo = limits.turbo_disabled;
+       }
        return count;
 }
 
@@ -360,21 +361,21 @@ static int byt_get_min_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 8) & 0x3F;
+       return (value >> 8) & 0x7F;
 }
 
 static int byt_get_max_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 16) & 0x3F;
+       return (value >> 16) & 0x7F;
 }
 
 static int byt_get_turbo_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_TURBO_RATIOS, value);
-       return value & 0x3F;
+       return value & 0x7F;
 }
 
 static void byt_set_pstate(struct cpudata *cpudata, int pstate)
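
All three masks widen from 0x3F to 0x7F because the ratio and VID fields in the Baytrail MSRs are 7 bits wide; a 6-bit mask silently truncates any value above 63. The extraction pattern, sketched with an illustrative macro around the same rdmsrl() accessor:

/* Illustrative only: the Baytrail ratio fields are 7 bits wide. */
#define BYT_MSR_FIELD7(val, shift)	(((val) >> (shift)) & 0x7F)

static int byt_get_max_pstate_sketch(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return BYT_MSR_FIELD7(value, 16);	/* was & 0x3F: lost bit 6 */
}
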
@@ -384,7 +385,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        u32 vid;
 
        val = pstate << 8;
-       if (limits.no_turbo)
+       if (limits.no_turbo && !limits.turbo_disabled)
                val |= (u64)1 << 32;
 
        vid_fp = cpudata->vid.min + mul_fp(
@@ -408,8 +409,8 @@ static void byt_get_vid(struct cpudata *cpudata)
 
 
        rdmsrl(BYT_VIDS, value);
-       cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
-       cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
+       cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+       cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
@@ -451,7 +452,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
        u64 val;
 
        val = pstate << 8;
-       if (limits.no_turbo)
+       if (limits.no_turbo && !limits.turbo_disabled)
                val |= (u64)1 << 32;
 
        wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -699,9 +700,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
        cpu = all_cpu_data[cpunum];
 
-       intel_pstate_get_cpu_pstates(cpu);
-
        cpu->cpu = cpunum;
+       intel_pstate_get_cpu_pstates(cpu);
 
        init_timer_deferrable(&cpu->timer);
        cpu->timer.function = intel_pstate_timer_func;
@@ -744,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                limits.min_perf = int_tofp(1);
                limits.max_perf_pct = 100;
                limits.max_perf = int_tofp(1);
-               limits.no_turbo = 0;
+               limits.no_turbo = limits.turbo_disabled;
                return 0;
        }
        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -787,6 +787,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
        int rc;
+       u64 misc_en;
 
        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
@@ -794,8 +795,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        cpu = all_cpu_data[policy->cpu];
 
-       if (!limits.no_turbo &&
-               limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+       rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+       if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+               cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+               limits.turbo_disabled = 1;
+               limits.no_turbo = 1;
+       }
+       if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
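
The new init-time check treats turbo as permanently unavailable when either the BIOS has set the disable bit in MSR_IA32_MISC_ENABLE or the part reports identical max and turbo pstates; limits.no_turbo is then pinned by the store_no_turbo() hunk above. The predicate in isolation, as a sketch over the structures shown in these hunks:

/* Illustrative only: can this part ever run above its max pstate? */
static bool turbo_unavailable(struct cpudata *cpu)
{
	u64 misc_en;

	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);

	return (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE) ||
	       cpu->pstate.max_pstate == cpu->pstate.turbo_pstate;
}
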
index 5463767..b5befc2 100644
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
                        name = "K4S641632D";
                if (machine_is_h3100())
                        name = "KM416S4030CT";
-               if (machine_is_jornada720())
+               if (machine_is_jornada720() || machine_is_h3600())
                        name = "K4S281632B-1H";
                if (machine_is_nanoengine())
                        name = "MT48LC8M16A2TG-75";
index 28587d0..a5fba02 100644
@@ -55,7 +55,7 @@ static struct cpuidle_driver armada_370_xp_idle_driver = {
                .power_usage            = 50,
                .target_residency       = 100,
                .flags                  = CPUIDLE_FLAG_TIME_VALID,
-               .name                   = "MV CPU IDLE",
+               .name                   = "Idle",
                .desc                   = "CPU power down",
        },
        .states[2]              = {
@@ -65,7 +65,7 @@ static struct cpuidle_driver armada_370_xp_idle_driver = {
                .target_residency       = 1000,
                .flags                  = CPUIDLE_FLAG_TIME_VALID |
                                                ARMADA_370_XP_FLAG_DEEP_IDLE,
-               .name                   = "MV CPU DEEP IDLE",
+               .name                   = "Deep idle",
                .desc                   = "CPU and L2 Fabric power down",
        },
        .state_count = ARMADA_370_XP_MAX_STATES,
index 02f177a..2fb0fdf 100644
@@ -391,7 +391,7 @@ config CRYPTO_DEV_ATMEL_SHA
 
 config CRYPTO_DEV_CCP
        bool "Support for AMD Cryptographic Coprocessor"
-       depends on X86 && PCI
+       depends on (X86 && PCI) || ARM64
        default n
        help
          The AMD Cryptographic Coprocessor provides hardware support
@@ -418,4 +418,22 @@ config CRYPTO_DEV_MXS_DCP
          To compile this driver as a module, choose M here: the module
          will be called mxs-dcp.
 
+source "drivers/crypto/qat/Kconfig"
+
+config CRYPTO_DEV_QCE
+       tristate "Qualcomm crypto engine accelerator"
+       depends on (ARCH_QCOM || COMPILE_TEST) && HAS_DMA && HAS_IOMEM
+       select CRYPTO_AES
+       select CRYPTO_DES
+       select CRYPTO_ECB
+       select CRYPTO_CBC
+       select CRYPTO_XTS
+       select CRYPTO_CTR
+       select CRYPTO_ALGAPI
+       select CRYPTO_BLKCIPHER
+       help
+         This driver supports Qualcomm crypto engine accelerator
+         hardware. To compile this driver as a module, choose M here. The
+         module will be called qcrypto.
+
 endif # CRYPTO_HW
index 482f090..3924f93 100644
@@ -23,3 +23,5 @@ obj-$(CONFIG_CRYPTO_DEV_S5P) += s5p-sss.o
 obj-$(CONFIG_CRYPTO_DEV_SAHARA) += sahara.o
 obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
index 37f9cc9..e4c6c58 100644
@@ -1292,7 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
                .of_match_table = crypto4xx_match,
        },
        .probe          = crypto4xx_probe,
-       .remove         = crypto4xx_remove,
+       .remove         = __exit_p(crypto4xx_remove),
 };
 
 module_platform_driver(crypto4xx_driver);
index 0618be0..9a4f69e 100644
@@ -1353,7 +1353,6 @@ static struct crypto_platform_data *atmel_sha_of_init(struct platform_device *pd
                                        GFP_KERNEL);
        if (!pdata->dma_slave) {
                dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
-               devm_kfree(&pdev->dev, pdata);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1375,7 +1374,8 @@ static int atmel_sha_probe(struct platform_device *pdev)
        unsigned long sha_phys_size;
        int err;
 
-       sha_dd = kzalloc(sizeof(struct atmel_sha_dev), GFP_KERNEL);
+       sha_dd = devm_kzalloc(&pdev->dev, sizeof(struct atmel_sha_dev),
+                               GFP_KERNEL);
        if (sha_dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
@@ -1490,8 +1490,6 @@ clk_err:
        free_irq(sha_dd->irq, sha_dd);
 res_err:
        tasklet_kill(&sha_dd->done_task);
-       kfree(sha_dd);
-       sha_dd = NULL;
 sha_dd_err:
        dev_err(dev, "initialization failed.\n");
 
@@ -1523,9 +1521,6 @@ static int atmel_sha_remove(struct platform_device *pdev)
        if (sha_dd->irq >= 0)
                free_irq(sha_dd->irq, sha_dd);
 
-       kfree(sha_dd);
-       sha_dd = NULL;
-
        return 0;
 }
 
index 6cde5b5..d3a9041 100644
@@ -1337,7 +1337,6 @@ static struct crypto_platform_data *atmel_tdes_of_init(struct platform_device *p
                                        GFP_KERNEL);
        if (!pdata->dma_slave) {
                dev_err(&pdev->dev, "could not allocate memory for dma_slave\n");
-               devm_kfree(&pdev->dev, pdata);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1359,7 +1358,7 @@ static int atmel_tdes_probe(struct platform_device *pdev)
        unsigned long tdes_phys_size;
        int err;
 
-       tdes_dd = kzalloc(sizeof(struct atmel_tdes_dev), GFP_KERNEL);
+       tdes_dd = devm_kmalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
        if (tdes_dd == NULL) {
                dev_err(dev, "unable to alloc data struct.\n");
                err = -ENOMEM;
@@ -1483,8 +1482,6 @@ tdes_irq_err:
 res_err:
        tasklet_kill(&tdes_dd->done_task);
        tasklet_kill(&tdes_dd->queue_task);
-       kfree(tdes_dd);
-       tdes_dd = NULL;
 tdes_dd_err:
        dev_err(dev, "initialization failed.\n");
 
@@ -1519,9 +1516,6 @@ static int atmel_tdes_remove(struct platform_device *pdev)
        if (tdes_dd->irq >= 0)
                free_irq(tdes_dd->irq, tdes_dd);
 
-       kfree(tdes_dd);
-       tdes_dd = NULL;
-
        return 0;
 }
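
Every kfree()/NULL-ing removed from the two Atmel drivers above falls out of converting the allocation to devm_kzalloc()/devm_kmalloc(): device-managed memory is released automatically when probe fails or the device goes away, so manual cleanup becomes not just unnecessary but a double-free hazard. In miniature:

/* Illustrative only: devm allocations need no matching kfree(). */
static int example_probe(struct platform_device *pdev)
{
	struct atmel_tdes_dev *tdes_dd;

	tdes_dd = devm_kzalloc(&pdev->dev, sizeof(*tdes_dd), GFP_KERNEL);
	if (!tdes_dd)
		return -ENOMEM;		/* nothing to unwind */

	/*
	 * Any later failure can simply return an error: the memory is
	 * tied to the device and freed by the driver core, which is why
	 * the error-path and remove-path kfree()s above could go.
	 */
	return 0;
}
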
 
index c09ce1f..a80ea85 100644
@@ -97,6 +97,13 @@ static inline void append_dec_op1(u32 *desc, u32 type)
 {
        u32 *jump_cmd, *uncond_jump_cmd;
 
+       /* DK bit is valid only for AES */
+       if ((type & OP_ALG_ALGSEL_MASK) != OP_ALG_ALGSEL_AES) {
+               append_operation(desc, type | OP_ALG_AS_INITFINAL |
+                                OP_ALG_DECRYPT);
+               return;
+       }
+
        jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);
        append_operation(desc, type | OP_ALG_AS_INITFINAL |
                         OP_ALG_DECRYPT);
@@ -786,7 +793,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
        ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc,
                                              desc_bytes(desc),
                                              DMA_TO_DEVICE);
-       if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
+       if (dma_mapping_error(jrdev, ctx->sh_desc_dec_dma)) {
                dev_err(jrdev, "unable to map shared descriptor\n");
                return -ENOMEM;
        }
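
This is the first of many hunks in this file, and in caamhash.c below, that retrofit one rule: a dma_map_single() result must pass dma_mapping_error() before it is used, since some platforms report a failed mapping only through that check. Note also how the sec4_sg table mappings move below the code that fills the table, so a completed table is what gets mapped. The rule as a minimal helper sketch (name illustrative):

/* Illustrative only: never use a DMA handle before checking it. */
static int map_single_checked(struct device *jrdev, void *buf, size_t len,
			      enum dma_data_direction dir,
			      dma_addr_t *handle)
{
	*handle = dma_map_single(jrdev, buf, len, dir);
	if (dma_mapping_error(jrdev, *handle)) {
		dev_err(jrdev, "unable to map buffer\n");
		return -ENOMEM;		/* *handle is not valid here */
	}

	return 0;
}
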
@@ -1313,8 +1320,13 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                                         DMA_FROM_DEVICE, dst_chained);
        }
 
-       /* Check if data are contiguous */
        iv_dma = dma_map_single(jrdev, req->iv, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Check if data are contiguous */
        if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
            iv_dma || src_nents || iv_dma + ivsize !=
            sg_dma_address(req->src)) {
@@ -1345,8 +1357,6 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                         desc_bytes;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
        *all_contig_ptr = all_contig;
 
        sec4_sg_index = 0;
@@ -1369,6 +1379,12 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
                sg_to_sec4_sg_last(req->dst, dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
        return edesc;
 }
@@ -1494,8 +1510,13 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
                                         DMA_FROM_DEVICE, dst_chained);
        }
 
-       /* Check if data are contiguous */
        iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Check if data are contiguous */
        if (assoc_nents || sg_dma_address(req->assoc) + req->assoclen !=
            iv_dma || src_nents || iv_dma + ivsize != sg_dma_address(req->src))
                contig &= ~GIV_SRC_CONTIG;
@@ -1534,8 +1555,6 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct aead_edesc) +
                         desc_bytes;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
        *contig_ptr = contig;
 
        sec4_sg_index = 0;
@@ -1559,6 +1578,12 @@ static struct aead_edesc *aead_giv_edesc_alloc(struct aead_givcrypt_request
                sg_to_sec4_sg_last(req->dst, dst_nents,
                                   edesc->sec4_sg + sec4_sg_index, 0);
        }
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return ERR_PTR(-ENOMEM);
+       }
 
        return edesc;
 }
@@ -1650,11 +1675,16 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
                                         DMA_FROM_DEVICE, dst_chained);
        }
 
+       iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, iv_dma)) {
+               dev_err(jrdev, "unable to map IV\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
        /*
         * Check if iv can be contiguous with source and destination.
         * If so, include it. If not, create scatterlist.
         */
-       iv_dma = dma_map_single(jrdev, req->info, ivsize, DMA_TO_DEVICE);
        if (!src_nents && iv_dma + ivsize == sg_dma_address(req->src))
                iv_contig = true;
        else
@@ -1693,6 +1723,11 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
 
        edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
                                            sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return ERR_PTR(-ENOMEM);
+       }
+
        edesc->iv_dma = iv_dma;
 
 #ifdef DEBUG
@@ -2441,8 +2476,37 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 
 static int __init caam_algapi_init(void)
 {
+       struct device_node *dev_node;
+       struct platform_device *pdev;
+       struct device *ctrldev;
+       void *priv;
        int i = 0, err = 0;
 
+       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       if (!dev_node) {
+               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+               if (!dev_node)
+                       return -ENODEV;
+       }
+
+       pdev = of_find_device_by_node(dev_node);
+       if (!pdev) {
+               of_node_put(dev_node);
+               return -ENODEV;
+       }
+
+       ctrldev = &pdev->dev;
+       priv = dev_get_drvdata(ctrldev);
+       of_node_put(dev_node);
+
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
+
+
        INIT_LIST_HEAD(&alg_list);
 
        /* register crypto algorithms the device supports */
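
The same guard is added verbatim to caamhash.c and caamrng.c below: before registering anything, each module locates the SEC controller node and bails out unless the controller driver has already probed and installed its drvdata (a NULL there typically means something like RNG4 init failed). Sketched as a reusable predicate (function name illustrative; the OF calls match the hunk):

/* Illustrative only: is the CAAM controller present and probed? */
static bool caam_controller_ready(void)
{
	struct device_node *np;
	struct platform_device *pdev;

	np = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!np)
		np = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
	if (!np)
		return false;

	pdev = of_find_device_by_node(np);
	of_node_put(np);
	if (!pdev)
		return false;

	/* NULL drvdata means the controller probe did not complete. */
	return dev_get_drvdata(&pdev->dev) != NULL;
}
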
index 0d9284e..b464d03 100644
@@ -137,13 +137,20 @@ struct caam_hash_state {
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
-static inline void map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
-                                      struct caam_hash_state *state,
-                                      int ctx_len)
+static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev,
+                                     struct caam_hash_state *state,
+                                     int ctx_len)
 {
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx,
                                        ctx_len, DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, state->ctx_dma)) {
+               dev_err(jrdev, "unable to map ctx\n");
+               return -ENOMEM;
+       }
+
        append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0);
+
+       return 0;
 }
 
 /* Map req->result, and append seq_out_ptr command that points to it */
@@ -201,14 +208,19 @@ try_buf_map_to_sec4_sg(struct device *jrdev, struct sec4_sg_entry *sec4_sg,
 }
 
 /* Map state->caam_ctx, and add it to link table */
-static inline void ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
-                                     struct caam_hash_state *state,
-                                     int ctx_len,
-                                     struct sec4_sg_entry *sec4_sg,
-                                     u32 flag)
+static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev,
+                                    struct caam_hash_state *state, int ctx_len,
+                                    struct sec4_sg_entry *sec4_sg, u32 flag)
 {
        state->ctx_dma = dma_map_single(jrdev, state->caam_ctx, ctx_len, flag);
+       if (dma_mapping_error(jrdev, state->ctx_dma)) {
+               dev_err(jrdev, "unable to map ctx\n");
+               return -ENOMEM;
+       }
+
        dma_to_sec4_sg_one(sec4_sg, state->ctx_dma, ctx_len, 0);
+
+       return 0;
 }
 
 /* Common shared descriptor commands */
@@ -487,11 +499,11 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
                               digestsize, 1);
 #endif
        }
-       *keylen = digestsize;
-
        dma_unmap_single(jrdev, src_dma, *keylen, DMA_TO_DEVICE);
        dma_unmap_single(jrdev, dst_dma, digestsize, DMA_FROM_DEVICE);
 
+       *keylen = digestsize;
+
        kfree(desc);
 
        return ret;
@@ -706,7 +718,7 @@ static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err,
        if (err)
                caam_jr_strstatus(jrdev, err);
 
-       ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE);
+       ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE);
        kfree(edesc);
 
 #ifdef DEBUG
@@ -741,7 +753,7 @@ static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err,
        if (err)
                caam_jr_strstatus(jrdev, err);
 
-       ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
+       ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE);
        kfree(edesc);
 
 #ifdef DEBUG
@@ -808,12 +820,11 @@ static int ahash_update_ctx(struct ahash_request *req)
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
-               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                                    sec4_sg_bytes,
-                                                    DMA_TO_DEVICE);
 
-               ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
-                                  edesc->sec4_sg, DMA_BIDIRECTIONAL);
+               ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+                                        edesc->sec4_sg, DMA_BIDIRECTIONAL);
+               if (ret)
+                       return ret;
 
                state->buf_dma = try_buf_map_to_sec4_sg(jrdev,
                                                        edesc->sec4_sg + 1,
@@ -839,6 +850,14 @@ static int ahash_update_ctx(struct ahash_request *req)
                init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
                                     HDR_REVERSE);
 
+               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                                    sec4_sg_bytes,
+                                                    DMA_TO_DEVICE);
+               if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+                       dev_err(jrdev, "unable to map S/G table\n");
+                       return -ENOMEM;
+               }
+
                append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                                       to_hash, LDST_SGF);
 
@@ -911,23 +930,34 @@ static int ahash_final_ctx(struct ahash_request *req)
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
        edesc->src_nents = 0;
 
-       ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
-                          DMA_TO_DEVICE);
+       ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+                                edesc->sec4_sg, DMA_TO_DEVICE);
+       if (ret)
+               return ret;
 
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
                                                last_buflen);
        (edesc->sec4_sg + sec4_sg_bytes - 1)->len |= SEC4_SG_LEN_FIN;
 
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return -ENOMEM;
+       }
+
        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                          LDST_SGF);
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -989,11 +1019,11 @@ static int ahash_finup_ctx(struct ahash_request *req)
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
 
-       ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, edesc->sec4_sg,
-                          DMA_TO_DEVICE);
+       ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len,
+                                edesc->sec4_sg, DMA_TO_DEVICE);
+       if (ret)
+               return ret;
 
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg + 1,
                                                buf, state->buf_dma, buflen,
@@ -1002,11 +1032,22 @@ static int ahash_finup_ctx(struct ahash_request *req)
        src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg +
                           sec4_sg_src_index, chained);
 
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return -ENOMEM;
+       }
+
        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len +
                               buflen + req->nbytes, LDST_SGF);
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1056,8 +1097,7 @@ static int ahash_digest(struct ahash_request *req)
        }
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                          DESC_JOB_IO_LEN;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->src_nents = src_nents;
        edesc->chained = chained;
 
@@ -1067,6 +1107,12 @@ static int ahash_digest(struct ahash_request *req)
 
        if (src_nents) {
                sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+               if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+                       dev_err(jrdev, "unable to map S/G table\n");
+                       return -ENOMEM;
+               }
                src_dma = edesc->sec4_sg_dma;
                options = LDST_SGF;
        } else {
@@ -1077,6 +1123,10 @@ static int ahash_digest(struct ahash_request *req)
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1125,11 +1175,19 @@ static int ahash_final_no_ctx(struct ahash_request *req)
        init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE);
 
        state->buf_dma = dma_map_single(jrdev, buf, buflen, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, state->buf_dma)) {
+               dev_err(jrdev, "unable to map src\n");
+               return -ENOMEM;
+       }
 
        append_seq_in_ptr(desc, state->buf_dma, buflen, 0);
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
        edesc->src_nents = 0;
 
 #ifdef DEBUG
@@ -1197,9 +1255,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
-               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                                   sec4_sg_bytes,
-                                                   DMA_TO_DEVICE);
+               edesc->dst_dma = 0;
 
                state->buf_dma = buf_map_to_sec4_sg(jrdev, edesc->sec4_sg,
                                                    buf, *buflen);
@@ -1216,9 +1272,19 @@ static int ahash_update_no_ctx(struct ahash_request *req)
                init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER |
                                     HDR_REVERSE);
 
+               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                                   sec4_sg_bytes,
+                                                   DMA_TO_DEVICE);
+               if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+                       dev_err(jrdev, "unable to map S/G table\n");
+                       return -ENOMEM;
+               }
+
                append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF);
 
-               map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+               ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+               if (ret)
+                       return ret;
 
 #ifdef DEBUG
                print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1297,8 +1363,6 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
        edesc->sec4_sg_bytes = sec4_sg_bytes;
        edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                         DESC_JOB_IO_LEN;
-       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                           sec4_sg_bytes, DMA_TO_DEVICE);
 
        state->buf_dma = try_buf_map_to_sec4_sg(jrdev, edesc->sec4_sg, buf,
                                                state->buf_dma, buflen,
@@ -1307,11 +1371,22 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
        src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1,
                           chained);
 
+       edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+                                           sec4_sg_bytes, DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+               dev_err(jrdev, "unable to map S/G table\n");
+               return -ENOMEM;
+       }
+
        append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen +
                               req->nbytes, LDST_SGF);
 
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
+       if (dma_mapping_error(jrdev, edesc->dst_dma)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1380,13 +1455,19 @@ static int ahash_update_first(struct ahash_request *req)
                edesc->sec4_sg_bytes = sec4_sg_bytes;
                edesc->sec4_sg = (void *)edesc + sizeof(struct ahash_edesc) +
                                 DESC_JOB_IO_LEN;
-               edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
-                                                   sec4_sg_bytes,
-                                                   DMA_TO_DEVICE);
+               edesc->dst_dma = 0;
 
                if (src_nents) {
                        sg_to_sec4_sg_last(req->src, src_nents,
                                           edesc->sec4_sg, 0);
+                       edesc->sec4_sg_dma = dma_map_single(jrdev,
+                                                           edesc->sec4_sg,
+                                                           sec4_sg_bytes,
+                                                           DMA_TO_DEVICE);
+                       if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+                               dev_err(jrdev, "unable to map S/G table\n");
+                               return -ENOMEM;
+                       }
                        src_dma = edesc->sec4_sg_dma;
                        options = LDST_SGF;
                } else {
@@ -1404,7 +1485,9 @@ static int ahash_update_first(struct ahash_request *req)
 
                append_seq_in_ptr(desc, src_dma, to_hash, options);
 
-               map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+               ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len);
+               if (ret)
+                       return ret;
 
 #ifdef DEBUG
                print_hex_dump(KERN_ERR, "jobdesc@"__stringify(__LINE__)": ",
@@ -1453,6 +1536,7 @@ static int ahash_init(struct ahash_request *req)
        state->final = ahash_final_no_ctx;
 
        state->current_buf = 0;
+       state->buf_dma = 0;
 
        return 0;
 }
@@ -1787,8 +1871,36 @@ caam_hash_alloc(struct caam_hash_template *template,
 
 static int __init caam_algapi_hash_init(void)
 {
+       struct device_node *dev_node;
+       struct platform_device *pdev;
+       struct device *ctrldev;
+       void *priv;
        int i = 0, err = 0;
 
+       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       if (!dev_node) {
+               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+               if (!dev_node)
+                       return -ENODEV;
+       }
+
+       pdev = of_find_device_by_node(dev_node);
+       if (!pdev) {
+               of_node_put(dev_node);
+               return -ENODEV;
+       }
+
+       ctrldev = &pdev->dev;
+       priv = dev_get_drvdata(ctrldev);
+       of_node_put(dev_node);
+
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
+
        INIT_LIST_HEAD(&hash_list);
 
        /* register crypto algorithms the device supports */
index 8c07d31..ae31e55 100644
@@ -185,7 +185,7 @@ static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
                                      max - copied_idx, false);
 }
 
-static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
+static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
 {
        struct device *jrdev = ctx->jrdev;
        u32 *desc = ctx->sh_desc;
@@ -203,13 +203,18 @@ static inline void rng_create_sh_desc(struct caam_rng_ctx *ctx)
 
        ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                          DMA_TO_DEVICE);
+       if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
+               dev_err(jrdev, "unable to map shared descriptor\n");
+               return -ENOMEM;
+       }
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                       desc, desc_bytes(desc), 1);
 #endif
+       return 0;
 }
 
-static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
+static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
 {
        struct device *jrdev = ctx->jrdev;
        struct buf_data *bd = &ctx->bufs[buf_id];
@@ -220,12 +225,17 @@ static inline void rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
                             HDR_REVERSE);
 
        bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
+       if (dma_mapping_error(jrdev, bd->addr)) {
+               dev_err(jrdev, "unable to map dst\n");
+               return -ENOMEM;
+       }
 
        append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
 #ifdef DEBUG
        print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
                       desc, desc_bytes(desc), 1);
 #endif
+       return 0;
 }
 
 static void caam_cleanup(struct hwrng *rng)
@@ -242,24 +252,44 @@ static void caam_cleanup(struct hwrng *rng)
        rng_unmap_ctx(rng_ctx);
 }
 
-static void caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
+static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
 {
        struct buf_data *bd = &ctx->bufs[buf_id];
+       int err;
+
+       err = rng_create_job_desc(ctx, buf_id);
+       if (err)
+               return err;
 
-       rng_create_job_desc(ctx, buf_id);
        atomic_set(&bd->empty, BUF_EMPTY);
        submit_job(ctx, buf_id == ctx->current_buf);
        wait_for_completion(&bd->filled);
+
+       return 0;
 }
 
-static void caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
+static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
 {
+       int err;
+
        ctx->jrdev = jrdev;
-       rng_create_sh_desc(ctx);
+
+       err = rng_create_sh_desc(ctx);
+       if (err)
+               return err;
+
        ctx->current_buf = 0;
        ctx->cur_buf_idx = 0;
-       caam_init_buf(ctx, 0);
-       caam_init_buf(ctx, 1);
+
+       err = caam_init_buf(ctx, 0);
+       if (err)
+               return err;
+
+       err = caam_init_buf(ctx, 1);
+       if (err)
+               return err;
+
+       return 0;
 }
 
 static struct hwrng caam_rng = {
@@ -278,6 +308,35 @@ static void __exit caam_rng_exit(void)
 static int __init caam_rng_init(void)
 {
        struct device *dev;
+       struct device_node *dev_node;
+       struct platform_device *pdev;
+       struct device *ctrldev;
+       void *priv;
+       int err;
+
+       dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
+       if (!dev_node) {
+               dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
+               if (!dev_node)
+                       return -ENODEV;
+       }
+
+       pdev = of_find_device_by_node(dev_node);
+       if (!pdev) {
+               of_node_put(dev_node);
+               return -ENODEV;
+       }
+
+       ctrldev = &pdev->dev;
+       priv = dev_get_drvdata(ctrldev);
+       of_node_put(dev_node);
+
+       /*
+        * If priv is NULL, it's probably because the caam driver wasn't
+        * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
+        */
+       if (!priv)
+               return -ENODEV;
 
        dev = caam_jr_alloc();
        if (IS_ERR(dev)) {
@@ -287,7 +346,9 @@ static int __init caam_rng_init(void)
        rng_ctx = kmalloc(sizeof(struct caam_rng_ctx), GFP_DMA);
        if (!rng_ctx)
                return -ENOMEM;
-       caam_init_rng(rng_ctx, dev);
+       err = caam_init_rng(rng_ctx, dev);
+       if (err) {
+               kfree(rng_ctx); /* don't leak the context on failure */
+               return err;
+       }
 
        dev_info(dev, "registering rng-caam\n");
        return hwrng_register(&caam_rng);
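
The conversion above follows the standard DMA API contract: every dma_map_single() must be checked with dma_mapping_error() before the handle is used, and the failure must propagate to the caller. A minimal sketch of that pattern, assuming <linux/dma-mapping.h> and a caller-supplied device and buffer (the helper name is illustrative, not part of the commit):

	static int example_map_buf(struct device *dev, void *buf, size_t len,
				   dma_addr_t *addr)
	{
		*addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *addr)) {
			dev_err(dev, "unable to map buffer\n");
			return -ENOMEM;
		}
		return 0;
	}
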
index 1c38f86..3cade79 100644 (file)
@@ -5,6 +5,7 @@
  * Copyright 2008-2012 Freescale Semiconductor, Inc.
  */
 
+#include <linux/device.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
 
@@ -87,6 +88,17 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
 
        /* Set the bit to request direct access to DECO0 */
        topregs = (struct caam_full __iomem *)ctrlpriv->ctrl;
+
+       if (ctrlpriv->virt_en == 1) {
+               setbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
+               while (!(rd_reg32(&topregs->ctrl.deco_rsr) & DECORSR_VALID) &&
+                      --timeout)
+                       cpu_relax();
+
+               timeout = 100000;
+       }
+
        setbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
 
        while (!(rd_reg32(&topregs->ctrl.deco_rq) & DECORR_DEN0) &&
@@ -129,6 +141,9 @@ static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
        *status = rd_reg32(&topregs->deco.op_status_hi) &
                  DECO_OP_STATUS_HI_ERR_MASK;
 
+       if (ctrlpriv->virt_en == 1)
+               clrbits32(&topregs->ctrl.deco_rsr, DECORSR_JR0);
+
        /* Mark the DECO as free */
        clrbits32(&topregs->ctrl.deco_rq, DECORR_RQD0ENABLE);
 
@@ -295,9 +310,6 @@ static int caam_remove(struct platform_device *pdev)
        /* Unmap controller region */
        iounmap(&topregs->ctrl);
 
-       kfree(ctrlpriv->jrpdev);
-       kfree(ctrlpriv);
-
        return ret;
 }
 
@@ -380,9 +392,11 @@ static int caam_probe(struct platform_device *pdev)
 #ifdef CONFIG_DEBUG_FS
        struct caam_perfmon *perfmon;
 #endif
-       u64 cha_vid;
+       u32 scfgr, comp_params;
+       u32 cha_vid_ls;
 
-       ctrlpriv = kzalloc(sizeof(struct caam_drv_private), GFP_KERNEL);
+       ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(struct caam_drv_private),
+                               GFP_KERNEL);
        if (!ctrlpriv)
                return -ENOMEM;
 
@@ -413,13 +427,40 @@ static int caam_probe(struct platform_device *pdev)
        setbits32(&topregs->ctrl.mcr, MCFGR_WDENABLE |
                  (sizeof(dma_addr_t) == sizeof(u64) ? MCFGR_LONG_PTR : 0));
 
+       /*
+        * Read the compile-time parameters and SCFGR to determine
+        * whether virtualization is enabled for this platform.
+        */
+       comp_params = rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms);
+       scfgr = rd_reg32(&topregs->ctrl.scfgr);
+
+       ctrlpriv->virt_en = 0;
+       if (comp_params & CTPR_MS_VIRT_EN_INCL) {
+               /* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
+                * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
+                */
+               if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
+                   (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
+                      (scfgr & SCFGR_VIRT_EN)))
+                               ctrlpriv->virt_en = 1;
+       } else {
+               /* VIRT_EN_INCL = 0 & VIRT_EN_POR = 1 */
+               if (comp_params & CTPR_MS_VIRT_EN_POR)
+                               ctrlpriv->virt_en = 1;
+       }
+
+       if (ctrlpriv->virt_en == 1)
+               setbits32(&topregs->ctrl.jrstart, JRSTART_JR0_START |
+                         JRSTART_JR1_START | JRSTART_JR2_START |
+                         JRSTART_JR3_START);
+
        if (sizeof(dma_addr_t) == sizeof(u64))
                if (of_device_is_compatible(nprop, "fsl,sec-v5.0"))
-                       dma_set_mask(dev, DMA_BIT_MASK(40));
+                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
                else
-                       dma_set_mask(dev, DMA_BIT_MASK(36));
+                       dma_set_mask_and_coherent(dev, DMA_BIT_MASK(36));
        else
-               dma_set_mask(dev, DMA_BIT_MASK(32));
+               dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 
        /*
         * Detect and enable JobRs
@@ -432,8 +473,9 @@ static int caam_probe(struct platform_device *pdev)
                    of_device_is_compatible(np, "fsl,sec4.0-job-ring"))
                        rspec++;
 
-       ctrlpriv->jrpdev = kzalloc(sizeof(struct platform_device *) * rspec,
-                                                               GFP_KERNEL);
+       ctrlpriv->jrpdev = devm_kzalloc(&pdev->dev,
+                                       sizeof(struct platform_device *) * rspec,
+                                       GFP_KERNEL);
        if (ctrlpriv->jrpdev == NULL) {
                iounmap(&topregs->ctrl);
                return -ENOMEM;
@@ -456,8 +498,9 @@ static int caam_probe(struct platform_device *pdev)
                }
 
        /* Check to see if QI present. If so, enable */
-       ctrlpriv->qi_present = !!(rd_reg64(&topregs->ctrl.perfmon.comp_parms) &
-                                 CTPR_QI_MASK);
+       ctrlpriv->qi_present =
+                       !!(rd_reg32(&topregs->ctrl.perfmon.comp_parms_ms) &
+                          CTPR_MS_QI_MASK);
        if (ctrlpriv->qi_present) {
                ctrlpriv->qi = (struct caam_queue_if __force *)&topregs->qi;
                /* This is all that's required to physically enable QI */
@@ -471,13 +514,13 @@ static int caam_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
-       cha_vid = rd_reg64(&topregs->ctrl.perfmon.cha_id);
+       cha_vid_ls = rd_reg32(&topregs->ctrl.perfmon.cha_id_ls);
 
        /*
         * If SEC has RNG version >= 4 and RNG state handle has not been
         * already instantiated, do RNG instantiation
         */
-       if ((cha_vid & CHA_ID_RNG_MASK) >> CHA_ID_RNG_SHIFT >= 4) {
+       if ((cha_vid_ls & CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT >= 4) {
                ctrlpriv->rng4_sh_init =
                        rd_reg32(&topregs->ctrl.r4tst[0].rdsta);
                /*
@@ -531,7 +574,8 @@ static int caam_probe(struct platform_device *pdev)
 
        /* NOTE: RTIC detection ought to go here, around Si time */
 
-       caam_id = rd_reg64(&topregs->ctrl.perfmon.caam_id);
+       caam_id = (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ms) << 32 |
+                 (u64)rd_reg32(&topregs->ctrl.perfmon.caam_id_ls);
 
        /* Report "alive" for developer to see */
        dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
@@ -547,7 +591,7 @@ static int caam_probe(struct platform_device *pdev)
         */
        perfmon = (struct caam_perfmon __force *)&ctrl->perfmon;
 
-       ctrlpriv->dfs_root = debugfs_create_dir("caam", NULL);
+       ctrlpriv->dfs_root = debugfs_create_dir(dev_name(dev), NULL);
        ctrlpriv->ctl = debugfs_create_dir("ctl", ctrlpriv->dfs_root);
 
        /* Controller-level - performance monitor counters */
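
The virtualization check added in caam_probe() reduces to a single boolean: when CTPR_MS_VIRT_EN_INCL is set, either the power-on value or SCFGR may enable it; otherwise only the power-on value counts. An equivalent, condensed sketch using the CTPR/SCFGR macros added in regs.h below:

	if (comp_params & CTPR_MS_VIRT_EN_INCL)
		ctrlpriv->virt_en = !!((comp_params & CTPR_MS_VIRT_EN_POR) ||
				       (scfgr & SCFGR_VIRT_EN));
	else
		ctrlpriv->virt_en = !!(comp_params & CTPR_MS_VIRT_EN_POR);
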
index 7e4500f..d397ff9 100644 (file)
@@ -321,7 +321,6 @@ struct sec4_sg_entry {
 /* Continue - Not the last FIFO store to come */
 #define FIFOST_CONT_SHIFT      23
 #define FIFOST_CONT_MASK       (1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK       (1 << FIFOST_CONT_SHIFT)
 
 /*
  * Extended Length - use 32-bit extended length that
index 6d85fcc..97363db 100644 (file)
@@ -82,6 +82,7 @@ struct caam_drv_private {
        u8 total_jobrs;         /* Total Job Rings in device */
        u8 qi_present;          /* Nonzero if QI present in device */
        int secvio_irq;         /* Security violation interrupt number */
+       int virt_en;            /* Virtualization enabled in CAAM */
 
 #define        RNG4_MAX_HANDLES 2
        /* RNG4 block */
index 1d80bd3..4d18e27 100644 (file)
@@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev)
        int error;
 
        jrdev = &pdev->dev;
-       jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
-                        GFP_KERNEL);
+       jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
+                             GFP_KERNEL);
        if (!jrpriv)
                return -ENOMEM;
 
@@ -476,21 +476,19 @@ static int caam_jr_probe(struct platform_device *pdev)
 
        if (sizeof(dma_addr_t) == sizeof(u64))
                if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring"))
-                       dma_set_mask(jrdev, DMA_BIT_MASK(40));
+                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(40));
                else
-                       dma_set_mask(jrdev, DMA_BIT_MASK(36));
+                       dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(36));
        else
-               dma_set_mask(jrdev, DMA_BIT_MASK(32));
+               dma_set_mask_and_coherent(jrdev, DMA_BIT_MASK(32));
 
        /* Identify the interrupt */
        jrpriv->irq = irq_of_parse_and_map(nprop, 0);
 
        /* Now do the platform independent part */
        error = caam_jr_init(jrdev); /* now turn on hardware */
-       if (error) {
-               kfree(jrpriv);
+       if (error)
                return error;
-       }
 
        jrpriv->dev = jrdev;
        spin_lock(&driver_data.jr_alloc_lock);
index cbde8b9..f48e344 100644 (file)
@@ -84,6 +84,7 @@
 #endif
 
 #ifndef CONFIG_64BIT
+#ifdef __BIG_ENDIAN
 static inline void wr_reg64(u64 __iomem *reg, u64 data)
 {
        wr_reg32((u32 __iomem *)reg, (data & 0xffffffff00000000ull) >> 32);
@@ -95,6 +96,21 @@ static inline u64 rd_reg64(u64 __iomem *reg)
        return (((u64)rd_reg32((u32 __iomem *)reg)) << 32) |
                ((u64)rd_reg32((u32 __iomem *)reg + 1));
 }
+#else
+#ifdef __LITTLE_ENDIAN
+static inline void wr_reg64(u64 __iomem *reg, u64 data)
+{
+       wr_reg32((u32 __iomem *)reg + 1, (data & 0xffffffff00000000ull) >> 32);
+       wr_reg32((u32 __iomem *)reg, data & 0x00000000ffffffffull);
+}
+
+static inline u64 rd_reg64(u64 __iomem *reg)
+{
+       return (((u64)rd_reg32((u32 __iomem *)reg + 1)) << 32) |
+               ((u64)rd_reg32((u32 __iomem *)reg));
+}
+#endif
+#endif
 #endif
 
 /*
@@ -114,45 +130,45 @@ struct jr_outentry {
  */
 
 /* Number of DECOs */
-#define CHA_NUM_DECONUM_SHIFT  56
-#define CHA_NUM_DECONUM_MASK   (0xfull << CHA_NUM_DECONUM_SHIFT)
+#define CHA_NUM_MS_DECONUM_SHIFT       24
+#define CHA_NUM_MS_DECONUM_MASK        (0xfull << CHA_NUM_MS_DECONUM_SHIFT)
 
 /* CHA Version IDs */
-#define CHA_ID_AES_SHIFT       0
-#define CHA_ID_AES_MASK                (0xfull << CHA_ID_AES_SHIFT)
+#define CHA_ID_LS_AES_SHIFT    0
+#define CHA_ID_LS_AES_MASK             (0xfull << CHA_ID_LS_AES_SHIFT)
 
-#define CHA_ID_DES_SHIFT       4
-#define CHA_ID_DES_MASK                (0xfull << CHA_ID_DES_SHIFT)
+#define CHA_ID_LS_DES_SHIFT    4
+#define CHA_ID_LS_DES_MASK             (0xfull << CHA_ID_LS_DES_SHIFT)
 
-#define CHA_ID_ARC4_SHIFT      8
-#define CHA_ID_ARC4_MASK       (0xfull << CHA_ID_ARC4_SHIFT)
+#define CHA_ID_LS_ARC4_SHIFT   8
+#define CHA_ID_LS_ARC4_MASK    (0xfull << CHA_ID_LS_ARC4_SHIFT)
 
-#define CHA_ID_MD_SHIFT                12
-#define CHA_ID_MD_MASK         (0xfull << CHA_ID_MD_SHIFT)
+#define CHA_ID_LS_MD_SHIFT     12
+#define CHA_ID_LS_MD_MASK      (0xfull << CHA_ID_LS_MD_SHIFT)
 
-#define CHA_ID_RNG_SHIFT       16
-#define CHA_ID_RNG_MASK                (0xfull << CHA_ID_RNG_SHIFT)
+#define CHA_ID_LS_RNG_SHIFT    16
+#define CHA_ID_LS_RNG_MASK     (0xfull << CHA_ID_LS_RNG_SHIFT)
 
-#define CHA_ID_SNW8_SHIFT      20
-#define CHA_ID_SNW8_MASK       (0xfull << CHA_ID_SNW8_SHIFT)
+#define CHA_ID_LS_SNW8_SHIFT   20
+#define CHA_ID_LS_SNW8_MASK    (0xfull << CHA_ID_LS_SNW8_SHIFT)
 
-#define CHA_ID_KAS_SHIFT       24
-#define CHA_ID_KAS_MASK                (0xfull << CHA_ID_KAS_SHIFT)
+#define CHA_ID_LS_KAS_SHIFT    24
+#define CHA_ID_LS_KAS_MASK     (0xfull << CHA_ID_LS_KAS_SHIFT)
 
-#define CHA_ID_PK_SHIFT                28
-#define CHA_ID_PK_MASK         (0xfull << CHA_ID_PK_SHIFT)
+#define CHA_ID_LS_PK_SHIFT     28
+#define CHA_ID_LS_PK_MASK      (0xfull << CHA_ID_LS_PK_SHIFT)
 
-#define CHA_ID_CRC_SHIFT       32
-#define CHA_ID_CRC_MASK                (0xfull << CHA_ID_CRC_SHIFT)
+#define CHA_ID_MS_CRC_SHIFT    0
+#define CHA_ID_MS_CRC_MASK     (0xfull << CHA_ID_MS_CRC_SHIFT)
 
-#define CHA_ID_SNW9_SHIFT      36
-#define CHA_ID_SNW9_MASK       (0xfull << CHA_ID_SNW9_SHIFT)
+#define CHA_ID_MS_SNW9_SHIFT   4
+#define CHA_ID_MS_SNW9_MASK    (0xfull << CHA_ID_MS_SNW9_SHIFT)
 
-#define CHA_ID_DECO_SHIFT      56
-#define CHA_ID_DECO_MASK       (0xfull << CHA_ID_DECO_SHIFT)
+#define CHA_ID_MS_DECO_SHIFT   24
+#define CHA_ID_MS_DECO_MASK    (0xfull << CHA_ID_MS_DECO_SHIFT)
 
-#define CHA_ID_JR_SHIFT                60
-#define CHA_ID_JR_MASK         (0xfull << CHA_ID_JR_SHIFT)
+#define CHA_ID_MS_JR_SHIFT     28
+#define CHA_ID_MS_JR_MASK      (0xfull << CHA_ID_MS_JR_SHIFT)
 
 struct sec_vid {
        u16 ip_id;
@@ -172,10 +188,14 @@ struct caam_perfmon {
        u64 rsvd[13];
 
        /* CAAM Hardware Instantiation Parameters               fa0-fbf */
-       u64 cha_rev;            /* CRNR - CHA Revision Number           */
-#define CTPR_QI_SHIFT          57
-#define CTPR_QI_MASK           (0x1ull << CTPR_QI_SHIFT)
-       u64 comp_parms; /* CTPR - Compile Parameters Register   */
+       u32 cha_rev_ms;         /* CRNR - CHA Rev No. Most significant half */
+       u32 cha_rev_ls;         /* CRNR - CHA Rev No. Least significant half */
+#define CTPR_MS_QI_SHIFT       25
+#define CTPR_MS_QI_MASK                (0x1ull << CTPR_MS_QI_SHIFT)
+#define CTPR_MS_VIRT_EN_INCL   0x00000001
+#define CTPR_MS_VIRT_EN_POR    0x00000002
+       u32 comp_parms_ms;      /* CTPR - Compile Parameters Register MS */
+       u32 comp_parms_ls;      /* CTPR - Compile Parameters Register LS */
        u64 rsvd1[2];
 
        /* CAAM Global Status                                   fc0-fdf */
@@ -189,9 +209,12 @@ struct caam_perfmon {
        /* Component Instantiation Parameters                   fe0-fff */
        u32 rtic_id;            /* RVID - RTIC Version ID       */
        u32 ccb_id;             /* CCBVID - CCB Version ID      */
-       u64 cha_id;             /* CHAVID - CHA Version ID      */
-       u64 cha_num;            /* CHANUM - CHA Number          */
-       u64 caam_id;            /* CAAMVID - CAAM Version ID    */
+       u32 cha_id_ms;          /* CHAVID - CHA Version ID Most Significant */
+       u32 cha_id_ls;          /* CHAVID - CHA Version ID Least Significant */
+       u32 cha_num_ms;         /* CHANUM - CHA Number Most Significant */
+       u32 cha_num_ls;         /* CHANUM - CHA Number Least Significant */
+       u32 caam_id_ms;         /* CAAMVID - CAAM Version ID MS */
+       u32 caam_id_ls;         /* CAAMVID - CAAM Version ID LS */
 };
 
 /* LIODN programming for DMA configuration */
@@ -304,9 +327,12 @@ struct caam_ctrl {
        /* Bus Access Configuration Section                     010-11f */
        /* Read/Writable                                                */
        struct masterid jr_mid[4];      /* JRxLIODNR - JobR LIODN setup */
-       u32 rsvd3[12];
+       u32 rsvd3[11];
+       u32 jrstart;                    /* JRSTART - Job Ring Start Register */
        struct masterid rtic_mid[4];    /* RTICxLIODNR - RTIC LIODN setup */
-       u32 rsvd4[7];
+       u32 rsvd4[5];
+       u32 deco_rsr;                   /* DECORSR - Deco Request Source */
+       u32 rsvd11;
        u32 deco_rq;                    /* DECORR - DECO Request */
        struct partid deco_mid[5];      /* DECOxLIODNR - 1 per DECO */
        u32 rsvd5[22];
@@ -347,7 +373,10 @@ struct caam_ctrl {
 #define MCFGR_DMA_RESET                0x10000000
 #define MCFGR_LONG_PTR         0x00010000 /* Use >32-bit desc addressing */
 #define SCFGR_RDBENABLE                0x00000400
+#define SCFGR_VIRT_EN          0x00008000
 #define DECORR_RQD0ENABLE      0x00000001 /* Enable DECO0 for direct access */
+#define DECORSR_JR0            0x00000001 /* JR to supply TZ, SDID, ICID */
+#define DECORSR_VALID          0x80000000
 #define DECORR_DEN0            0x00010000 /* DECO0 available for access*/
 
 /* AXI read cache control */
@@ -365,6 +394,12 @@ struct caam_ctrl {
 #define MCFGR_AXIPRI           0x00000008 /* Assert AXI priority sideband */
 #define MCFGR_BURST_64         0x00000001 /* Max burst size */
 
+/* JRSTART register offsets */
+#define JRSTART_JR0_START       0x00000001 /* Start Job ring 0 */
+#define JRSTART_JR1_START       0x00000002 /* Start Job ring 1 */
+#define JRSTART_JR2_START       0x00000004 /* Start Job ring 2 */
+#define JRSTART_JR3_START       0x00000008 /* Start Job ring 3 */
+
 /*
  * caam_job_ring - direct job ring setup
  * 1-4 possible per instantiation, base + 1000/2000/3000/4000
index d3505a0..7f592d8 100644 (file)
@@ -1,6 +1,11 @@
 obj-$(CONFIG_CRYPTO_DEV_CCP_DD) += ccp.o
 ccp-objs := ccp-dev.o ccp-ops.o
+ifdef CONFIG_X86
 ccp-objs += ccp-pci.o
+endif
+ifdef CONFIG_ARM64
+ccp-objs += ccp-platform.o
+endif
 
 obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o
 ccp-crypto-objs := ccp-crypto-main.o \
index 2c78161..a7d1106 100644 (file)
@@ -20,7 +20,9 @@
 #include <linux/delay.h>
 #include <linux/hw_random.h>
 #include <linux/cpu.h>
+#ifdef CONFIG_X86
 #include <asm/cpu_device_id.h>
+#endif
 #include <linux/ccp.h>
 
 #include "ccp-dev.h"
@@ -360,6 +362,12 @@ int ccp_init(struct ccp_device *ccp)
                /* Build queue interrupt mask (two interrupts per queue) */
                qim |= cmd_q->int_ok | cmd_q->int_err;
 
+#ifdef CONFIG_ARM64
+               /* For arm64 set the recommended queue cache settings */
+               iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
+                         (CMD_Q_CACHE_INC * i));
+#endif
+
                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
@@ -558,12 +566,15 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
 }
 #endif
 
+#ifdef CONFIG_X86
 static const struct x86_cpu_id ccp_support[] = {
        { X86_VENDOR_AMD, 22, },
 };
+#endif
 
 static int __init ccp_mod_init(void)
 {
+#ifdef CONFIG_X86
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
        int ret;
 
@@ -589,12 +600,30 @@ static int __init ccp_mod_init(void)
 
                break;
        }
+#endif
+
+#ifdef CONFIG_ARM64
+       int ret;
+
+       ret = ccp_platform_init();
+       if (ret)
+               return ret;
+
+       /* Don't leave the driver loaded if init failed */
+       if (!ccp_get_device()) {
+               ccp_platform_exit();
+               return -ENODEV;
+       }
+
+       return 0;
+#endif
 
        return -ENODEV;
 }
 
 static void __exit ccp_mod_exit(void)
 {
+#ifdef CONFIG_X86
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
 
        switch (cpuinfo->x86) {
@@ -602,6 +631,11 @@ static void __exit ccp_mod_exit(void)
                ccp_pci_exit();
                break;
        }
+#endif
+
+#ifdef CONFIG_ARM64
+       ccp_platform_exit();
+#endif
 }
 
 module_init(ccp_mod_init);
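
Note that platform_driver_register() returns 0 even when no matching device is present, which is why the arm64 branch of ccp_mod_init() additionally checks ccp_get_device() and unregisters on failure; IS_ENABLED() cannot replace the #ifdef split here because ccp-pci.o and ccp-platform.o are each compiled only on their own architecture. The guard in isolation, as a sketch:

	/* Sketch: register the driver but refuse to stay loaded device-less. */
	static int __init example_mod_init(void)
	{
		int ret = ccp_platform_init();	/* platform_driver_register() */

		if (ret)
			return ret;
		if (!ccp_get_device()) {	/* no CCP bound during probe */
			ccp_platform_exit();
			return -ENODEV;
		}
		return 0;
	}
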
index 7ec536e..62ff35a 100644 (file)
@@ -23,8 +23,6 @@
 #include <linux/hw_random.h>
 
 
-#define IO_OFFSET                      0x20000
-
 #define MAX_DMAPOOL_NAME_LEN           32
 
 #define MAX_HW_QUEUES                  5
@@ -32,6 +30,9 @@
 
 #define TRNG_RETRIES                   10
 
+#define CACHE_NONE                     0x00
+#define CACHE_WB_NO_ALLOC              0xb7
+
 
 /****** Register Mappings ******/
 #define Q_MASK_REG                     0x000
@@ -50,7 +51,7 @@
 #define CMD_Q_INT_STATUS_BASE          0x214
 #define CMD_Q_STATUS_INCR              0x20
 
-#define CMD_Q_CACHE                    0x228
+#define CMD_Q_CACHE_BASE               0x228
 #define CMD_Q_CACHE_INC                        0x20
 
#define CMD_Q_ERROR(__qs)              ((__qs) & 0x0000003f)
@@ -194,6 +195,7 @@ struct ccp_device {
        void *dev_specific;
        int (*get_irq)(struct ccp_device *ccp);
        void (*free_irq)(struct ccp_device *ccp);
+       unsigned int irq;
 
        /*
         * I/O area used for device communication. The register mapping
@@ -254,12 +256,18 @@ struct ccp_device {
        /* Suspend support */
        unsigned int suspending;
        wait_queue_head_t suspend_queue;
+
+       /* DMA caching attribute support */
+       unsigned int axcache;
 };
 
 
 int ccp_pci_init(void);
 void ccp_pci_exit(void);
 
+int ccp_platform_init(void);
+void ccp_platform_exit(void);
+
 struct ccp_device *ccp_alloc_struct(struct device *dev);
 int ccp_init(struct ccp_device *ccp);
 void ccp_destroy(struct ccp_device *ccp);
index 9ae006d..8729364 100644 (file)
@@ -1606,7 +1606,7 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                goto e_ksb;
 
        ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
-                               true);
+                               false);
        ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
                              CCP_PASSTHRU_BYTESWAP_NOOP);
        if (ret) {
@@ -1623,10 +1623,10 @@ static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                goto e_exp;
 
        ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
-                               true);
+                               false);
        src.address += o_len;   /* Adjust the address for the copy operation */
        ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
-                               true);
+                               false);
        src.address -= o_len;   /* Reset the address to original value */
 
        /* Prepare the output area for the operation */
@@ -1841,20 +1841,20 @@ static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
        /* Copy the ECC modulus */
        ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
 
        /* Copy the first operand */
        ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
                                ecc->u.mm.operand_1_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
 
        if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
                /* Copy the second operand */
                ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
                                        ecc->u.mm.operand_2_len,
-                                       CCP_ECC_OPERAND_SIZE, true);
+                                       CCP_ECC_OPERAND_SIZE, false);
                src.address += CCP_ECC_OPERAND_SIZE;
        }
 
@@ -1960,17 +1960,17 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
 
        /* Copy the ECC modulus */
        ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
 
        /* Copy the first point X and Y coordinate */
        ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
                                ecc->u.pm.point_1.x_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
        ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
                                ecc->u.pm.point_1.y_len,
-                               CCP_ECC_OPERAND_SIZE, true);
+                               CCP_ECC_OPERAND_SIZE, false);
        src.address += CCP_ECC_OPERAND_SIZE;
 
        /* Set the first point Z coordinate to 1 */
@@ -1981,11 +1981,11 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                /* Copy the second point X and Y coordinate */
                ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
                                        ecc->u.pm.point_2.x_len,
-                                       CCP_ECC_OPERAND_SIZE, true);
+                                       CCP_ECC_OPERAND_SIZE, false);
                src.address += CCP_ECC_OPERAND_SIZE;
                ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
                                        ecc->u.pm.point_2.y_len,
-                                       CCP_ECC_OPERAND_SIZE, true);
+                                       CCP_ECC_OPERAND_SIZE, false);
                src.address += CCP_ECC_OPERAND_SIZE;
 
                /* Set the second point Z coordinate to 1 */
@@ -1995,14 +1995,14 @@ static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
                /* Copy the Domain "a" parameter */
                ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
                                        ecc->u.pm.domain_a_len,
-                                       CCP_ECC_OPERAND_SIZE, true);
+                                       CCP_ECC_OPERAND_SIZE, false);
                src.address += CCP_ECC_OPERAND_SIZE;
 
                if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
                        /* Copy the scalar value */
                        ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
                                                ecc->u.pm.scalar_len,
-                                               CCP_ECC_OPERAND_SIZE, true);
+                                               CCP_ECC_OPERAND_SIZE, false);
                        src.address += CCP_ECC_OPERAND_SIZE;
                }
        }
index 0d74623..180cc87 100644 (file)
 
 #include <linux/module.h>
 #include <linux/kernel.h>
+#include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
+#include <linux/dma-mapping.h>
 #include <linux/kthread.h>
 #include <linux/sched.h>
 #include <linux/interrupt.h>
@@ -24,6 +26,8 @@
 #include "ccp-dev.h"
 
 #define IO_BAR                         2
+#define IO_OFFSET                      0x20000
+
 #define MSIX_VECTORS                   2
 
 struct ccp_msix {
@@ -89,7 +93,8 @@ static int ccp_get_msi_irq(struct ccp_device *ccp)
        if (ret)
                return ret;
 
-       ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
+       ccp->irq = pdev->irq;
+       ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
        if (ret) {
                dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
                goto e_msi;
@@ -136,7 +141,7 @@ static void ccp_free_irqs(struct ccp_device *ccp)
                                 dev);
                pci_disable_msix(pdev);
        } else {
-               free_irq(pdev->irq, dev);
+               free_irq(ccp->irq, dev);
                pci_disable_msi(pdev);
        }
 }
@@ -147,21 +152,12 @@ static int ccp_find_mmio_area(struct ccp_device *ccp)
        struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
        resource_size_t io_len;
        unsigned long io_flags;
-       int bar;
 
        io_flags = pci_resource_flags(pdev, IO_BAR);
        io_len = pci_resource_len(pdev, IO_BAR);
        if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
                return IO_BAR;
 
-       for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
-               io_flags = pci_resource_flags(pdev, bar);
-               io_len = pci_resource_len(pdev, bar);
-               if ((io_flags & IORESOURCE_MEM) &&
-                   (io_len >= (IO_OFFSET + 0x800)))
-                       return bar;
-       }
-
        return -EIO;
 }
 
@@ -214,20 +210,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        ccp->io_regs = ccp->io_map + IO_OFFSET;
 
-       ret = dma_set_mask(dev, DMA_BIT_MASK(48));
-       if (ret == 0) {
-               ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+       if (ret) {
+               ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
                if (ret) {
-                       dev_err(dev,
-                               "pci_set_consistent_dma_mask failed (%d)\n",
+                       dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n",
                                ret);
-                       goto e_bar0;
-               }
-       } else {
-               ret = dma_set_mask(dev, DMA_BIT_MASK(32));
-               if (ret) {
-                       dev_err(dev, "pci_set_dma_mask failed (%d)\n", ret);
-                       goto e_bar0;
+                       goto e_iomap;
                }
        }
 
@@ -235,13 +224,13 @@ static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ret = ccp_init(ccp);
        if (ret)
-               goto e_bar0;
+               goto e_iomap;
 
        dev_notice(dev, "enabled\n");
 
        return 0;
 
-e_bar0:
+e_iomap:
        pci_iounmap(pdev, ccp->io_map);
 
 e_device:
diff --git a/drivers/crypto/ccp/ccp-platform.c b/drivers/crypto/ccp/ccp-platform.c
new file mode 100644 (file)
index 0000000..b0a2806
--- /dev/null
@@ -0,0 +1,230 @@
+/*
+ * AMD Cryptographic Coprocessor (CCP) driver
+ *
+ * Copyright (C) 2014 Advanced Micro Devices, Inc.
+ *
+ * Author: Tom Lendacky <thomas.lendacky@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ioport.h>
+#include <linux/dma-mapping.h>
+#include <linux/kthread.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/ccp.h>
+#include <linux/of.h>
+
+#include "ccp-dev.h"
+
+
+static int ccp_get_irq(struct ccp_device *ccp)
+{
+       struct device *dev = ccp->dev;
+       struct platform_device *pdev = container_of(dev,
+                                       struct platform_device, dev);
+       int ret;
+
+       ret = platform_get_irq(pdev, 0);
+       if (ret < 0)
+               return ret;
+
+       ccp->irq = ret;
+       ret = request_irq(ccp->irq, ccp_irq_handler, 0, "ccp", dev);
+       if (ret) {
+               dev_notice(dev, "unable to allocate IRQ (%d)\n", ret);
+               return ret;
+       }
+
+       return 0;
+}
+
+static int ccp_get_irqs(struct ccp_device *ccp)
+{
+       struct device *dev = ccp->dev;
+       int ret;
+
+       ret = ccp_get_irq(ccp);
+       if (!ret)
+               return 0;
+
+       /* Couldn't get an interrupt */
+       dev_notice(dev, "could not enable interrupts (%d)\n", ret);
+
+       return ret;
+}
+
+static void ccp_free_irqs(struct ccp_device *ccp)
+{
+       struct device *dev = ccp->dev;
+
+       free_irq(ccp->irq, dev);
+}
+
+static struct resource *ccp_find_mmio_area(struct ccp_device *ccp)
+{
+       struct device *dev = ccp->dev;
+       struct platform_device *pdev = container_of(dev,
+                                       struct platform_device, dev);
+       struct resource *ior;
+
+       ior = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       if (ior && (resource_size(ior) >= 0x800))
+               return ior;
+
+       return NULL;
+}
+
+static int ccp_platform_probe(struct platform_device *pdev)
+{
+       struct ccp_device *ccp;
+       struct device *dev = &pdev->dev;
+       struct resource *ior;
+       int ret;
+
+       ret = -ENOMEM;
+       ccp = ccp_alloc_struct(dev);
+       if (!ccp)
+               goto e_err;
+
+       ccp->dev_specific = NULL;
+       ccp->get_irq = ccp_get_irqs;
+       ccp->free_irq = ccp_free_irqs;
+
+       ior = ccp_find_mmio_area(ccp);
+       ccp->io_map = devm_ioremap_resource(dev, ior);
+       if (IS_ERR(ccp->io_map)) {
+               ret = PTR_ERR(ccp->io_map);
+               goto e_free;
+       }
+       ccp->io_regs = ccp->io_map;
+
+       if (!dev->dma_mask)
+               dev->dma_mask = &dev->coherent_dma_mask;
+       *(dev->dma_mask) = DMA_BIT_MASK(48);
+       dev->coherent_dma_mask = DMA_BIT_MASK(48);
+
+       if (of_property_read_bool(dev->of_node, "dma-coherent"))
+               ccp->axcache = CACHE_WB_NO_ALLOC;
+       else
+               ccp->axcache = CACHE_NONE;
+
+       dev_set_drvdata(dev, ccp);
+
+       ret = ccp_init(ccp);
+       if (ret)
+               goto e_free;
+
+       dev_notice(dev, "enabled\n");
+
+       return 0;
+
+e_free:
+       kfree(ccp);
+
+e_err:
+       dev_notice(dev, "initialization failed\n");
+       return ret;
+}
+
+static int ccp_platform_remove(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ccp_device *ccp = dev_get_drvdata(dev);
+
+       ccp_destroy(ccp);
+
+       kfree(ccp);
+
+       dev_notice(dev, "disabled\n");
+
+       return 0;
+}
+
+#ifdef CONFIG_PM
+static int ccp_platform_suspend(struct platform_device *pdev,
+                               pm_message_t state)
+{
+       struct device *dev = &pdev->dev;
+       struct ccp_device *ccp = dev_get_drvdata(dev);
+       unsigned long flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+       ccp->suspending = 1;
+
+       /* Wake all the queue kthreads to prepare for suspend */
+       for (i = 0; i < ccp->cmd_q_count; i++)
+               wake_up_process(ccp->cmd_q[i].kthread);
+
+       spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+       /* Wait for all queue kthreads to say they're done */
+       while (!ccp_queues_suspended(ccp))
+               wait_event_interruptible(ccp->suspend_queue,
+                                        ccp_queues_suspended(ccp));
+
+       return 0;
+}
+
+static int ccp_platform_resume(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct ccp_device *ccp = dev_get_drvdata(dev);
+       unsigned long flags;
+       unsigned int i;
+
+       spin_lock_irqsave(&ccp->cmd_lock, flags);
+
+       ccp->suspending = 0;
+
+       /* Wake up all the kthreads */
+       for (i = 0; i < ccp->cmd_q_count; i++) {
+               ccp->cmd_q[i].suspended = 0;
+               wake_up_process(ccp->cmd_q[i].kthread);
+       }
+
+       spin_unlock_irqrestore(&ccp->cmd_lock, flags);
+
+       return 0;
+}
+#endif
+
+static const struct of_device_id ccp_platform_ids[] = {
+       { .compatible = "amd,ccp-seattle-v1a" },
+       { },
+};
+
+static struct platform_driver ccp_platform_driver = {
+       .driver = {
+               .name = "AMD Cryptographic Coprocessor",
+               .owner = THIS_MODULE,
+               .of_match_table = ccp_platform_ids,
+       },
+       .probe = ccp_platform_probe,
+       .remove = ccp_platform_remove,
+#ifdef CONFIG_PM
+       .suspend = ccp_platform_suspend,
+       .resume = ccp_platform_resume,
+#endif
+};
+
+int ccp_platform_init(void)
+{
+       return platform_driver_register(&ccp_platform_driver);
+}
+
+void ccp_platform_exit(void)
+{
+       platform_driver_unregister(&ccp_platform_driver);
+}
index 502edf0..544f6d3 100644 (file)
@@ -1247,7 +1247,7 @@ static struct vio_device_id nx842_driver_ids[] = {
 static struct vio_driver nx842_driver = {
        .name = MODULE_NAME,
        .probe = nx842_probe,
-       .remove = nx842_remove,
+       .remove = __exit_p(nx842_remove),
        .get_desired_dma = nx842_get_desired_dma,
        .id_table = nx842_driver_ids,
 };
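
The __exit_p() wrapper used above lets the .remove pointer drop away together with the __exit section when the driver is built in: for modular builds it passes the function through, otherwise it substitutes NULL. Roughly, from <linux/init.h>:

	#ifdef MODULE
	#define __exit_p(x) x
	#else
	#define __exit_p(x) NULL
	#endif
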
diff --git a/drivers/crypto/qat/Kconfig b/drivers/crypto/qat/Kconfig
new file mode 100644 (file)
index 0000000..49bede2
--- /dev/null
@@ -0,0 +1,23 @@
+config CRYPTO_DEV_QAT
+       tristate
+       select CRYPTO_AEAD
+       select CRYPTO_AUTHENC
+       select CRYPTO_ALGAPI
+       select CRYPTO_AES
+       select CRYPTO_CBC
+       select CRYPTO_SHA1
+       select CRYPTO_SHA256
+       select CRYPTO_SHA512
+       select FW_LOADER
+
+config CRYPTO_DEV_QAT_DH895xCC
+       tristate "Support for Intel(R) DH895xCC"
+       depends on X86 && PCI
+       default n
+       select CRYPTO_DEV_QAT
+       help
+         Support for Intel(R) DH895xcc with Intel(R) QuickAssist Technology
+         for accelerating crypto and compression workloads.
+
+         To compile this as a module, choose M here: the module
+         will be called qat_dh895xcc.
diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile
new file mode 100644 (file)
index 0000000..d11481b
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_CRYPTO_DEV_QAT) += qat_common/
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/
diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile
new file mode 100644 (file)
index 0000000..e0424dc
--- /dev/null
@@ -0,0 +1,14 @@
+obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o
+intel_qat-objs := adf_cfg.o \
+       adf_ctl_drv.o \
+       adf_dev_mgr.o \
+       adf_init.o \
+       adf_accel_engine.o \
+       adf_aer.o \
+       adf_transport.o \
+       qat_crypto.o \
+       qat_algs.o \
+       qat_uclo.o \
+       qat_hal.o
+
+intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
new file mode 100644 (file)
index 0000000..9282381
--- /dev/null
@@ -0,0 +1,205 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_ACCEL_DEVICES_H_
+#define ADF_ACCEL_DEVICES_H_
+#include <linux/module.h>
+#include <linux/atomic.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/io.h>
+#include "adf_cfg_common.h"
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define ADF_DH895XCC_DEVICE_NAME "dh895xcc"
+#define ADF_DH895XCC_PCI_DEVICE_ID 0x435
+#define ADF_DH895XCC_PMISC_BAR 1
+#define ADF_DH895XCC_ETR_BAR 2
+#define ADF_PCI_MAX_BARS 3
+#define ADF_DEVICE_NAME_LENGTH 32
+#define ADF_ETR_MAX_RINGS_PER_BANK 16
+#define ADF_MAX_MSIX_VECTOR_NAME 16
+#define ADF_DEVICE_NAME_PREFIX "qat_"
+
+enum adf_accel_capabilities {
+       ADF_ACCEL_CAPABILITIES_NULL = 0,
+       ADF_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC = 1,
+       ADF_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC = 2,
+       ADF_ACCEL_CAPABILITIES_CIPHER = 4,
+       ADF_ACCEL_CAPABILITIES_AUTHENTICATION = 8,
+       ADF_ACCEL_CAPABILITIES_COMPRESSION = 32,
+       ADF_ACCEL_CAPABILITIES_LZS_COMPRESSION = 64,
+       ADF_ACCEL_CAPABILITIES_RANDOM_NUMBER = 128
+};
+
+struct adf_bar {
+       resource_size_t base_addr;
+       void __iomem *virt_addr;
+       resource_size_t size;
+} __packed;
+
+struct adf_accel_msix {
+       struct msix_entry *entries;
+       char **names;
+} __packed;
+
+struct adf_accel_pci {
+       struct pci_dev *pci_dev;
+       struct adf_accel_msix msix_entries;
+       struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
+       uint8_t revid;
+       uint8_t sku;
+} __packed;
+
+enum dev_state {
+       DEV_DOWN = 0,
+       DEV_UP
+};
+
+enum dev_sku_info {
+       DEV_SKU_1 = 0,
+       DEV_SKU_2,
+       DEV_SKU_3,
+       DEV_SKU_4,
+       DEV_SKU_UNKNOWN,
+};
+
+static inline const char *get_sku_info(enum dev_sku_info info)
+{
+       switch (info) {
+       case DEV_SKU_1:
+               return "SKU1";
+       case DEV_SKU_2:
+               return "SKU2";
+       case DEV_SKU_3:
+               return "SKU3";
+       case DEV_SKU_4:
+               return "SKU4";
+       case DEV_SKU_UNKNOWN:
+       default:
+               break;
+       }
+       return "Unknown SKU";
+}
+
+struct adf_hw_device_class {
+       const char *name;
+       const enum adf_device_type type;
+       uint32_t instances;
+} __packed;
+
+struct adf_cfg_device_data;
+struct adf_accel_dev;
+struct adf_etr_data;
+struct adf_etr_ring_data;
+
+struct adf_hw_device_data {
+       struct adf_hw_device_class *dev_class;
+       uint32_t (*get_accel_mask)(uint32_t fuse);
+       uint32_t (*get_ae_mask)(uint32_t fuse);
+       uint32_t (*get_misc_bar_id)(struct adf_hw_device_data *self);
+       uint32_t (*get_etr_bar_id)(struct adf_hw_device_data *self);
+       uint32_t (*get_num_aes)(struct adf_hw_device_data *self);
+       uint32_t (*get_num_accels)(struct adf_hw_device_data *self);
+       enum dev_sku_info (*get_sku)(struct adf_hw_device_data *self);
+       void (*hw_arb_ring_enable)(struct adf_etr_ring_data *ring);
+       void (*hw_arb_ring_disable)(struct adf_etr_ring_data *ring);
+       int (*alloc_irq)(struct adf_accel_dev *accel_dev);
+       void (*free_irq)(struct adf_accel_dev *accel_dev);
+       void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+       const char *fw_name;
+       uint32_t pci_dev_id;
+       uint32_t fuses;
+       uint32_t accel_capabilities_mask;
+       uint16_t accel_mask;
+       uint16_t ae_mask;
+       uint16_t tx_rings_mask;
+       uint8_t tx_rx_gap;
+       uint8_t instance_id;
+       uint8_t num_banks;
+       uint8_t num_accel;
+       uint8_t num_logical_accel;
+       uint8_t num_engines;
+} __packed;
+
+/* CSR write macro */
+#define ADF_CSR_WR(csr_base, csr_offset, val) \
+       __raw_writel(val, csr_base + csr_offset)
+
+/* CSR read macro */
+#define ADF_CSR_RD(csr_base, csr_offset) __raw_readl(csr_base + csr_offset)
+
+#define GET_DEV(accel_dev) ((accel_dev)->accel_pci_dev.pci_dev->dev)
+#define GET_BARS(accel_dev) ((accel_dev)->accel_pci_dev.pci_bars)
+#define GET_HW_DATA(accel_dev) (accel_dev->hw_device)
+#define GET_MAX_BANKS(accel_dev) (GET_HW_DATA(accel_dev)->num_banks)
+#define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines)
+#define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev
+
+struct adf_admin_comms;
+struct icp_qat_fw_loader_handle;
+struct adf_fw_loader_data {
+       struct icp_qat_fw_loader_handle *fw_loader;
+       const struct firmware *uof_fw;
+};
+
+struct adf_accel_dev {
+       struct adf_etr_data *transport;
+       struct adf_hw_device_data *hw_device;
+       struct adf_cfg_device_data *cfg;
+       struct adf_fw_loader_data *fw_loader;
+       struct adf_admin_comms *admin;
+       struct list_head crypto_list;
+       unsigned long status;
+       atomic_t ref_count;
+       struct dentry *debugfs_dir;
+       struct list_head list;
+       struct module *owner;
+       uint8_t accel_id;
+       uint8_t numa_node;
+       struct adf_accel_pci accel_pci_dev;
+} __packed;
+#endif
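
The accessor macros above are intended to keep callers from open-coding the pci_dev chains; a hypothetical caller (names invented for illustration):

	static void example_report(struct adf_accel_dev *accel_dev)
	{
		struct pci_dev *pdev = accel_to_pci_dev(accel_dev);

		dev_info(&GET_DEV(accel_dev), "accel_id %u revid %#x (%s)\n",
			 accel_dev->accel_id, accel_dev->accel_pci_dev.revid,
			 pci_name(pdev));
	}
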
diff --git a/drivers/crypto/qat/qat_common/adf_accel_engine.c b/drivers/crypto/qat/qat_common/adf_accel_engine.c
new file mode 100644 (file)
index 0000000..c77453b
--- /dev/null
@@ -0,0 +1,168 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/firmware.h>
+#include <linux/pci.h>
+#include "adf_cfg.h"
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       void *uof_addr;
+       uint32_t uof_size;
+
+       if (request_firmware(&loader_data->uof_fw, hw_device->fw_name,
+                            &accel_dev->accel_pci_dev.pci_dev->dev)) {
+               pr_err("QAT: Failed to load firmware %s\n", hw_device->fw_name);
+               return -EFAULT;
+       }
+
+       uof_size = loader_data->uof_fw->size;
+       uof_addr = (void *)loader_data->uof_fw->data;
+       if (qat_uclo_map_uof_obj(loader_data->fw_loader, uof_addr, uof_size)) {
+               pr_err("QAT: Failed to map UOF\n");
+               goto out_err;
+       }
+       if (qat_uclo_wr_all_uimage(loader_data->fw_loader)) {
+               pr_err("QAT: Failed to map UOF\n");
+               goto out_err;
+       }
+       return 0;
+
+out_err:
+       release_firmware(loader_data->uof_fw);
+       return -EFAULT;
+}
+
+int adf_ae_fw_release(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+       release_firmware(loader_data->uof_fw);
+       qat_uclo_del_uof_obj(loader_data->fw_loader);
+       qat_hal_deinit(loader_data->fw_loader);
+       loader_data->fw_loader = NULL;
+       return 0;
+}
+
+int adf_ae_start(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+       for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+               if (hw_data->ae_mask & (1 << ae)) {
+                       qat_hal_start(loader_data->fw_loader, ae, 0xFF);
+                       ae_ctr++;
+               }
+       }
+       pr_info("QAT: qat_dev%d started %d acceleration engines\n",
+               accel_dev->accel_id, ae_ctr);
+       return 0;
+}
+
+int adf_ae_stop(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t ae_ctr, ae, max_aes = GET_MAX_ACCELENGINES(accel_dev);
+
+       for (ae = 0, ae_ctr = 0; ae < max_aes; ae++) {
+               if (hw_data->ae_mask & (1 << ae)) {
+                       qat_hal_stop(loader_data->fw_loader, ae, 0xFF);
+                       ae_ctr++;
+               }
+       }
+       pr_info("QAT: qat_dev%d stopped %d acceleration engines\n",
+               accel_dev->accel_id, ae_ctr);
+       return 0;
+}
+
+static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+{
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
+
+       qat_hal_reset(loader_data->fw_loader);
+       if (qat_hal_clr_reset(loader_data->fw_loader))
+               return -EFAULT;
+
+       return 0;
+}
+
+int adf_ae_init(struct adf_accel_dev *accel_dev)
+{
+       struct adf_fw_loader_data *loader_data;
+
+       loader_data = kzalloc(sizeof(*loader_data), GFP_KERNEL);
+       if (!loader_data)
+               return -ENOMEM;
+
+       accel_dev->fw_loader = loader_data;
+       if (qat_hal_init(accel_dev)) {
+               pr_err("QAT: Failed to init the AEs\n");
+               kfree(loader_data);
+               return -EFAULT;
+       }
+       if (adf_ae_reset(accel_dev, 0)) {
+               pr_err("QAT: Failed to reset the AEs\n");
+               qat_hal_deinit(loader_data->fw_loader);
+               kfree(loader_data);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev)
+{
+       kfree(accel_dev->fw_loader);
+       accel_dev->fw_loader = NULL;
+       return 0;
+}
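
The engine helpers above assume a fixed bring-up order (the actual sequencing presumably lives in adf_init.c, added elsewhere in this series); a sketch with error handling pared down to the allocation done in adf_ae_init():

	static int example_ae_bring_up(struct adf_accel_dev *accel_dev)
	{
		int ret;

		ret = adf_ae_init(accel_dev);		/* alloc loader, reset AEs */
		if (ret)
			return ret;
		ret = adf_ae_fw_load(accel_dev);	/* request + map the UOF */
		if (ret) {
			adf_ae_shutdown(accel_dev);	/* simplified teardown */
			return ret;
		}
		return adf_ae_start(accel_dev);		/* start all masked AEs */
	}
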
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
new file mode 100644 (file)
index 0000000..c29d4c3
--- /dev/null
@@ -0,0 +1,259 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+
+static struct workqueue_struct *device_reset_wq;
+
+static pci_ers_result_t adf_error_detected(struct pci_dev *pdev,
+                                          pci_channel_state_t state)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       pr_info("QAT: Acceleration driver hardware error detected.\n");
+       if (!accel_dev) {
+               pr_err("QAT: Can't find acceleration device\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       if (state == pci_channel_io_perm_failure) {
+               pr_err("QAT: Can't recover from device error\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* reset dev data */
+struct adf_reset_dev_data {
+       int mode;
+       struct adf_accel_dev *accel_dev;
+       struct completion compl;
+       struct work_struct reset_work;
+};
+
+#define PPDSTAT_OFFSET 0x7E
+static void adf_dev_restore(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+       struct pci_dev *parent = pdev->bus->self;
+       uint16_t ppdstat = 0, bridge_ctl = 0;
+       int pending = 0;
+
+       pr_info("QAT: Reseting device qat_dev%d\n", accel_dev->accel_id);
+       pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+       pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+       if (pending) {
+               int ctr = 0;
+
+               do {
+                       msleep(100);
+                       pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
+                       pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
+               } while (pending && ctr++ < 10);
+       }
+
+       if (pending)
+               pr_info("QAT: Transaction still in progress. Proceeding\n");
+
+       pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
+       bridge_ctl |= PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+       msleep(100);
+       bridge_ctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+       pci_write_config_word(parent, PCI_BRIDGE_CONTROL, bridge_ctl);
+       msleep(100);
+       pci_restore_state(pdev);
+       pci_save_state(pdev);
+}
+
+static void adf_device_reset_worker(struct work_struct *work)
+{
+       struct adf_reset_dev_data *reset_data =
+                 container_of(work, struct adf_reset_dev_data, reset_work);
+       struct adf_accel_dev *accel_dev = reset_data->accel_dev;
+
+       adf_dev_restarting_notify(accel_dev);
+       adf_dev_stop(accel_dev);
+       adf_dev_restore(accel_dev);
+       if (adf_dev_start(accel_dev)) {
+               /* The device hung and we can't restart it, so stop here */
+               dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
+               kfree(reset_data);
+               WARN(1, "QAT: device restart failed. Device is unusable\n");
+               return;
+       }
+       adf_dev_restarted_notify(accel_dev);
+       clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+
+       /* The dev is back alive. Notify the caller if in sync mode */
+       if (reset_data->mode == ADF_DEV_RESET_SYNC)
+               complete(&reset_data->compl);
+       else
+               kfree(reset_data);
+}
+
+static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
+                                     enum adf_dev_reset_mode mode)
+{
+       struct adf_reset_dev_data *reset_data;
+
+       /* Nothing to do if the device is not started or is already resetting */
+       if (!adf_dev_started(accel_dev) ||
+           test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+               return 0;
+
+       set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+       reset_data = kzalloc(sizeof(*reset_data), GFP_ATOMIC);
+       if (!reset_data)
+               return -ENOMEM;
+       reset_data->accel_dev = accel_dev;
+       init_completion(&reset_data->compl);
+       reset_data->mode = mode;
+       INIT_WORK(&reset_data->reset_work, adf_device_reset_worker);
+       queue_work(device_reset_wq, &reset_data->reset_work);
+
+       /* If in sync mode wait for the result */
+       if (mode == ADF_DEV_RESET_SYNC) {
+               int ret = 0;
+               /* Maximum device reset time is 10 seconds */
+               unsigned long wait_jiffies = msecs_to_jiffies(10000);
+               unsigned long timeout = wait_for_completion_timeout(
+                                  &reset_data->compl, wait_jiffies);
+               if (!timeout) {
+                       pr_err("QAT: Reset device timeout expired\n");
+                       ret = -EFAULT;
+               }
+               kfree(reset_data);
+               return ret;
+       }
+       return 0;
+}
+
+static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Can't find acceleration device\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+       pci_cleanup_aer_uncorrect_error_status(pdev);
+       if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC))
+               return PCI_ERS_RESULT_DISCONNECT;
+
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void adf_resume(struct pci_dev *pdev)
+{
+       pr_info("QAT: Acceleration driver reset completed\n");
+       pr_info("QAT: Device is up and running\n");
+}
+
+static struct pci_error_handlers adf_err_handler = {
+       .error_detected = adf_error_detected,
+       .slot_reset = adf_slot_reset,
+       .resume = adf_resume,
+};
+
+/**
+ * adf_enable_aer() - Enable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ * @adf:        PCI device driver owning the given acceleration device.
+ *
+ * Function enables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       adf->err_handler = &adf_err_handler;
+       pci_enable_pcie_error_reporting(pdev);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_aer);
+
+/**
+ * adf_disable_aer() - Disable Advanced Error Reporting for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function disables PCI Advanced Error Reporting for the
+ * QAT acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_disable_aer(struct adf_accel_dev *accel_dev)
+{
+       struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
+
+       pci_disable_pcie_error_reporting(pdev);
+}
+EXPORT_SYMBOL_GPL(adf_disable_aer);
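
The two exported helpers above are the only AER touch points a device-specific
driver needs. A minimal sketch of how a probe/remove pair might wire them up,
assuming the usual QAT device registration has already happened (all example_*
names are hypothetical, not part of this patch):

	static struct pci_driver example_qat_driver;	/* device specific driver */

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *ent)
	{
		struct adf_accel_dev *accel_dev;

		/* device specific allocation and adf_devmgr_add_dev() elided */
		accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
		if (!accel_dev)
			return -ENODEV;
		/* installs adf_err_handler and enables PCIe error reporting */
		return adf_enable_aer(accel_dev, &example_qat_driver);
	}

	static void example_remove(struct pci_dev *pdev)
	{
		struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

		if (accel_dev)
			adf_disable_aer(accel_dev);
	}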
+
+int adf_init_aer(void)
+{
+       device_reset_wq = create_workqueue("qat_device_reset_wq");
+       return (device_reset_wq == NULL) ? -EFAULT : 0;
+}
+
+void adf_exit_aer(void)
+{
+       if (device_reset_wq)
+               destroy_workqueue(device_reset_wq);
+       device_reset_wq = NULL;
+}
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
new file mode 100644 (file)
index 0000000..aba7f1d
--- /dev/null
@@ -0,0 +1,361 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+
+static DEFINE_MUTEX(qat_cfg_read_lock);
+
+static void *qat_dev_cfg_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+       mutex_lock(&qat_cfg_read_lock);
+       return seq_list_start(&dev_cfg->sec_list, *pos);
+}
+
+static int qat_dev_cfg_show(struct seq_file *sfile, void *v)
+{
+       struct list_head *list;
+       struct adf_cfg_section *sec =
+                               list_entry(v, struct adf_cfg_section, list);
+
+       seq_printf(sfile, "[%s]\n", sec->name);
+       list_for_each(list, &sec->param_head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list, struct adf_cfg_key_val, list);
+               seq_printf(sfile, "%s = %s\n", ptr->key, ptr->val);
+       }
+       return 0;
+}
+
+static void *qat_dev_cfg_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_cfg_device_data *dev_cfg = sfile->private;
+
+       return seq_list_next(v, &dev_cfg->sec_list, pos);
+}
+
+static void qat_dev_cfg_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&qat_cfg_read_lock);
+}
+
+static const struct seq_operations qat_dev_cfg_sops = {
+       .start = qat_dev_cfg_start,
+       .next = qat_dev_cfg_next,
+       .stop = qat_dev_cfg_stop,
+       .show = qat_dev_cfg_show
+};
+
+static int qat_dev_cfg_open(struct inode *inode, struct file *file)
+{
+       int ret = seq_open(file, &qat_dev_cfg_sops);
+
+       if (!ret) {
+               struct seq_file *seq_f = file->private_data;
+
+               seq_f->private = inode->i_private;
+       }
+       return ret;
+}
+
+static const struct file_operations qat_dev_cfg_fops = {
+       .open = qat_dev_cfg_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release
+};
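
For reference, these seq_file hooks render the configuration table as an
INI-style listing when the dev_cfg file (created below in adf_cfg_dev_add())
is read from debugfs; the section and key names here are purely illustrative:

	[GENERAL]
	ServicesEnabled = cy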
+
+/**
+ * adf_cfg_dev_add() - Create an acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function creates a configuration table for the given acceleration device.
+ * The table stores device specific config values.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data;
+
+       dev_cfg_data = kzalloc(sizeof(*dev_cfg_data), GFP_KERNEL);
+       if (!dev_cfg_data)
+               return -ENOMEM;
+       INIT_LIST_HEAD(&dev_cfg_data->sec_list);
+       init_rwsem(&dev_cfg_data->lock);
+       accel_dev->cfg = dev_cfg_data;
+
+       /* accel_dev->debugfs_dir should always be non-NULL here */
+       dev_cfg_data->debug = debugfs_create_file("dev_cfg", S_IRUSR,
+                                                 accel_dev->debugfs_dir,
+                                                 dev_cfg_data,
+                                                 &qat_dev_cfg_fops);
+       if (!dev_cfg_data->debug) {
+               pr_err("QAT: Failed to create qat cfg debugfs entry.\n");
+               kfree(dev_cfg_data);
+               accel_dev->cfg = NULL;
+               return -EFAULT;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_add);
+
+static void adf_cfg_section_del_all(struct list_head *head);
+
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+       down_write(&dev_cfg_data->lock);
+       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+       up_write(&dev_cfg_data->lock);
+}
+
+/**
+ * adf_cfg_dev_remove() - Clears acceleration device configuration table.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes configuration table from the given acceleration device
+ * and frees all allocated memory.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev)
+{
+       struct adf_cfg_device_data *dev_cfg_data = accel_dev->cfg;
+
+       down_write(&dev_cfg_data->lock);
+       adf_cfg_section_del_all(&dev_cfg_data->sec_list);
+       up_write(&dev_cfg_data->lock);
+       debugfs_remove(dev_cfg_data->debug);
+       kfree(dev_cfg_data);
+       accel_dev->cfg = NULL;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_dev_remove);
+
+static void adf_cfg_keyval_add(struct adf_cfg_key_val *new,
+                              struct adf_cfg_section *sec)
+{
+       list_add_tail(&new->list, &sec->param_head);
+}
+
+static void adf_cfg_keyval_del_all(struct list_head *head)
+{
+       struct list_head *list_ptr, *tmp;
+
+       list_for_each_prev_safe(list_ptr, tmp, head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list_ptr, struct adf_cfg_key_val, list);
+               list_del(list_ptr);
+               kfree(ptr);
+       }
+}
+
+static void adf_cfg_section_del_all(struct list_head *head)
+{
+       struct adf_cfg_section *ptr;
+       struct list_head *list, *tmp;
+
+       list_for_each_prev_safe(list, tmp, head) {
+               ptr = list_entry(list, struct adf_cfg_section, list);
+               adf_cfg_keyval_del_all(&ptr->param_head);
+               list_del(list);
+               kfree(ptr);
+       }
+}
+
+static struct adf_cfg_key_val *adf_cfg_key_value_find(struct adf_cfg_section *s,
+                                                     const char *key)
+{
+       struct list_head *list;
+
+       list_for_each(list, &s->param_head) {
+               struct adf_cfg_key_val *ptr =
+                       list_entry(list, struct adf_cfg_key_val, list);
+               if (!strcmp(ptr->key, key))
+                       return ptr;
+       }
+       return NULL;
+}
+
+static struct adf_cfg_section *adf_cfg_sec_find(struct adf_accel_dev *accel_dev,
+                                               const char *sec_name)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct list_head *list;
+
+       list_for_each(list, &cfg->sec_list) {
+               struct adf_cfg_section *ptr =
+                       list_entry(list, struct adf_cfg_section, list);
+               if (!strcmp(ptr->name, sec_name))
+                       return ptr;
+       }
+       return NULL;
+}
+
+static int adf_cfg_key_val_get(struct adf_accel_dev *accel_dev,
+                              const char *sec_name,
+                              const char *key_name,
+                              char *val)
+{
+       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, sec_name);
+       struct adf_cfg_key_val *keyval = NULL;
+
+       if (sec)
+               keyval = adf_cfg_key_value_find(sec, key_name);
+       if (keyval) {
+               memcpy(val, keyval->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES);
+               return 0;
+       }
+       return -1;
+}
+
+/**
+ * adf_cfg_add_key_value_param() - Add key-value config entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @section_name: Name of the section where the param will be added
+ * @key: The key string
+ * @val: Value for the given @key
+ * @type: Type - string, int or address
+ *
+ * Function adds a configuration key-value entry in the appropriate section
+ * of the given acceleration device's configuration table.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+                               const char *section_name,
+                               const char *key, const void *val,
+                               enum adf_cfg_val_type type)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct adf_cfg_key_val *key_val;
+       struct adf_cfg_section *section = adf_cfg_sec_find(accel_dev,
+                                                          section_name);
+       if (!section)
+               return -EFAULT;
+
+       key_val = kzalloc(sizeof(*key_val), GFP_KERNEL);
+       if (!key_val)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&key_val->list);
+       strlcpy(key_val->key, key, sizeof(key_val->key));
+
+       if (type == ADF_DEC) {
+               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+                        "%ld", (*((long *)val)));
+       } else if (type == ADF_STR) {
+               strlcpy(key_val->val, (char *)val, sizeof(key_val->val));
+       } else if (type == ADF_HEX) {
+               snprintf(key_val->val, ADF_CFG_MAX_VAL_LEN_IN_BYTES,
+                        "0x%lx", (unsigned long)val);
+       } else {
+               pr_err("QAT: Unknown type given.\n");
+               kfree(key_val);
+               return -1;
+       }
+       key_val->type = type;
+       down_write(&cfg->lock);
+       adf_cfg_keyval_add(key_val, section);
+       up_write(&cfg->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_add_key_value_param);
+
+/**
+ * adf_cfg_section_add() - Add config section entry to config table.
+ * @accel_dev:  Pointer to acceleration device.
+ * @name: Name of the section
+ *
+ * Function adds a configuration section where key-value entries
+ * will be stored.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       struct adf_cfg_section *sec = adf_cfg_sec_find(accel_dev, name);
+
+       if (sec)
+               return 0;
+
+       sec = kzalloc(sizeof(*sec), GFP_KERNEL);
+       if (!sec)
+               return -ENOMEM;
+
+       strlcpy(sec->name, name, sizeof(sec->name));
+       INIT_LIST_HEAD(&sec->param_head);
+       down_write(&cfg->lock);
+       list_add_tail(&sec->list, &cfg->sec_list);
+       up_write(&cfg->lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_cfg_section_add);
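
A short sketch of how a device-specific driver might populate the table with
the two calls above; ADF_KERNEL_SEC and ADF_NUM_CY come from
adf_cfg_strings.h, while "ExampleKey" is hypothetical:

	static int example_populate_cfg(struct adf_accel_dev *accel_dev)
	{
		long num_inst = 4;
		int ret;

		ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC);
		if (ret)
			return ret;

		/* ADF_DEC values are passed as a pointer to a long */
		ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						  ADF_NUM_CY, &num_inst, ADF_DEC);
		if (ret)
			return ret;

		/* ADF_STR values are passed as a plain C string */
		return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						   "ExampleKey", "enabled", ADF_STR);
	}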
+
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+                           const char *section, const char *name,
+                           char *value)
+{
+       struct adf_cfg_device_data *cfg = accel_dev->cfg;
+       int ret;
+
+       down_read(&cfg->lock);
+       ret = adf_cfg_key_val_get(accel_dev, section, name, value);
+       up_read(&cfg->lock);
+       return ret;
+}
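
Reading a value back goes through adf_cfg_get_param_value(); everything is
stored as a string, so numeric keys need parsing. A minimal sketch, assuming
the key was added as ADF_DEC and the caller supplies a buffer of at least
ADF_CFG_MAX_VAL_LEN_IN_BYTES bytes:

	static int example_read_num_cy(struct adf_accel_dev *accel_dev,
				       unsigned long *num_inst)
	{
		char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

		if (adf_cfg_get_param_value(accel_dev, ADF_KERNEL_SEC,
					    ADF_NUM_CY, val))
			return -ENODATA;
		return kstrtoul(val, ADF_CFG_BASE_DEC, num_inst);
	}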
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.h b/drivers/crypto/qat/qat_common/adf_cfg.h
new file mode 100644 (file)
index 0000000..6a9c6f6
--- /dev/null
@@ -0,0 +1,87 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_H_
+#define ADF_CFG_H_
+
+#include <linux/list.h>
+#include <linux/rwsem.h>
+#include <linux/debugfs.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_cfg_key_val {
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       enum adf_cfg_val_type type;
+       struct list_head list;
+};
+
+struct adf_cfg_section {
+       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+       struct list_head list;
+       struct list_head param_head;
+};
+
+struct adf_cfg_device_data {
+       struct list_head sec_list;
+       struct dentry *debug;
+       struct rw_semaphore lock;
+};
+
+int adf_cfg_dev_add(struct adf_accel_dev *accel_dev);
+void adf_cfg_dev_remove(struct adf_accel_dev *accel_dev);
+int adf_cfg_section_add(struct adf_accel_dev *accel_dev, const char *name);
+void adf_cfg_del_all(struct adf_accel_dev *accel_dev);
+int adf_cfg_add_key_value_param(struct adf_accel_dev *accel_dev,
+                               const char *section_name,
+                               const char *key, const void *val,
+                               enum adf_cfg_val_type type);
+int adf_cfg_get_param_value(struct adf_accel_dev *accel_dev,
+                           const char *section, const char *name, char *value);
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_common.h b/drivers/crypto/qat/qat_common/adf_cfg_common.h
new file mode 100644 (file)
index 0000000..88b8218
--- /dev/null
@@ -0,0 +1,100 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_COMMON_H_
+#define ADF_CFG_COMMON_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define ADF_CFG_MAX_STR_LEN 64
+#define ADF_CFG_MAX_KEY_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_VAL_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_MAX_SECTION_LEN_IN_BYTES ADF_CFG_MAX_STR_LEN
+#define ADF_CFG_BASE_DEC 10
+#define ADF_CFG_BASE_HEX 16
+#define ADF_CFG_ALL_DEVICES 0xFE
+#define ADF_CFG_NO_DEVICE 0xFF
+#define ADF_CFG_AFFINITY_WHATEVER 0xFF
+#define MAX_DEVICE_NAME_SIZE 32
+#define ADF_MAX_DEVICES 32
+
+enum adf_cfg_val_type {
+       ADF_DEC,
+       ADF_HEX,
+       ADF_STR
+};
+
+enum adf_device_type {
+       DEV_UNKNOWN = 0,
+       DEV_DH895XCC,
+};
+
+struct adf_dev_status_info {
+       enum adf_device_type type;
+       uint8_t accel_id;
+       uint8_t instance_id;
+       uint8_t num_ae;
+       uint8_t num_accel;
+       uint8_t num_logical_accel;
+       uint8_t banks_per_accel;
+       uint8_t state;
+       uint8_t bus;
+       uint8_t dev;
+       uint8_t fun;
+       char name[MAX_DEVICE_NAME_SIZE];
+};
+
+#define ADF_CTL_IOC_MAGIC 'a'
+#define IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS _IOW(ADF_CTL_IOC_MAGIC, 0, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_STOP_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 1, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_START_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 2, \
+               struct adf_user_cfg_ctl_data)
+#define IOCTL_STATUS_ACCEL_DEV _IOW(ADF_CTL_IOC_MAGIC, 3, uint32_t)
+#define IOCTL_GET_NUM_DEVICES _IOW(ADF_CTL_IOC_MAGIC, 4, int32_t)
+#endif
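
A hypothetical user-space sketch of the simplest of these ioctls, querying the
device count through the control node created by adf_ctl_drv.c (assumed here
to appear as /dev/qat_adf_ctl):

	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "adf_cfg_common.h"

	int main(void)
	{
		int32_t num_devices = 0;
		int fd = open("/dev/qat_adf_ctl", O_RDONLY);

		if (fd < 0)
			return 1;
		if (ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices) == 0)
			printf("%d QAT device(s) present\n", num_devices);
		close(fd);
		return 0;
	}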
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/qat/qat_common/adf_cfg_strings.h
new file mode 100644 (file)
index 0000000..c7ac758
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_STRINGS_H_
+#define ADF_CFG_STRINGS_H_
+
+#define ADF_GENERAL_SEC "GENERAL"
+#define ADF_KERNEL_SEC "KERNEL"
+#define ADF_ACCEL_SEC "Accelerator"
+#define ADF_NUM_CY "NumberCyInstances"
+#define ADF_NUM_DC "NumberDcInstances"
+#define ADF_RING_SYM_SIZE "NumConcurrentSymRequests"
+#define ADF_RING_ASYM_SIZE "NumConcurrentAsymRequests"
+#define ADF_RING_DC_SIZE "NumConcurrentRequests"
+#define ADF_RING_ASYM_TX "RingAsymTx"
+#define ADF_RING_SYM_TX "RingSymTx"
+#define ADF_RING_RND_TX "RingNrbgTx"
+#define ADF_RING_ASYM_RX "RingAsymRx"
+#define ADF_RING_SYM_RX "RingSymRx"
+#define ADF_RING_RND_RX "RingNrbgRx"
+#define ADF_RING_DC_TX "RingTx"
+#define ADF_RING_DC_RX "RingRx"
+#define ADF_ETRMGR_BANK "Bank"
+#define ADF_RING_BANK_NUM "BankNumber"
+#define ADF_CY "Cy"
+#define ADF_DC "Dc"
+#define ADF_ETRMGR_COALESCING_ENABLED "InterruptCoalescingEnabled"
+#define ADF_ETRMGR_COALESCING_ENABLED_FORMAT \
+       ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_ENABLED
+#define ADF_ETRMGR_COALESCE_TIMER "InterruptCoalescingTimerNs"
+#define ADF_ETRMGR_COALESCE_TIMER_FORMAT \
+       ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCE_TIMER
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED "InterruptCoalescingNumResponses"
+#define ADF_ETRMGR_COALESCING_MSG_ENABLED_FORMAT \
+       ADF_ETRMGR_BANK"%d"ADF_ETRMGR_COALESCING_MSG_ENABLED
+#define ADF_ETRMGR_CORE_AFFINITY "CoreAffinity"
+#define ADF_ETRMGR_CORE_AFFINITY_FORMAT \
+       ADF_ETRMGR_BANK"%d"ADF_ETRMGR_CORE_AFFINITY
+#define ADF_ACCEL_STR "Accelerator%d"
+#endif
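
The *_FORMAT macros above rely on adjacent string-literal concatenation and
take the bank number as an snprintf argument; e.g. bank 0's coalescing key
expands as follows (a sketch, not part of the patch):

	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];

	snprintf(key, sizeof(key), ADF_ETRMGR_COALESCING_ENABLED_FORMAT, 0);
	/* key now holds "Bank0InterruptCoalescingEnabled" */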
diff --git a/drivers/crypto/qat/qat_common/adf_cfg_user.h b/drivers/crypto/qat/qat_common/adf_cfg_user.h
new file mode 100644 (file)
index 0000000..0c38a15
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_CFG_USER_H_
+#define ADF_CFG_USER_H_
+
+#include "adf_cfg_common.h"
+#include "adf_cfg_strings.h"
+
+struct adf_user_cfg_key_val {
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       union {
+               char *user_val_ptr;
+               uint64_t padding1;
+       };
+       union {
+               struct adf_user_cfg_key_val *prev;
+               uint64_t padding2;
+       };
+       union {
+               struct adf_user_cfg_key_val *next;
+               uint64_t padding3;
+       };
+       enum adf_cfg_val_type type;
+};
+
+struct adf_user_cfg_section {
+       char name[ADF_CFG_MAX_SECTION_LEN_IN_BYTES];
+       union {
+               struct adf_user_cfg_key_val *params;
+               uint64_t padding1;
+       };
+       union {
+               struct adf_user_cfg_section *prev;
+               uint64_t padding2;
+       };
+       union {
+               struct adf_user_cfg_section *next;
+               uint64_t padding3;
+       };
+};
+
+struct adf_user_cfg_ctl_data {
+       union {
+               struct adf_user_cfg_section *config_section;
+               uint64_t padding;
+       };
+       uint8_t device_id;
+};
+#endif
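
The unions padding every pointer to 64 bits keep this layout identical for
32- and 64-bit user space. A hypothetical sketch of user space building a
one-section, one-key list for IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS (the key
and value strings are illustrative only):

	#include <string.h>
	#include <sys/ioctl.h>
	#include "adf_cfg_user.h"

	int example_configure(int fd, uint8_t dev_id)
	{
		struct adf_user_cfg_key_val kv = { .type = ADF_STR };
		struct adf_user_cfg_section sec = { .params = &kv };
		struct adf_user_cfg_ctl_data ctl = {
			.config_section = &sec,
			.device_id = dev_id,
		};

		strncpy(sec.name, "GENERAL", sizeof(sec.name) - 1);
		strncpy(kv.key, "ExampleKey", sizeof(kv.key) - 1);
		strncpy(kv.val, "enabled", sizeof(kv.val) - 1);
		return ioctl(fd, IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS, &ctl);
	}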
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
new file mode 100644 (file)
index 0000000..5e8f9d4
--- /dev/null
@@ -0,0 +1,192 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DRV_H
+#define ADF_DRV_H
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_loader_handle.h"
+#include "icp_qat_hal.h"
+
+#define ADF_STATUS_RESTARTING 0
+#define ADF_STATUS_STARTING 1
+#define ADF_STATUS_CONFIGURED 2
+#define ADF_STATUS_STARTED 3
+#define ADF_STATUS_AE_INITIALISED 4
+#define ADF_STATUS_AE_UCODE_LOADED 5
+#define ADF_STATUS_AE_STARTED 6
+#define ADF_STATUS_ORPHAN_TH_RUNNING 7
+#define ADF_STATUS_IRQ_ALLOCATED 8
+
+enum adf_dev_reset_mode {
+       ADF_DEV_RESET_ASYNC = 0,
+       ADF_DEV_RESET_SYNC
+};
+
+enum adf_event {
+       ADF_EVENT_INIT = 0,
+       ADF_EVENT_START,
+       ADF_EVENT_STOP,
+       ADF_EVENT_SHUTDOWN,
+       ADF_EVENT_RESTARTING,
+       ADF_EVENT_RESTARTED,
+};
+
+struct service_hndl {
+       int (*event_hld)(struct adf_accel_dev *accel_dev,
+                        enum adf_event event);
+       unsigned long init_status;
+       unsigned long start_status;
+       char *name;
+       struct list_head list;
+       int admin;
+};
+
+int adf_service_register(struct service_hndl *service);
+int adf_service_unregister(struct service_hndl *service);
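
Subsystems plug into the device lifecycle through struct service_hndl above;
a minimal sketch of a hypothetical service reacting to start/stop events:

	static int example_event_handler(struct adf_accel_dev *accel_dev,
					 enum adf_event event)
	{
		switch (event) {
		case ADF_EVENT_START:
			/* device came up: create instances, start workers */
			break;
		case ADF_EVENT_STOP:
			/* device going down: quiesce and release users */
			break;
		default:
			break;
		}
		return 0;
	}

	static struct service_hndl example_service = {
		.event_hld = example_event_handler,
		.name = "example_service",
	};

	/* registered once at module init: adf_service_register(&example_service); */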
+
+int adf_dev_init(struct adf_accel_dev *accel_dev);
+int adf_dev_start(struct adf_accel_dev *accel_dev);
+int adf_dev_stop(struct adf_accel_dev *accel_dev);
+int adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+
+int adf_ctl_dev_register(void);
+void adf_ctl_dev_unregister(void);
+int adf_processes_dev_register(void);
+void adf_processes_dev_unregister(void);
+
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev);
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev);
+struct list_head *adf_devmgr_get_head(void);
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id);
+struct adf_accel_dev *adf_devmgr_get_first(void);
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev);
+int adf_devmgr_verify_id(uint32_t id);
+void adf_devmgr_get_num_dev(uint32_t *num);
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev);
+int adf_dev_started(struct adf_accel_dev *accel_dev);
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev);
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev);
+int adf_ae_init(struct adf_accel_dev *accel_dev);
+int adf_ae_shutdown(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_load(struct adf_accel_dev *accel_dev);
+int adf_ae_fw_release(struct adf_accel_dev *accel_dev);
+int adf_ae_start(struct adf_accel_dev *accel_dev);
+int adf_ae_stop(struct adf_accel_dev *accel_dev);
+
+int adf_enable_aer(struct adf_accel_dev *accel_dev, struct pci_driver *adf);
+void adf_disable_aer(struct adf_accel_dev *accel_dev);
+int adf_init_aer(void);
+void adf_exit_aer(void);
+
+int adf_dev_get(struct adf_accel_dev *accel_dev);
+void adf_dev_put(struct adf_accel_dev *accel_dev);
+int adf_dev_in_use(struct adf_accel_dev *accel_dev);
+int adf_init_etr_data(struct adf_accel_dev *accel_dev);
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev);
+int qat_crypto_register(void);
+int qat_crypto_unregister(void);
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node);
+void qat_crypto_put_instance(struct qat_crypto_instance *inst);
+void qat_alg_callback(void *resp);
+int qat_algs_init(void);
+void qat_algs_exit(void);
+int qat_algs_register(void);
+int qat_algs_unregister(void);
+
+int qat_hal_init(struct adf_accel_dev *accel_dev);
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                  unsigned int ctx_mask);
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                 unsigned int ctx_mask);
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle);
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle);
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+                         unsigned char ae, unsigned int ctx_mask);
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
+                          unsigned char mode);
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+                           unsigned char ae, unsigned char mode);
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, unsigned char mode);
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned int ctx_mask, unsigned int upc);
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+                      unsigned char ae, unsigned int uaddr,
+                      unsigned int words_num, uint64_t *uword);
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                    unsigned int uword_addr, unsigned int words_num,
+                    unsigned int *data);
+int qat_hal_get_ins_num(void);
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                       unsigned char ae,
+                       struct icp_qat_uof_batch_init *lm_init_header);
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned char ctx_mask,
+                    enum icp_qat_uof_regtype reg_type,
+                    unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned char ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned char ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata);
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned char ctx_mask,
+                   unsigned short reg_num, unsigned int regdata);
+int qat_hal_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                 unsigned char ae, unsigned short lm_addr, unsigned int value);
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle);
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle);
+int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+                        void *addr_ptr, int mem_size);
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
new file mode 100644 (file)
index 0000000..d97069b
--- /dev/null
@@ -0,0 +1,490 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/bitops.h>
+#include <linux/pci.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_cfg.h"
+#include "adf_cfg_common.h"
+#include "adf_cfg_user.h"
+
+#define DEVICE_NAME "qat_adf_ctl"
+
+static DEFINE_MUTEX(adf_ctl_lock);
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);
+
+static const struct file_operations adf_ctl_ops = {
+       .owner = THIS_MODULE,
+       .unlocked_ioctl = adf_ctl_ioctl,
+       .compat_ioctl = adf_ctl_ioctl,
+};
+
+struct adf_ctl_drv_info {
+       unsigned int major;
+       struct cdev drv_cdev;
+       struct class *drv_class;
+};
+
+static struct adf_ctl_drv_info adt_ctl_drv;
+
+static void adf_chr_drv_destroy(void)
+{
+       device_destroy(adt_ctl_drv.drv_class, MKDEV(adt_ctl_drv.major, 0));
+       cdev_del(&adt_ctl_drv.drv_cdev);
+       class_destroy(adt_ctl_drv.drv_class);
+       unregister_chrdev_region(MKDEV(adt_ctl_drv.major, 0), 1);
+}
+
+static int adf_chr_drv_create(void)
+{
+       dev_t dev_id;
+       struct device *drv_device;
+
+       if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
+               pr_err("QAT: unable to allocate chrdev region\n");
+               return -EFAULT;
+       }
+
+       adt_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
+       if (IS_ERR(adt_ctl_drv.drv_class)) {
+               pr_err("QAT: class_create failed for adf_ctl\n");
+               goto err_chrdev_unreg;
+       }
+       adt_ctl_drv.major = MAJOR(dev_id);
+       cdev_init(&adt_ctl_drv.drv_cdev, &adf_ctl_ops);
+       if (cdev_add(&adt_ctl_drv.drv_cdev, dev_id, 1)) {
+               pr_err("QAT: cdev add failed\n");
+               goto err_class_destr;
+       }
+
+       drv_device = device_create(adt_ctl_drv.drv_class, NULL,
+                                  MKDEV(adt_ctl_drv.major, 0),
+                                  NULL, DEVICE_NAME);
+       if (IS_ERR(drv_device)) {
+               pr_err("QAT: failed to create device\n");
+               goto err_cdev_del;
+       }
+       return 0;
+err_cdev_del:
+       cdev_del(&adt_ctl_drv.drv_cdev);
+err_class_destr:
+       class_destroy(adt_ctl_drv.drv_class);
+err_chrdev_unreg:
+       unregister_chrdev_region(dev_id, 1);
+       return -EFAULT;
+}
+
+static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
+                                  unsigned long arg)
+{
+       struct adf_user_cfg_ctl_data *cfg_data;
+
+       cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
+       if (!cfg_data)
+               return -ENOMEM;
+
+       /* Initialize device id to NO DEVICE as 0 is a valid device id */
+       cfg_data->device_id = ADF_CFG_NO_DEVICE;
+
+       if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
+               pr_err("QAT: failed to copy from user cfg_data.\n");
+               kfree(cfg_data);
+               return -EIO;
+       }
+
+       *ctl_data = cfg_data;
+       return 0;
+}
+
+static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
+                                 const char *section,
+                                 const struct adf_user_cfg_key_val *key_val)
+{
+       if (key_val->type == ADF_HEX) {
+               long *ptr = (long *)key_val->val;
+               long val = *ptr;
+
+               if (adf_cfg_add_key_value_param(accel_dev, section,
+                                               key_val->key, (void *)val,
+                                               key_val->type)) {
+                       pr_err("QAT: failed to add keyvalue.\n");
+                       return -EFAULT;
+               }
+       } else {
+               if (adf_cfg_add_key_value_param(accel_dev, section,
+                                               key_val->key, key_val->val,
+                                               key_val->type)) {
+                       pr_err("QAT: failed to add keyvalue.\n");
+                       return -EFAULT;
+               }
+       }
+       return 0;
+}
+
+static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
+                                  struct adf_user_cfg_ctl_data *ctl_data)
+{
+       struct adf_user_cfg_key_val key_val;
+       struct adf_user_cfg_key_val *params_head;
+       struct adf_user_cfg_section section, *section_head;
+
+       section_head = ctl_data->config_section;
+
+       while (section_head) {
+               if (copy_from_user(&section, (void __user *)section_head,
+                                  sizeof(*section_head))) {
+                       pr_err("QAT: failed to copy section info\n");
+                       goto out_err;
+               }
+
+               if (adf_cfg_section_add(accel_dev, section.name)) {
+                       pr_err("QAT: failed to add section.\n");
+                       goto out_err;
+               }
+
+               /* use the kernel-space copy; section_head is a user pointer */
+               params_head = section.params;
+
+               while (params_head) {
+                       if (copy_from_user(&key_val, (void __user *)params_head,
+                                          sizeof(key_val))) {
+                               pr_err("QAT: Failed to copy keyvalue.\n");
+                               goto out_err;
+                       }
+                       if (adf_add_key_value_data(accel_dev, section.name,
+                                                  &key_val)) {
+                               goto out_err;
+                       }
+                       params_head = key_val.next;
+               }
+               section_head = section.next;
+       }
+       return 0;
+out_err:
+       adf_cfg_del_all(accel_dev);
+       return -EFAULT;
+}
+
+static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
+                                   unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+       struct adf_accel_dev *accel_dev;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+       if (!accel_dev) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (adf_dev_started(accel_dev)) {
+               ret = -EFAULT;
+               goto out;
+       }
+
+       if (adf_copy_key_value_data(accel_dev, ctl_data)) {
+               ret = -EFAULT;
+               goto out;
+       }
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_is_device_in_use(int id)
+{
+       struct list_head *itr, *head = adf_devmgr_get_head();
+
+       list_for_each(itr, head) {
+               struct adf_accel_dev *dev =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
+                               pr_info("QAT: device qat_dev%d is busy\n",
+                                       dev->accel_id);
+                               return -EBUSY;
+                       }
+               }
+       }
+       return 0;
+}
+
+static int adf_ctl_stop_devices(uint32_t id)
+{
+       struct list_head *itr, *head = adf_devmgr_get_head();
+       int ret = 0;
+
+       list_for_each(itr, head) {
+               struct adf_accel_dev *accel_dev =
+                               list_entry(itr, struct adf_accel_dev, list);
+               if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
+                       if (!adf_dev_started(accel_dev))
+                               continue;
+
+                       if (adf_dev_stop(accel_dev)) {
+                               pr_err("QAT: Failed to stop qat_dev%d\n",
+                                      accel_dev->accel_id);
+                               ret = -EFAULT;
+                       }
+               }
+       }
+       return ret;
+}
+
+static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
+                                 unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       if (adf_devmgr_verify_id(ctl_data->device_id)) {
+               pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+               ret = -ENODEV;
+               goto out;
+       }
+
+       ret = adf_ctl_is_device_in_use(ctl_data->device_id);
+       if (ret)
+               goto out;
+
+       if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
+               pr_info("QAT: Stopping all acceleration devices.\n");
+       else
+               pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
+                       ctl_data->device_id);
+
+       ret = adf_ctl_stop_devices(ctl_data->device_id);
+       if (ret)
+               pr_err("QAT: failed to stop device.\n");
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
+                                  unsigned long arg)
+{
+       int ret;
+       struct adf_user_cfg_ctl_data *ctl_data;
+       struct adf_accel_dev *accel_dev;
+
+       ret = adf_ctl_alloc_resources(&ctl_data, arg);
+       if (ret)
+               return ret;
+
+       accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
+       if (!accel_dev) {
+               pr_err("QAT: Device %d not found\n", ctl_data->device_id);
+               ret = -ENODEV;
+               goto out;
+       }
+
+       if (!adf_dev_started(accel_dev)) {
+               pr_info("QAT: Starting acceleration device qat_dev%d.\n",
+                       ctl_data->device_id);
+               ret = adf_dev_start(accel_dev);
+       } else {
+               pr_info("QAT: Acceleration device qat_dev%d already started.\n",
+                       ctl_data->device_id);
+       }
+       if (ret) {
+               pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
+               adf_dev_stop(accel_dev);
+       }
+out:
+       kfree(ctl_data);
+       return ret;
+}
+
+static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
+                                        unsigned long arg)
+{
+       uint32_t num_devices = 0;
+
+       adf_devmgr_get_num_dev(&num_devices);
+       if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
+                                   unsigned long arg)
+{
+       struct adf_hw_device_data *hw_data;
+       struct adf_dev_status_info dev_info;
+       struct adf_accel_dev *accel_dev;
+
+       if (copy_from_user(&dev_info, (void __user *)arg,
+                          sizeof(struct adf_dev_status_info))) {
+               pr_err("QAT: failed to copy from user.\n");
+               return -EFAULT;
+       }
+
+       accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
+       if (!accel_dev) {
+               pr_err("QAT: Device %d not found\n", dev_info.accel_id);
+               return -ENODEV;
+       }
+       hw_data = accel_dev->hw_device;
+       dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
+       dev_info.num_ae = hw_data->get_num_aes(hw_data);
+       dev_info.num_accel = hw_data->get_num_accels(hw_data);
+       dev_info.num_logical_accel = hw_data->num_logical_accel;
+       dev_info.banks_per_accel = hw_data->num_banks
+                                       / hw_data->num_logical_accel;
+       strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
+       dev_info.instance_id = hw_data->instance_id;
+       dev_info.type = hw_data->dev_class->type;
+       dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
+       dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
+       dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);
+
+       if (copy_to_user((void __user *)arg, &dev_info,
+                        sizeof(struct adf_dev_status_info))) {
+               pr_err("QAT: failed to copy status.\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+       int ret;
+
+       if (mutex_lock_interruptible(&adf_ctl_lock))
+               return -EFAULT;
+
+       switch (cmd) {
+       case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
+               ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
+               break;
+
+       case IOCTL_STOP_ACCEL_DEV:
+               ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
+               break;
+
+       case IOCTL_START_ACCEL_DEV:
+               ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
+               break;
+
+       case IOCTL_GET_NUM_DEVICES:
+               ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
+               break;
+
+       case IOCTL_STATUS_ACCEL_DEV:
+               ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
+               break;
+       default:
+               pr_err("QAT: Invalid ioclt\n");
+               ret = -EFAULT;
+               break;
+       }
+       mutex_unlock(&adf_ctl_lock);
+       return ret;
+}
+
+static int __init adf_register_ctl_device_driver(void)
+{
+       mutex_init(&adf_ctl_lock);
+
+       if (qat_algs_init())
+               goto err_algs_init;
+
+       if (adf_chr_drv_create())
+               goto err_chr_dev;
+
+       if (adf_init_aer())
+               goto err_aer;
+
+       if (qat_crypto_register())
+               goto err_crypto_register;
+
+       return 0;
+
+err_crypto_register:
+       adf_exit_aer();
+err_aer:
+       adf_chr_drv_destroy();
+err_chr_dev:
+       qat_algs_exit();
+err_algs_init:
+       mutex_destroy(&adf_ctl_lock);
+       return -EFAULT;
+}
+
+static void __exit adf_unregister_ctl_device_driver(void)
+{
+       adf_chr_drv_destroy();
+       adf_exit_aer();
+       qat_crypto_unregister();
+       qat_algs_exit();
+       mutex_destroy(&adf_ctl_lock);
+}
+
+module_init(adf_register_ctl_device_driver);
+module_exit(adf_unregister_ctl_device_driver);
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
+MODULE_ALIAS("intel_qat");
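
For context, a minimal userspace sketch of driving the control ioctls handled
above (hedged: the device node name and the IOCTL_* request definitions live
in the driver's config headers outside this patch, and are assumed here):

    #include <stdio.h>
    #include <stdint.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include "adf_cfg_user.h"   /* assumed to define IOCTL_GET_NUM_DEVICES */

    int main(void)
    {
            uint32_t num_devices = 0;
            int fd = open("/dev/qat_adf_ctl", O_RDWR);  /* assumed node name */

            if (fd < 0)
                    return 1;
            /* adf_ctl_ioctl_get_num_devices() copies a uint32_t back */
            if (!ioctl(fd, IOCTL_GET_NUM_DEVICES, &num_devices))
                    printf("QAT devices: %u\n", num_devices);
            close(fd);
            return 0;
    }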
diff --git a/drivers/crypto/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/qat/qat_common/adf_dev_mgr.c
new file mode 100644 (file)
index 0000000..ae71555
--- /dev/null
@@ -0,0 +1,215 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(accel_table);
+static DEFINE_MUTEX(table_lock);
+static uint32_t num_devices;
+
+/**
+ * adf_devmgr_add_dev() - Add accel_dev to the acceleration framework
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function adds the acceleration device to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev)
+{
+       struct list_head *itr;
+
+       mutex_lock(&table_lock);
+       if (num_devices == ADF_MAX_DEVICES) {
+               pr_err("QAT: Only support up to %d devices\n", ADF_MAX_DEVICES);
+               mutex_unlock(&table_lock);
+               return -EFAULT;
+       }
+
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr == accel_dev) {
+                       mutex_unlock(&table_lock);
+                       return -EEXIST;
+               }
+       }
+       atomic_set(&accel_dev->ref_count, 0);
+       list_add_tail(&accel_dev->list, &accel_table);
+       accel_dev->accel_id = num_devices++;
+       mutex_unlock(&table_lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_add_dev);
+
+struct list_head *adf_devmgr_get_head(void)
+{
+       return &accel_table;
+}
+
+/**
+ * adf_devmgr_rm_dev() - Remove accel_dev from the acceleration framework.
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function removes the acceleration device from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev)
+{
+       mutex_lock(&table_lock);
+       list_del(&accel_dev->list);
+       num_devices--;
+       mutex_unlock(&table_lock);
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_rm_dev);
+
+struct adf_accel_dev *adf_devmgr_get_first(void)
+{
+       struct adf_accel_dev *dev = NULL;
+
+       if (!list_empty(&accel_table))
+               dev = list_first_entry(&accel_table, struct adf_accel_dev,
+                                      list);
+       return dev;
+}
+
+/**
+ * adf_devmgr_pci_to_accel_dev() - Get accel_dev associated with the pci_dev.
+ * @pci_dev:  Pointer to pci device.
+ *
+ * Function returns the acceleration device associated with the given pci
+ * device.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: pointer to accel_dev or NULL if not found.
+ */
+struct adf_accel_dev *adf_devmgr_pci_to_accel_dev(struct pci_dev *pci_dev)
+{
+       struct list_head *itr;
+
+       mutex_lock(&table_lock);
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->accel_pci_dev.pci_dev == pci_dev) {
+                       mutex_unlock(&table_lock);
+                       return ptr;
+               }
+       }
+       mutex_unlock(&table_lock);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(adf_devmgr_pci_to_accel_dev);
+
+struct adf_accel_dev *adf_devmgr_get_dev_by_id(uint32_t id)
+{
+       struct list_head *itr;
+
+       mutex_lock(&table_lock);
+       list_for_each(itr, &accel_table) {
+               struct adf_accel_dev *ptr =
+                               list_entry(itr, struct adf_accel_dev, list);
+
+               if (ptr->accel_id == id) {
+                       mutex_unlock(&table_lock);
+                       return ptr;
+               }
+       }
+       mutex_unlock(&table_lock);
+       return NULL;
+}
+
+int adf_devmgr_verify_id(uint32_t id)
+{
+       if (id == ADF_CFG_ALL_DEVICES)
+               return 0;
+
+       if (adf_devmgr_get_dev_by_id(id))
+               return 0;
+
+       return -ENODEV;
+}
+
+void adf_devmgr_get_num_dev(uint32_t *num)
+{
+       struct list_head *itr;
+
+       *num = 0;
+       list_for_each(itr, &accel_table) {
+               (*num)++;
+       }
+}
+
+int adf_dev_in_use(struct adf_accel_dev *accel_dev)
+{
+       return atomic_read(&accel_dev->ref_count) != 0;
+}
+
+int adf_dev_get(struct adf_accel_dev *accel_dev)
+{
+       if (atomic_add_return(1, &accel_dev->ref_count) == 1)
+               if (!try_module_get(accel_dev->owner))
+                       return -EFAULT;
+       return 0;
+}
+
+void adf_dev_put(struct adf_accel_dev *accel_dev)
+{
+       if (atomic_sub_return(1, &accel_dev->ref_count) == 0)
+               module_put(accel_dev->owner);
+}
+
+int adf_devmgr_in_reset(struct adf_accel_dev *accel_dev)
+{
+       return test_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
+}
+
+int adf_dev_started(struct adf_accel_dev *accel_dev)
+{
+       return test_bit(ADF_STATUS_STARTED, &accel_dev->status);
+}
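
A hedged sketch of how a device-specific driver might use the device manager
API above during probe and remove (the helper names and error handling here
are illustrative, not part of this patch):

    /* Illustrative only: register with, then unregister from, accel_table */
    static int example_register(struct pci_dev *pdev)
    {
            struct adf_accel_dev *accel_dev;

            accel_dev = kzalloc(sizeof(*accel_dev), GFP_KERNEL);
            if (!accel_dev)
                    return -ENOMEM;

            accel_dev->accel_pci_dev.pci_dev = pdev;
            /* adds the device to accel_table and assigns accel_id */
            if (adf_devmgr_add_dev(accel_dev)) {
                    kfree(accel_dev);
                    return -EFAULT;
            }
            return 0;
    }

    static void example_unregister(struct adf_accel_dev *accel_dev)
    {
            if (adf_dev_in_use(accel_dev))
                    pr_warn("QAT: device still in use\n");
            adf_devmgr_rm_dev(accel_dev);
            kfree(accel_dev);
    }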
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
new file mode 100644 (file)
index 0000000..5c0e47a
--- /dev/null
@@ -0,0 +1,388 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static LIST_HEAD(service_table);
+static DEFINE_MUTEX(service_lock);
+
+static void adf_service_add(struct service_hndl *service)
+{
+       mutex_lock(&service_lock);
+       list_add(&service->list, &service_table);
+       mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_register() - Register acceleration service in the accel framework
+ * @service:    Pointer to the service
+ *
+ * Function adds the acceleration service to the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_register(struct service_hndl *service)
+{
+       service->init_status = 0;
+       service->start_status = 0;
+       adf_service_add(service);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_register);
+
+static void adf_service_remove(struct service_hndl *service)
+{
+       mutex_lock(&service_lock);
+       list_del(&service->list);
+       mutex_unlock(&service_lock);
+}
+
+/**
+ * adf_service_unregister() - Unregister acceleration service from the framework
+ * @service:    Pointer to the service
+ *
+ * Function removes the acceleration service from the acceleration framework.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_service_unregister(struct service_hndl *service)
+{
+       if (service->init_status || service->start_status) {
+               pr_err("QAT: Could not remove active service\n");
+               return -EFAULT;
+       }
+       adf_service_remove(service);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_service_unregister);
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+
+       if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
+               pr_info("QAT: Device not configured\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+       if (adf_ae_init(accel_dev)) {
+               pr_err("QAT: Failed to initialise Acceleration Engine\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status);
+
+       if (adf_ae_fw_load(accel_dev)) {
+               pr_err("QAT: Failed to load acceleration FW\n");
+               adf_ae_fw_release(accel_dev);
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status);
+
+       if (hw_data->alloc_irq(accel_dev)) {
+               pr_err("QAT: Failed to allocate interrupts\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+
+       /*
+        * Subservice initialisation is divided into two stages: init and start.
+        * This is to facilitate any ordering dependencies between services
+        * prior to starting any of the accelerators.
+        */
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+                       pr_err("QAT: Failed to initialise service %s\n",
+                              service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, &service->init_status);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_INIT)) {
+                       pr_err("QAT: Failed to initialise service %s\n",
+                              service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, &service->init_status);
+       }
+
+       hw_data->enable_error_correction(accel_dev);
+
+       if (adf_ae_start(accel_dev)) {
+               pr_err("QAT: AE Start Failed\n");
+               return -EFAULT;
+       }
+       set_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+                       pr_err("QAT: Failed to start service %s\n",
+                              service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, &service->start_status);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_START)) {
+                       pr_err("QAT: Failed to start service %s\n",
+                              service->name);
+                       return -EFAULT;
+               }
+               set_bit(accel_dev->accel_id, &service->start_status);
+       }
+
+       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+       set_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+       if (qat_algs_register()) {
+               pr_err("QAT: Failed to register crypto algs\n");
+               set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+               clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+               return -EFAULT;
+       }
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_start);
+
+/**
+ * adf_dev_stop() - Stop acceleration service for the given accel device
+ * @accel_dev:    Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is shutting down.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_stop(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct service_hndl *service;
+       struct list_head *list_itr;
+       int ret, wait = 0;
+
+       if (!adf_dev_started(accel_dev) &&
+           !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
+               return 0;
+       }
+       clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+       clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
+       clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
+
+       if (qat_algs_unregister())
+               pr_err("QAT: Failed to unregister crypto algs\n");
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (!test_bit(accel_dev->accel_id, &service->start_status))
+                       continue;
+               ret = service->event_hld(accel_dev, ADF_EVENT_STOP);
+               if (!ret) {
+                       clear_bit(accel_dev->accel_id, &service->start_status);
+               } else if (ret == -EAGAIN) {
+                       wait = 1;
+                       clear_bit(accel_dev->accel_id, &service->start_status);
+               }
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (!test_bit(accel_dev->accel_id, &service->start_status))
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_STOP))
+                       pr_err("QAT: Failed to shutdown service %s\n",
+                              service->name);
+               else
+                       clear_bit(accel_dev->accel_id, &service->start_status);
+       }
+
+       if (wait)
+               msleep(100);
+
+       if (adf_dev_started(accel_dev)) {
+               if (adf_ae_stop(accel_dev))
+                       pr_err("QAT: failed to stop AE\n");
+               else
+                       clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
+       }
+
+       if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
+               if (adf_ae_fw_release(accel_dev))
+                       pr_err("QAT: Failed to release the ucode\n");
+               else
+                       clear_bit(ADF_STATUS_AE_UCODE_LOADED,
+                                 &accel_dev->status);
+       }
+
+       if (test_bit(ADF_STATUS_AE_INITIALISED, &accel_dev->status)) {
+               if (adf_ae_shutdown(accel_dev))
+                       pr_err("QAT: Failed to shutdown Accel Engine\n");
+               else
+                       clear_bit(ADF_STATUS_AE_INITIALISED,
+                                 &accel_dev->status);
+       }
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (!test_bit(accel_dev->accel_id, &service->init_status))
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+                       pr_err("QAT: Failed to shutdown service %s\n",
+                              service->name);
+               else
+                       clear_bit(accel_dev->accel_id, &service->init_status);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (!test_bit(accel_dev->accel_id, &service->init_status))
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN))
+                       pr_err("QAT: Failed to shutdown service %s\n",
+                              service->name);
+               else
+                       clear_bit(accel_dev->accel_id, &service->init_status);
+       }
+
+       if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) {
+               hw_data->free_irq(accel_dev);
+               clear_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status);
+       }
+
+       /* Delete configuration only if not restarting */
+       if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+               adf_cfg_del_all(accel_dev);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+                       pr_err("QAT: Failed to restart service %s.\n",
+                              service->name);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING))
+                       pr_err("QAT: Failed to restart service %s.\n",
+                              service->name);
+       }
+       return 0;
+}
+
+int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev)
+{
+       struct service_hndl *service;
+       struct list_head *list_itr;
+
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+                       pr_err("QAT: Failed to restart service %s.\n",
+                              service->name);
+       }
+       list_for_each(list_itr, &service_table) {
+               service = list_entry(list_itr, struct service_hndl, list);
+               if (!service->admin)
+                       continue;
+               if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED))
+                       pr_err("QAT: Failed to restart service %s.\n",
+                              service->name);
+       }
+       return 0;
+}
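
A sketch of the two-stage event model that adf_dev_start()/adf_dev_stop()
drive. The struct service_hndl fields are named as they are used in this
file; the handler body itself is illustrative:

    static int example_event_handler(struct adf_accel_dev *accel_dev,
                                     enum adf_event event)
    {
            switch (event) {
            case ADF_EVENT_INIT:      /* allocate per-device state */
            case ADF_EVENT_START:     /* device usable from here on */
                    return 0;
            case ADF_EVENT_STOP:
                    /* returning -EAGAIN asks adf_dev_stop() for the
                     * 100ms grace wait before tearing the device down */
                    return 0;
            case ADF_EVENT_SHUTDOWN: /* free per-device state */
            default:
                    return 0;
            }
    }

    static struct service_hndl example_service = {
            .event_hld = example_event_handler,
            .name = "example_service",
            .admin = 0,     /* admin services init first, stop last */
    };

    /* adf_service_register(&example_service); */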
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
new file mode 100644 (file)
index 0000000..5f3fa45
--- /dev/null
@@ -0,0 +1,567 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/delay.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+#include "adf_cfg.h"
+#include "adf_common_drv.h"
+
+static inline uint32_t adf_modulo(uint32_t data, uint32_t shift)
+{
+       uint32_t div = data >> shift;
+       uint32_t mult = div << shift;
+
+       return data - mult;
+}
+
+static inline int adf_check_ring_alignment(uint64_t addr, uint64_t size)
+{
+       if (((size - 1) & addr) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static int adf_verify_ring_size(uint32_t msg_size, uint32_t msg_num)
+{
+       int i = ADF_MIN_RING_SIZE;
+
+       for (; i <= ADF_MAX_RING_SIZE; i++)
+               if ((msg_size * msg_num) == ADF_SIZE_TO_RING_SIZE_IN_BYTES(i))
+                       return i;
+
+       return ADF_DEFAULT_RING_SIZE;
+}
+
+static int adf_reserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+       spin_lock(&bank->lock);
+       if (bank->ring_mask & (1 << ring)) {
+               spin_unlock(&bank->lock);
+               return -EFAULT;
+       }
+       bank->ring_mask |= (1 << ring);
+       spin_unlock(&bank->lock);
+       return 0;
+}
+
+static void adf_unreserve_ring(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+       spin_lock(&bank->lock);
+       bank->ring_mask &= ~(1 << ring);
+       spin_unlock(&bank->lock);
+}
+
+static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+       spin_lock_bh(&bank->lock);
+       bank->irq_mask |= (1 << ring);
+       spin_unlock_bh(&bank->lock);
+       WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+       WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
+                             bank->irq_coalesc_timer);
+}
+
+static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, uint32_t ring)
+{
+       spin_lock_bh(&bank->lock);
+       bank->irq_mask &= ~(1 << ring);
+       spin_unlock_bh(&bank->lock);
+       WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+}
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg)
+{
+       if (atomic_add_return(1, ring->inflights) >
+           ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
+               atomic_dec(ring->inflights);
+               return -EAGAIN;
+       }
+       spin_lock_bh(&ring->lock);
+       memcpy(ring->base_addr + ring->tail, msg,
+              ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+
+       ring->tail = adf_modulo(ring->tail +
+                               ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+                               ADF_RING_SIZE_MODULO(ring->ring_size));
+       WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
+                           ring->ring_number, ring->tail);
+       spin_unlock_bh(&ring->lock);
+       return 0;
+}
+
+static int adf_handle_response(struct adf_etr_ring_data *ring)
+{
+       uint32_t msg_counter = 0;
+       uint32_t *msg = (uint32_t *)(ring->base_addr + ring->head);
+
+       while (*msg != ADF_RING_EMPTY_SIG) {
+               ring->callback((uint32_t *)msg);
+               *msg = ADF_RING_EMPTY_SIG;
+               ring->head = adf_modulo(ring->head +
+                                       ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
+                                       ADF_RING_SIZE_MODULO(ring->ring_size));
+               msg_counter++;
+               msg = (uint32_t *)(ring->base_addr + ring->head);
+       }
+       if (msg_counter > 0) {
+               WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
+                                   ring->bank->bank_number,
+                                   ring->ring_number, ring->head);
+               atomic_sub(msg_counter, ring->inflights);
+       }
+       return 0;
+}
+
+static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
+{
+       uint32_t ring_config = BUILD_RING_CONFIG(ring->ring_size);
+
+       WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+                             ring->ring_number, ring_config);
+}
+
+static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
+{
+       uint32_t ring_config =
+                       BUILD_RESP_RING_CONFIG(ring->ring_size,
+                                              ADF_RING_NEAR_WATERMARK_512,
+                                              ADF_RING_NEAR_WATERMARK_0);
+
+       WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
+                             ring->ring_number, ring_config);
+}
+
+static int adf_init_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint64_t ring_base;
+       uint32_t ring_size_bytes =
+                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+
+       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+       ring->base_addr = dma_alloc_coherent(&GET_DEV(accel_dev),
+                                            ring_size_bytes, &ring->dma_addr,
+                                            GFP_KERNEL);
+       if (!ring->base_addr)
+               return -ENOMEM;
+
+       memset(ring->base_addr, 0x7F, ring_size_bytes);
+       /* The base_addr has to be aligned to the size of the buffer */
+       if (adf_check_ring_alignment(ring->dma_addr, ring_size_bytes)) {
+               pr_err("QAT: Ring address not aligned\n");
+               dma_free_coherent(&GET_DEV(accel_dev), ring_size_bytes,
+                                 ring->base_addr, ring->dma_addr);
+               return -EFAULT;
+       }
+
+       if (hw_data->tx_rings_mask & (1 << ring->ring_number))
+               adf_configure_tx_ring(ring);
+       else
+               adf_configure_rx_ring(ring);
+
+       ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
+       WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
+                           ring->ring_number, ring_base);
+       spin_lock_init(&ring->lock);
+       return 0;
+}
+
+static void adf_cleanup_ring(struct adf_etr_ring_data *ring)
+{
+       uint32_t ring_size_bytes =
+                       ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
+       ring_size_bytes = ADF_RING_SIZE_BYTES_MIN(ring_size_bytes);
+
+       if (ring->base_addr) {
+               memset(ring->base_addr, 0x7F, ring_size_bytes);
+               dma_free_coherent(&GET_DEV(ring->bank->accel_dev),
+                                 ring_size_bytes, ring->base_addr,
+                                 ring->dma_addr);
+       }
+}
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+                   uint32_t bank_num, uint32_t num_msgs,
+                   uint32_t msg_size, const char *ring_name,
+                   adf_callback_fn callback, int poll_mode,
+                   struct adf_etr_ring_data **ring_ptr)
+{
+       struct adf_etr_data *transport_data = accel_dev->transport;
+       struct adf_etr_bank_data *bank;
+       struct adf_etr_ring_data *ring;
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+       uint32_t ring_num;
+       int ret;
+
+       if (bank_num >= GET_MAX_BANKS(accel_dev)) {
+               pr_err("QAT: Invalid bank number\n");
+               return -EFAULT;
+       }
+       if (msg_size > ADF_MSG_SIZE_TO_BYTES(ADF_MAX_MSG_SIZE)) {
+               pr_err("QAT: Invalid msg size\n");
+               return -EFAULT;
+       }
+       if (ADF_MAX_INFLIGHTS(adf_verify_ring_size(msg_size, num_msgs),
+                             ADF_BYTES_TO_MSG_SIZE(msg_size)) < 2) {
+               pr_err("QAT: Invalid ring size for given msg size\n");
+               return -EFAULT;
+       }
+       if (adf_cfg_get_param_value(accel_dev, section, ring_name, val)) {
+               pr_err("QAT: Section %s, no such entry : %s\n",
+                      section, ring_name);
+               return -EFAULT;
+       }
+       if (kstrtouint(val, 10, &ring_num)) {
+               pr_err("QAT: Can't get ring number\n");
+               return -EFAULT;
+       }
+
+       bank = &transport_data->banks[bank_num];
+       if (adf_reserve_ring(bank, ring_num)) {
+               pr_err("QAT: Ring %d, %s already exists.\n",
+                      ring_num, ring_name);
+               return -EFAULT;
+       }
+       ring = &bank->rings[ring_num];
+       ring->ring_number = ring_num;
+       ring->bank = bank;
+       ring->callback = callback;
+       ring->msg_size = ADF_BYTES_TO_MSG_SIZE(msg_size);
+       ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
+       ring->head = 0;
+       ring->tail = 0;
+       atomic_set(ring->inflights, 0);
+       ret = adf_init_ring(ring);
+       if (ret)
+               goto err;
+
+       /* Enable HW arbitration for the given ring */
+       accel_dev->hw_device->hw_arb_ring_enable(ring);
+
+       if (adf_ring_debugfs_add(ring, ring_name)) {
+               pr_err("QAT: Couldn't add ring debugfs entry\n");
+               ret = -EFAULT;
+               goto err;
+       }
+
+       /* Enable interrupts if needed */
+       if (callback && (!poll_mode))
+               adf_enable_ring_irq(bank, ring->ring_number);
+       *ring_ptr = ring;
+       return 0;
+err:
+       adf_cleanup_ring(ring);
+       adf_unreserve_ring(bank, ring_num);
+       accel_dev->hw_device->hw_arb_ring_disable(ring);
+       return ret;
+}
+
+void adf_remove_ring(struct adf_etr_ring_data *ring)
+{
+       struct adf_etr_bank_data *bank = ring->bank;
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+
+       /* Disable interrupts for the given ring */
+       adf_disable_ring_irq(bank, ring->ring_number);
+
+       /* Clear the ring configuration and base registers */
+       WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
+                             ring->ring_number, 0);
+       WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
+                           ring->ring_number, 0);
+       adf_ring_debugfs_rm(ring);
+       adf_unreserve_ring(bank, ring->ring_number);
+       /* Disable HW arbitration for the given ring */
+       accel_dev->hw_device->hw_arb_ring_disable(ring);
+       adf_cleanup_ring(ring);
+}
+
+static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
+{
+       uint32_t empty_rings, i;
+
+       empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
+       empty_rings = ~empty_rings & bank->irq_mask;
+
+       for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
+               if (empty_rings & (1 << i))
+                       adf_handle_response(&bank->rings[i]);
+       }
+}
+
+/**
+ * adf_response_handler() - Bottom half handler for ring bank responses
+ * @bank_addr:  Address of the ring bank for which the BH was scheduled.
+ *
+ * Function is the bottom half handler for responses from the acceleration
+ * device. There is one handler for every ring bank. Function checks all
+ * communication rings in the bank.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_response_handler(unsigned long bank_addr)
+{
+       struct adf_etr_bank_data *bank = (void *)bank_addr;
+
+       /* Handle all the responses and re-enable IRQs */
+       adf_ring_response_handler(bank);
+       WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
+                                  bank->irq_mask);
+}
+EXPORT_SYMBOL_GPL(adf_response_handler);
+
+static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
+                                 const char *section, const char *format,
+                                 uint32_t key, uint32_t *value)
+{
+       char key_buf[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val_buf[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       snprintf(key_buf, ADF_CFG_MAX_KEY_LEN_IN_BYTES, format, key);
+
+       if (adf_cfg_get_param_value(accel_dev, section, key_buf, val_buf))
+               return -EFAULT;
+
+       if (kstrtouint(val_buf, 10, value))
+               return -EFAULT;
+       return 0;
+}
+
+static void adf_enable_coalesc(struct adf_etr_bank_data *bank,
+                              const char *section, uint32_t bank_num_in_accel)
+{
+       if (adf_get_cfg_int(bank->accel_dev, section,
+                           ADF_ETRMGR_COALESCE_TIMER_FORMAT,
+                           bank_num_in_accel, &bank->irq_coalesc_timer))
+               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+
+       if (ADF_COALESCING_MAX_TIME < bank->irq_coalesc_timer ||
+           ADF_COALESCING_MIN_TIME > bank->irq_coalesc_timer)
+               bank->irq_coalesc_timer = ADF_COALESCING_DEF_TIME;
+}
+
+static int adf_init_bank(struct adf_accel_dev *accel_dev,
+                        struct adf_etr_bank_data *bank,
+                        uint32_t bank_num, void __iomem *csr_addr)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_etr_ring_data *ring;
+       struct adf_etr_ring_data *tx_ring;
+       uint32_t i, coalesc_enabled;
+
+       memset(bank, 0, sizeof(*bank));
+       bank->bank_number = bank_num;
+       bank->csr_addr = csr_addr;
+       bank->accel_dev = accel_dev;
+       spin_lock_init(&bank->lock);
+
+       /* Always enable IRQ coalescing. This allows use of the optimised
+        * flag-and-coalescing register.
+        * If coalescing is disabled in the config file, just use the
+        * minimum time value. */
+       if (adf_get_cfg_int(accel_dev, "Accelerator0",
+                           ADF_ETRMGR_COALESCING_ENABLED_FORMAT,
+                           bank_num, &coalesc_enabled) && coalesc_enabled)
+               adf_enable_coalesc(bank, "Accelerator0", bank_num);
+       else
+               bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
+
+       for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+               WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
+               WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
+               ring = &bank->rings[i];
+               if (hw_data->tx_rings_mask & (1 << i)) {
+                       ring->inflights = kzalloc_node(sizeof(atomic_t),
+                                                      GFP_KERNEL,
+                                                      accel_dev->numa_node);
+                       if (!ring->inflights)
+                               goto err;
+               } else {
+                       if (i < hw_data->tx_rx_gap) {
+                               pr_err("QAT: Invalid tx rings mask config\n");
+                               goto err;
+                       }
+                       tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
+                       ring->inflights = tx_ring->inflights;
+               }
+       }
+       if (adf_bank_debugfs_add(bank)) {
+               pr_err("QAT: Failed to add bank debugfs entry\n");
+               goto err;
+       }
+
+       WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
+       return 0;
+err:
+       for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+               ring = &bank->rings[i];
+               if (hw_data->tx_rings_mask & (1 << i) && ring->inflights)
+                       kfree(ring->inflights);
+       }
+       return -ENOMEM;
+}
+
+/**
+ * adf_init_etr_data() - Initialize transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function initializes the communication channels (rings) for the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_init_etr_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       void __iomem *csr_addr;
+       uint32_t size;
+       uint32_t num_banks = 0;
+       int i, ret;
+
+       etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
+                               accel_dev->numa_node);
+       if (!etr_data)
+               return -ENOMEM;
+
+       num_banks = GET_MAX_BANKS(accel_dev);
+       size = num_banks * sizeof(struct adf_etr_bank_data);
+       etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node);
+       if (!etr_data->banks) {
+               ret = -ENOMEM;
+               goto err_bank;
+       }
+
+       accel_dev->transport = etr_data;
+       i = hw_data->get_etr_bar_id(hw_data);
+       csr_addr = accel_dev->accel_pci_dev.pci_bars[i].virt_addr;
+
+       /* accel_dev->debugfs_dir should always be non-NULL here */
+       etr_data->debug = debugfs_create_dir("transport",
+                                            accel_dev->debugfs_dir);
+       if (!etr_data->debug) {
+               pr_err("QAT: Unable to create transport debugfs entry\n");
+               ret = -ENOENT;
+               goto err_bank_debug;
+       }
+
+       for (i = 0; i < num_banks; i++) {
+               ret = adf_init_bank(accel_dev, &etr_data->banks[i], i,
+                                   csr_addr);
+               if (ret)
+                       goto err_bank_all;
+       }
+
+       return 0;
+
+err_bank_all:
+       debugfs_remove(etr_data->debug);
+err_bank_debug:
+       kfree(etr_data->banks);
+err_bank:
+       kfree(etr_data);
+       accel_dev->transport = NULL;
+       return ret;
+}
+EXPORT_SYMBOL_GPL(adf_init_etr_data);
+
+static void cleanup_bank(struct adf_etr_bank_data *bank)
+{
+       uint32_t i;
+
+       for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+               struct adf_accel_dev *accel_dev = bank->accel_dev;
+               struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+               struct adf_etr_ring_data *ring = &bank->rings[i];
+
+               if (bank->ring_mask & (1 << i))
+                       adf_cleanup_ring(ring);
+
+               if (hw_data->tx_rings_mask & (1 << i))
+                       kfree(ring->inflights);
+       }
+       adf_bank_debugfs_rm(bank);
+       memset(bank, 0, sizeof(*bank));
+}
+
+static void adf_cleanup_etr_handles(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       uint32_t i, num_banks = GET_MAX_BANKS(accel_dev);
+
+       for (i = 0; i < num_banks; i++)
+               cleanup_bank(&etr_data->banks[i]);
+}
+
+/**
+ * adf_cleanup_etr_data() - Clear transport rings for acceleration device
+ * @accel_dev:  Pointer to acceleration device.
+ *
+ * Function clears the communication channels (rings) of the
+ * acceleration device accel_dev.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: void
+ */
+void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *etr_data = accel_dev->transport;
+
+       if (etr_data) {
+               adf_cleanup_etr_handles(accel_dev);
+               debugfs_remove(etr_data->debug);
+               kfree(etr_data->banks);
+               kfree(etr_data);
+               accel_dev->transport = NULL;
+       }
+}
+EXPORT_SYMBOL_GPL(adf_cleanup_etr_data);
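
A hedged sketch of a transport consumer: create a tx/rx ring pair whose ring
numbers come from the device configuration, then submit a request. The
section name, config keys and sizes below are assumptions for illustration:

    static void example_resp_cb(void *resp)
    {
            /* runs in adf_response_handler() bottom-half context */
    }

    static int example_open_rings(struct adf_accel_dev *accel_dev,
                                  struct adf_etr_ring_data **tx,
                                  struct adf_etr_ring_data **rx)
    {
            int ret;

            /* 64 messages of 64 bytes each; tx has no callback or IRQ */
            ret = adf_create_ring(accel_dev, "SSL", 0, 64, 64,
                                  "RingTx", NULL, 0, tx);
            if (ret)
                    return ret;
            /* rx ring dispatches responses through example_resp_cb() */
            return adf_create_ring(accel_dev, "SSL", 0, 64, 64,
                                   "RingRx", example_resp_cb, 0, rx);
    }

Submission then goes through adf_send_message(), which returns -EAGAIN once
the inflight counter hits the ring limit, so callers are expected to back
off and retry.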
diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h
new file mode 100644 (file)
index 0000000..386485b
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_H
+#define ADF_TRANSPORT_H
+
+#include "adf_accel_devices.h"
+
+struct adf_etr_ring_data;
+
+typedef void (*adf_callback_fn)(void *resp_msg);
+
+int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
+                   uint32_t bank_num, uint32_t num_msgs, uint32_t msg_size,
+                   const char *ring_name, adf_callback_fn callback,
+                   int poll_mode, struct adf_etr_ring_data **ring_ptr);
+
+int adf_send_message(struct adf_etr_ring_data *ring, uint32_t *msg);
+void adf_remove_ring(struct adf_etr_ring_data *ring);
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_access_macros.h b/drivers/crypto/qat/qat_common/adf_transport_access_macros.h
new file mode 100644 (file)
index 0000000..91d88d6
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_ACCESS_MACROS_H
+#define ADF_TRANSPORT_ACCESS_MACROS_H
+
+#include "adf_accel_devices.h"
+#define ADF_BANK_INT_SRC_SEL_MASK_0 0x4444444CUL
+#define ADF_BANK_INT_SRC_SEL_MASK_X 0x44444444UL
+#define ADF_RING_CSR_RING_CONFIG 0x000
+#define ADF_RING_CSR_RING_LBASE 0x040
+#define ADF_RING_CSR_RING_UBASE 0x080
+#define ADF_RING_CSR_RING_HEAD 0x0C0
+#define ADF_RING_CSR_RING_TAIL 0x100
+#define ADF_RING_CSR_E_STAT 0x14C
+#define ADF_RING_CSR_INT_SRCSEL 0x174
+#define ADF_RING_CSR_INT_SRCSEL_2 0x178
+#define ADF_RING_CSR_INT_COL_EN 0x17C
+#define ADF_RING_CSR_INT_COL_CTL 0x180
+#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
+#define ADF_RING_CSR_INT_COL_CTL_ENABLE        0x80000000
+#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_RING_CONFIG_NEAR_FULL_WM 0x0A
+#define ADF_RING_CONFIG_NEAR_EMPTY_WM 0x05
+#define ADF_COALESCING_MIN_TIME 0x1FF
+#define ADF_COALESCING_MAX_TIME 0xFFFFF
+#define ADF_COALESCING_DEF_TIME 0x27FF
+#define ADF_RING_NEAR_WATERMARK_512 0x08
+#define ADF_RING_NEAR_WATERMARK_0 0x00
+#define ADF_RING_EMPTY_SIG 0x7F7F7F7F
+
+/* Valid internal ring size values */
+#define ADF_RING_SIZE_128 0x01
+#define ADF_RING_SIZE_256 0x02
+#define ADF_RING_SIZE_512 0x03
+#define ADF_RING_SIZE_4K 0x06
+#define ADF_RING_SIZE_16K 0x08
+#define ADF_RING_SIZE_4M 0x10
+#define ADF_MIN_RING_SIZE ADF_RING_SIZE_128
+#define ADF_MAX_RING_SIZE ADF_RING_SIZE_4M
+#define ADF_DEFAULT_RING_SIZE ADF_RING_SIZE_16K
+
+/* Valid internal msg size values */
+#define ADF_MSG_SIZE_32 0x01
+#define ADF_MSG_SIZE_64 0x02
+#define ADF_MSG_SIZE_128 0x04
+#define ADF_MIN_MSG_SIZE ADF_MSG_SIZE_32
+#define ADF_MAX_MSG_SIZE ADF_MSG_SIZE_128
+
+/* Size to bytes conversion macros for ring and msg values */
+#define ADF_MSG_SIZE_TO_BYTES(SIZE) ((SIZE) << 5)
+#define ADF_BYTES_TO_MSG_SIZE(SIZE) ((SIZE) >> 5)
+#define ADF_SIZE_TO_RING_SIZE_IN_BYTES(SIZE) ((1 << ((SIZE) - 1)) << 7)
+#define ADF_RING_SIZE_IN_BYTES_TO_SIZE(SIZE) ((1 << ((SIZE) - 1)) >> 7)
+
+/* Minimum ring buffer size for memory allocation */
+#define ADF_RING_SIZE_BYTES_MIN(SIZE) \
+       ((SIZE < ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K)) ? \
+               ADF_SIZE_TO_RING_SIZE_IN_BYTES(ADF_RING_SIZE_4K) : SIZE)
+#define ADF_RING_SIZE_MODULO(SIZE) (SIZE + 0x6)
+#define ADF_MAX_INFLIGHTS(RING_SIZE, MSG_SIZE) \
+       ((((1 << (RING_SIZE - 1)) << 4) >> MSG_SIZE) - 1)
+#define BUILD_RING_CONFIG(size)        \
+       ((ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (ADF_RING_NEAR_WATERMARK_0 << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#define BUILD_RESP_RING_CONFIG(size, watermark_nf, watermark_ne) \
+       ((watermark_nf << ADF_RING_CONFIG_NEAR_FULL_WM) \
+       | (watermark_ne << ADF_RING_CONFIG_NEAR_EMPTY_WM) \
+       | size)
+#define BUILD_RING_BASE_ADDR(addr, size) \
+       ((addr >> 6) & (0xFFFFFFFFFFFFFFFFULL << size))
+#define READ_CSR_RING_HEAD(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_RING_HEAD + (ring << 2))
+#define READ_CSR_RING_TAIL(csr_base_addr, bank, ring) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_RING_TAIL + (ring << 2))
+#define READ_CSR_E_STAT(csr_base_addr, bank) \
+       ADF_CSR_RD(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_E_STAT)
+#define WRITE_CSR_RING_CONFIG(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_CONFIG + (ring << 2), value)
+#define WRITE_CSR_RING_BASE(csr_base_addr, bank, ring, value) \
+do { \
+       uint32_t l_base = 0, u_base = 0; \
+       l_base = (uint32_t)(value & 0xFFFFFFFF); \
+       u_base = (uint32_t)((value & 0xFFFFFFFF00000000ULL) >> 32); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_LBASE + (ring << 2), l_base); \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_UBASE + (ring << 2), u_base); \
+} while (0)
+#define WRITE_CSR_RING_HEAD(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_HEAD + (ring << 2), value)
+#define WRITE_CSR_RING_TAIL(csr_base_addr, bank, ring, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+               ADF_RING_CSR_RING_TAIL + (ring << 2), value)
+#define WRITE_CSR_INT_SRCSEL(csr_base_addr, bank) \
+do { \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+       ADF_RING_CSR_INT_SRCSEL, ADF_BANK_INT_SRC_SEL_MASK_0);  \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+       ADF_RING_CSR_INT_SRCSEL_2, ADF_BANK_INT_SRC_SEL_MASK_X); \
+} while (0)
+#define WRITE_CSR_INT_COL_EN(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_COL_EN, value)
+#define WRITE_CSR_INT_COL_CTL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_COL_CTL, \
+                       ADF_RING_CSR_INT_COL_CTL_ENABLE | value)
+#define WRITE_CSR_INT_FLAG_AND_COL(csr_base_addr, bank, value) \
+       ADF_CSR_WR(csr_base_addr, (ADF_RING_BUNDLE_SIZE * bank) + \
+                       ADF_RING_CSR_INT_FLAG_AND_COL, value)
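+
+/* Minimal usage sketch (illustrative; csr_base, bank_nr, ring_mask and
+ * timer come from the caller's bank state):
+ *
+ *   WRITE_CSR_INT_COL_EN(csr_base, bank_nr, ring_mask);
+ *   WRITE_CSR_INT_COL_CTL(csr_base, bank_nr, timer);
+ */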
+#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport_debug.c b/drivers/crypto/qat/qat_common/adf_transport_debug.c
new file mode 100644 (file)
index 0000000..6b69745
--- /dev/null
@@ -0,0 +1,304 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include "adf_accel_devices.h"
+#include "adf_transport_internal.h"
+#include "adf_transport_access_macros.h"
+
+static DEFINE_MUTEX(ring_read_lock);
+static DEFINE_MUTEX(bank_read_lock);
+
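+/*
+ * seq_file iterator over the ring contents: position 0 produces
+ * SEQ_START_TOKEN (rendered as the configuration banner in
+ * adf_ring_show()), and each subsequent position maps to one message
+ * slot in the ring buffer.
+ */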
+static void *adf_ring_start(struct seq_file *sfile, loff_t *pos)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+
+       mutex_lock(&ring_read_lock);
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+               return NULL;
+
+       return ring->base_addr +
+               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static void *adf_ring_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+
+       if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
+                    ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
+               return NULL;
+
+       return ring->base_addr +
+               (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
+}
+
+static int adf_ring_show(struct seq_file *sfile, void *v)
+{
+       struct adf_etr_ring_data *ring = sfile->private;
+       struct adf_etr_bank_data *bank = ring->bank;
+       uint32_t *msg = v;
+       void __iomem *csr = ring->bank->csr_addr;
+       int i, x;
+
+       if (v == SEQ_START_TOKEN) {
+               int head, tail, empty;
+
+               head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+                                         ring->ring_number);
+               tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+                                         ring->ring_number);
+               empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+               seq_puts(sfile, "------- Ring configuration -------\n");
+               seq_printf(sfile, "ring num %d, bank num %d\n",
+                          ring->ring_number, ring->bank->bank_number);
+               seq_printf(sfile, "head %x, tail %x, empty: %d\n",
+                          head, tail, (empty & 1 << ring->ring_number)
+                          >> ring->ring_number);
+               seq_printf(sfile, "ring size %d, msg size %d\n",
+                          ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size),
+                          ADF_MSG_SIZE_TO_BYTES(ring->msg_size));
+               seq_puts(sfile, "----------- Ring data ------------\n");
+               return 0;
+       }
+       seq_printf(sfile, "%p:", msg);
+       x = 0;
+       for (i = 0; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
+               seq_printf(sfile, " %08X", *(msg + i));
+               if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
+                   (++x == 8)) {
+                       seq_printf(sfile, "\n%p:", msg + i + 1);
+                       x = 0;
+               }
+       }
+       seq_puts(sfile, "\n");
+       return 0;
+}
+
+static void adf_ring_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&ring_read_lock);
+}
+
+static const struct seq_operations adf_ring_sops = {
+       .start = adf_ring_start,
+       .next = adf_ring_next,
+       .stop = adf_ring_stop,
+       .show = adf_ring_show
+};
+
+static int adf_ring_open(struct inode *inode, struct file *file)
+{
+       int ret = seq_open(file, &adf_ring_sops);
+
+       if (!ret) {
+               struct seq_file *seq_f = file->private_data;
+
+               seq_f->private = inode->i_private;
+       }
+       return ret;
+}
+
+static const struct file_operations adf_ring_debug_fops = {
+       .open = adf_ring_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release
+};
+
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)
+{
+       struct adf_etr_ring_debug_entry *ring_debug;
+       char entry_name[8];
+
+       ring_debug = kzalloc(sizeof(*ring_debug), GFP_KERNEL);
+       if (!ring_debug)
+               return -ENOMEM;
+
+       strlcpy(ring_debug->ring_name, name, sizeof(ring_debug->ring_name));
+       snprintf(entry_name, sizeof(entry_name), "ring_%02d",
+                ring->ring_number);
+
+       ring_debug->debug = debugfs_create_file(entry_name, S_IRUSR,
+                                               ring->bank->bank_debug_dir,
+                                               ring, &adf_ring_debug_fops);
+       if (!ring_debug->debug) {
+               pr_err("QAT: Failed to create ring debug entry.\n");
+               kfree(ring_debug);
+               return -EFAULT;
+       }
+       ring->ring_debug = ring_debug;
+       return 0;
+}
+
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)
+{
+       if (ring->ring_debug) {
+               debugfs_remove(ring->ring_debug->debug);
+               kfree(ring->ring_debug);
+               ring->ring_debug = NULL;
+       }
+}
+
+static void *adf_bank_start(struct seq_file *sfile, loff_t *pos)
+{
+       mutex_lock(&bank_read_lock);
+       if (*pos == 0)
+               return SEQ_START_TOKEN;
+
+       if (*pos >= ADF_ETR_MAX_RINGS_PER_BANK)
+               return NULL;
+
+       return pos;
+}
+
+static void *adf_bank_next(struct seq_file *sfile, void *v, loff_t *pos)
+{
+       if (++(*pos) >= ADF_ETR_MAX_RINGS_PER_BANK)
+               return NULL;
+
+       return pos;
+}
+
+static int adf_bank_show(struct seq_file *sfile, void *v)
+{
+       struct adf_etr_bank_data *bank = sfile->private;
+
+       if (v == SEQ_START_TOKEN) {
+               seq_printf(sfile, "------- Bank %d configuration -------\n",
+                          bank->bank_number);
+       } else {
+               int ring_id = (int)(*((loff_t *)v)) - 1;
+               struct adf_etr_ring_data *ring = &bank->rings[ring_id];
+               void __iomem *csr = bank->csr_addr;
+               int head, tail, empty;
+
+               if (!(bank->ring_mask & 1 << ring_id))
+                       return 0;
+
+               head = READ_CSR_RING_HEAD(csr, bank->bank_number,
+                                         ring->ring_number);
+               tail = READ_CSR_RING_TAIL(csr, bank->bank_number,
+                                         ring->ring_number);
+               empty = READ_CSR_E_STAT(csr, bank->bank_number);
+
+               seq_printf(sfile,
+                          "ring num %02d, head %04x, tail %04x, empty: %d\n",
+                          ring->ring_number, head, tail,
+                          (empty & 1 << ring->ring_number) >>
+                          ring->ring_number);
+       }
+       return 0;
+}
+
+static void adf_bank_stop(struct seq_file *sfile, void *v)
+{
+       mutex_unlock(&bank_read_lock);
+}
+
+static const struct seq_operations adf_bank_sops = {
+       .start = adf_bank_start,
+       .next = adf_bank_next,
+       .stop = adf_bank_stop,
+       .show = adf_bank_show
+};
+
+static int adf_bank_open(struct inode *inode, struct file *file)
+{
+       int ret = seq_open(file, &adf_bank_sops);
+
+       if (!ret) {
+               struct seq_file *seq_f = file->private_data;
+
+               seq_f->private = inode->i_private;
+       }
+       return ret;
+}
+
+static const struct file_operations adf_bank_debug_fops = {
+       .open = adf_bank_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release
+};
+
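+/*
+ * adf_bank_debugfs_add() builds the per-bank debugfs tree. An
+ * illustrative layout (the root is wherever the device created its
+ * transport debug directory):
+ *
+ *   <transport>/bank_00/config
+ *   <transport>/bank_00/ring_00
+ *   <transport>/bank_00/ring_01
+ */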
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+       struct adf_accel_dev *accel_dev = bank->accel_dev;
+       struct dentry *parent = accel_dev->transport->debug;
+       char name[8];
+
+       snprintf(name, sizeof(name), "bank_%02d", bank->bank_number);
+       bank->bank_debug_dir = debugfs_create_dir(name, parent);
+       if (!bank->bank_debug_dir) {
+               pr_err("QAT: Failed to create bank debug dir.\n");
+               return -EFAULT;
+       }
+
+       bank->bank_debug_cfg = debugfs_create_file("config", S_IRUSR,
+                                                  bank->bank_debug_dir, bank,
+                                                  &adf_bank_debug_fops);
+       if (!bank->bank_debug_cfg) {
+               pr_err("QAT: Failed to create bank debug entry.\n");
+               debugfs_remove(bank->bank_debug_dir);
+               return -EFAULT;
+       }
+       return 0;
+}
+
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank)
+{
+       debugfs_remove(bank->bank_debug_cfg);
+       debugfs_remove(bank->bank_debug_dir);
+}
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
new file mode 100644 (file)
index 0000000..f854bac
--- /dev/null
@@ -0,0 +1,118 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_TRANSPORT_INTRN_H
+#define ADF_TRANSPORT_INTRN_H
+
+#include <linux/interrupt.h>
+#include <linux/atomic.h>
+#include <linux/spinlock_types.h>
+#include "adf_transport.h"
+
+struct adf_etr_ring_debug_entry {
+       char ring_name[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       struct dentry *debug;
+};
+
+struct adf_etr_ring_data {
+       void *base_addr;
+       atomic_t *inflights;
+       spinlock_t lock;        /* protects ring data struct */
+       adf_callback_fn callback;
+       struct adf_etr_bank_data *bank;
+       dma_addr_t dma_addr;
+       uint16_t head;
+       uint16_t tail;
+       uint8_t ring_number;
+       uint8_t ring_size;
+       uint8_t msg_size;
+       uint8_t reserved;
+       struct adf_etr_ring_debug_entry *ring_debug;
+} __packed;
+
+struct adf_etr_bank_data {
+       struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
+       struct tasklet_struct resp_hanlder;
+       void __iomem *csr_addr;
+       struct adf_accel_dev *accel_dev;
+       uint32_t irq_coalesc_timer;
+       uint16_t ring_mask;
+       uint16_t irq_mask;
+       spinlock_t lock;        /* protects bank data struct */
+       struct dentry *bank_debug_dir;
+       struct dentry *bank_debug_cfg;
+       uint32_t bank_number;
+} __packed;
+
+struct adf_etr_data {
+       struct adf_etr_bank_data *banks;
+       struct dentry *debug;
+};
+
+void adf_response_handler(unsigned long bank_addr);
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+int adf_bank_debugfs_add(struct adf_etr_bank_data *bank);
+void adf_bank_debugfs_rm(struct adf_etr_bank_data *bank);
+int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
+void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
+#else
+static inline int adf_bank_debugfs_add(struct adf_etr_bank_data *bank)
+{
+       return 0;
+}
+
+#define adf_bank_debugfs_rm(bank) do {} while (0)
+
+static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,
+                                      const char *name)
+{
+       return 0;
+}
+
+#define adf_ring_debugfs_rm(ring) do {} while (0)
+#endif
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw.h b/drivers/crypto/qat/qat_common/icp_qat_fw.h
new file mode 100644 (file)
index 0000000..f1e30e2
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_H_
+#define _ICP_QAT_FW_H_
+#include <linux/types.h>
+#include "icp_qat_hw.h"
+
+#define QAT_FIELD_SET(flags, val, bitpos, mask) \
+{ (flags) = (((flags) & (~((mask) << (bitpos)))) | \
+               (((val) & (mask)) << (bitpos))) ; }
+
+#define QAT_FIELD_GET(flags, bitpos, mask) \
+       (((flags) >> (bitpos)) & (mask))
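+
+/* Worked example (illustrative): starting from flags == 0,
+ * QAT_FIELD_SET(flags, 1, 7, 0x1) leaves flags == 0x80, and
+ * QAT_FIELD_GET(flags, 7, 0x1) then returns 1.
+ */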
+
+#define ICP_QAT_FW_REQ_DEFAULT_SZ 128
+#define ICP_QAT_FW_RESP_DEFAULT_SZ 32
+#define ICP_QAT_FW_COMN_ONE_BYTE_SHIFT 8
+#define ICP_QAT_FW_COMN_SINGLE_BYTE_MASK 0xFF
+#define ICP_QAT_FW_NUM_LONGWORDS_1 1
+#define ICP_QAT_FW_NUM_LONGWORDS_2 2
+#define ICP_QAT_FW_NUM_LONGWORDS_3 3
+#define ICP_QAT_FW_NUM_LONGWORDS_4 4
+#define ICP_QAT_FW_NUM_LONGWORDS_5 5
+#define ICP_QAT_FW_NUM_LONGWORDS_6 6
+#define ICP_QAT_FW_NUM_LONGWORDS_7 7
+#define ICP_QAT_FW_NUM_LONGWORDS_10 10
+#define ICP_QAT_FW_NUM_LONGWORDS_13 13
+#define ICP_QAT_FW_NULL_REQ_SERV_ID 1
+
+enum icp_qat_fw_comn_resp_serv_id {
+       ICP_QAT_FW_COMN_RESP_SERV_NULL,
+       ICP_QAT_FW_COMN_RESP_SERV_CPM_FW,
+       ICP_QAT_FW_COMN_RESP_SERV_DELIMITER
+};
+
+enum icp_qat_fw_comn_request_id {
+       ICP_QAT_FW_COMN_REQ_NULL = 0,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_PKE = 3,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_LA = 4,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_DMA = 7,
+       ICP_QAT_FW_COMN_REQ_CPM_FW_COMP = 9,
+       ICP_QAT_FW_COMN_REQ_DELIMITER
+};
+
+struct icp_qat_fw_comn_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t serv_specif_fields[4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_comn_req_mid {
+       uint64_t opaque_data;
+       uint64_t src_data_addr;
+       uint64_t dest_data_addr;
+       uint32_t src_length;
+       uint32_t dst_length;
+};
+
+struct icp_qat_fw_comn_req_cd_ctrl {
+       uint32_t content_desc_ctrl_lw[ICP_QAT_FW_NUM_LONGWORDS_5];
+};
+
+struct icp_qat_fw_comn_req_hdr {
+       uint8_t resrvd1;
+       uint8_t service_cmd_id;
+       uint8_t service_type;
+       uint8_t hdr_flags;
+       uint16_t serv_specif_flags;
+       uint16_t comn_req_flags;
+};
+
+struct icp_qat_fw_comn_req_rqpars {
+       uint32_t serv_specif_rqpars_lw[ICP_QAT_FW_NUM_LONGWORDS_13];
+};
+
+struct icp_qat_fw_comn_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+struct icp_qat_fw_comn_error {
+       uint8_t xlat_err_code;
+       uint8_t cmp_err_code;
+};
+
+struct icp_qat_fw_comn_resp_hdr {
+       uint8_t resrvd1;
+       uint8_t service_id;
+       uint8_t response_type;
+       uint8_t hdr_flags;
+       struct icp_qat_fw_comn_error comn_error;
+       uint8_t comn_status;
+       uint8_t cmd_id;
+};
+
+struct icp_qat_fw_comn_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_hdr;
+       uint64_t opaque_data;
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_COMN_REQ_FLAG_SET 1
+#define ICP_QAT_FW_COMN_REQ_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_VALID_FLAG_BITPOS 7
+#define ICP_QAT_FW_COMN_VALID_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK 0x7F
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_type
+
+#define ICP_QAT_FW_COMN_OV_SRV_TYPE_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_type = val
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_GET(icp_qat_fw_comn_req_hdr_t) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id
+
+#define ICP_QAT_FW_COMN_OV_SRV_CMD_ID_SET(icp_qat_fw_comn_req_hdr_t, val) \
+       icp_qat_fw_comn_req_hdr_t.service_cmd_id = val
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_t.hdr_flags)
+
+#define ICP_QAT_FW_COMN_HDR_VALID_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_GET(hdr_flags) \
+       QAT_FIELD_GET(hdr_flags, \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_RESRVD_FLD_GET(hdr_flags) \
+       (hdr_flags & ICP_QAT_FW_COMN_HDR_RESRVD_FLD_MASK)
+
+#define ICP_QAT_FW_COMN_VALID_FLAG_SET(hdr_t, val) \
+       QAT_FIELD_SET((hdr_t.hdr_flags), (val), \
+       ICP_QAT_FW_COMN_VALID_FLAG_BITPOS, \
+       ICP_QAT_FW_COMN_VALID_FLAG_MASK)
+
+#define ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(valid) \
+       (((valid) & ICP_QAT_FW_COMN_VALID_FLAG_MASK) << \
+        ICP_QAT_FW_COMN_VALID_FLAG_BITPOS)
+
+#define QAT_COMN_PTR_TYPE_BITPOS 0
+#define QAT_COMN_PTR_TYPE_MASK 0x1
+#define QAT_COMN_CD_FLD_TYPE_BITPOS 1
+#define QAT_COMN_CD_FLD_TYPE_MASK 0x1
+#define QAT_COMN_PTR_TYPE_FLAT 0x0
+#define QAT_COMN_PTR_TYPE_SGL 0x1
+#define QAT_COMN_CD_FLD_TYPE_64BIT_ADR 0x0
+#define QAT_COMN_CD_FLD_TYPE_16BYTE_DATA 0x1
+
+#define ICP_QAT_FW_COMN_FLAGS_BUILD(cdt, ptr) \
+       ((((cdt) & QAT_COMN_CD_FLD_TYPE_MASK) << QAT_COMN_CD_FLD_TYPE_BITPOS) \
+        | (((ptr) & QAT_COMN_PTR_TYPE_MASK) << QAT_COMN_PTR_TYPE_BITPOS))
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_PTR_TYPE_BITPOS, QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_PTR_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_PTR_TYPE_BITPOS, \
+                       QAT_COMN_PTR_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_CD_FLD_TYPE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_COMN_CD_FLD_TYPE_BITPOS, \
+                       QAT_COMN_CD_FLD_TYPE_MASK)
+
+#define ICP_QAT_FW_COMN_NEXT_ID_BITPOS 4
+#define ICP_QAT_FW_COMN_NEXT_ID_MASK 0xF0
+#define ICP_QAT_FW_COMN_CURR_ID_BITPOS 0
+#define ICP_QAT_FW_COMN_CURR_ID_MASK 0x0F
+
+#define ICP_QAT_FW_COMN_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+        & ICP_QAT_FW_COMN_NEXT_ID_MASK)); }
+
+#define ICP_QAT_FW_COMN_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id) & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+       { ((cd_ctrl_hdr_t)->next_curr_id) = ((((cd_ctrl_hdr_t)->next_curr_id) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)); }
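+
+/* Example (illustrative): next_curr_id == 0x21 packs next id 2 in the
+ * upper nibble and current id 1 in the lower nibble, so
+ * ICP_QAT_FW_COMN_NEXT_ID_GET() returns 2 and
+ * ICP_QAT_FW_COMN_CURR_ID_GET() returns 1.
+ */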
+
+#define QAT_COMN_RESP_CRYPTO_STATUS_BITPOS 7
+#define QAT_COMN_RESP_CRYPTO_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_STATUS_BITPOS 5
+#define QAT_COMN_RESP_CMP_STATUS_MASK 0x1
+#define QAT_COMN_RESP_XLAT_STATUS_BITPOS 4
+#define QAT_COMN_RESP_XLAT_STATUS_MASK 0x1
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS 3
+#define QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK 0x1
+
+#define ICP_QAT_FW_COMN_RESP_STATUS_BUILD(crypto, comp, xlat, eolb) \
+       ((((crypto) & QAT_COMN_RESP_CRYPTO_STATUS_MASK) << \
+       QAT_COMN_RESP_CRYPTO_STATUS_BITPOS) | \
+       (((comp) & QAT_COMN_RESP_CMP_STATUS_MASK) << \
+       QAT_COMN_RESP_CMP_STATUS_BITPOS) | \
+       (((xlat) & QAT_COMN_RESP_XLAT_STATUS_MASK) << \
+       QAT_COMN_RESP_XLAT_STATUS_BITPOS) | \
+       (((eolb) & QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK) << \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS))
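+
+/* Example (illustrative): ICP_QAT_FW_COMN_RESP_STATUS_BUILD(1, 0, 0, 0)
+ * evaluates to 0x80 -- only the crypto status bit (bit 7) is set.
+ */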
+
+#define ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CRYPTO_STATUS_BITPOS, \
+       QAT_COMN_RESP_CRYPTO_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_STATUS_BITPOS, \
+       QAT_COMN_RESP_CMP_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_XLAT_STAT_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_XLAT_STATUS_BITPOS, \
+       QAT_COMN_RESP_XLAT_STATUS_MASK)
+
+#define ICP_QAT_FW_COMN_RESP_CMP_END_OF_LAST_BLK_FLAG_GET(status) \
+       QAT_FIELD_GET(status, QAT_COMN_RESP_CMP_END_OF_LAST_BLK_BITPOS, \
+       QAT_COMN_RESP_CMP_END_OF_LAST_BLK_MASK)
+
+#define ICP_QAT_FW_COMN_STATUS_FLAG_OK 0
+#define ICP_QAT_FW_COMN_STATUS_FLAG_ERROR 1
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_CLR 0
+#define ICP_QAT_FW_COMN_STATUS_CMP_END_OF_LAST_BLK_FLAG_SET 1
+#define ERR_CODE_NO_ERROR 0
+#define ERR_CODE_INVALID_BLOCK_TYPE -1
+#define ERR_CODE_NO_MATCH_ONES_COMP -2
+#define ERR_CODE_TOO_MANY_LEN_OR_DIS -3
+#define ERR_CODE_INCOMPLETE_LEN -4
+#define ERR_CODE_RPT_LEN_NO_FIRST_LEN -5
+#define ERR_CODE_RPT_GT_SPEC_LEN -6
+#define ERR_CODE_INV_LIT_LEN_CODE_LEN -7
+#define ERR_CODE_INV_DIS_CODE_LEN -8
+#define ERR_CODE_INV_LIT_LEN_DIS_IN_BLK -9
+#define ERR_CODE_DIS_TOO_FAR_BACK -10
+#define ERR_CODE_OVERFLOW_ERROR -11
+#define ERR_CODE_SOFT_ERROR -12
+#define ERR_CODE_FATAL_ERROR -13
+#define ERR_CODE_SSM_ERROR -14
+#define ERR_CODE_ENDPOINT_ERROR -15
+
+enum icp_qat_fw_slice {
+       ICP_QAT_FW_SLICE_NULL = 0,
+       ICP_QAT_FW_SLICE_CIPHER = 1,
+       ICP_QAT_FW_SLICE_AUTH = 2,
+       ICP_QAT_FW_SLICE_DRAM_RD = 3,
+       ICP_QAT_FW_SLICE_DRAM_WR = 4,
+       ICP_QAT_FW_SLICE_COMP = 5,
+       ICP_QAT_FW_SLICE_XLAT = 6,
+       ICP_QAT_FW_SLICE_DELIMITER
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/qat/qat_common/icp_qat_fw_init_admin.h
new file mode 100644 (file)
index 0000000..72a59fa
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_INIT_ADMIN_H_
+#define _ICP_QAT_FW_INIT_ADMIN_H_
+
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_init_admin_cmd_id {
+       ICP_QAT_FW_INIT_ME = 0,
+       ICP_QAT_FW_TRNG_ENABLE = 1,
+       ICP_QAT_FW_TRNG_DISABLE = 2,
+       ICP_QAT_FW_CONSTANTS_CFG = 3,
+       ICP_QAT_FW_STATUS_GET = 4,
+       ICP_QAT_FW_COUNTERS_GET = 5,
+       ICP_QAT_FW_LOOPBACK = 6,
+       ICP_QAT_FW_HEARTBEAT_SYNC = 7,
+       ICP_QAT_FW_HEARTBEAT_GET = 8
+};
+
+enum icp_qat_fw_init_admin_resp_status {
+       ICP_QAT_FW_INIT_RESP_STATUS_SUCCESS = 0,
+       ICP_QAT_FW_INIT_RESP_STATUS_FAIL
+};
+
+struct icp_qat_fw_init_admin_req {
+       uint16_t init_cfg_sz;
+       uint8_t resrvd1;
+       uint8_t init_admin_cmd_id;
+       uint32_t resrvd2;
+       uint64_t opaque_data;
+       uint64_t init_cfg_ptr;
+       uint64_t resrvd3;
+};
+
+struct icp_qat_fw_init_admin_resp_hdr {
+       uint8_t flags;
+       uint8_t resrvd1;
+       uint8_t status;
+       uint8_t init_admin_cmd_id;
+};
+
+struct icp_qat_fw_init_admin_resp_pars {
+       union {
+               uint32_t resrvd1[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       uint32_t version_patch_num;
+                       uint8_t context_id;
+                       uint8_t ae_id;
+                       uint16_t resrvd1;
+                       uint64_t resrvd2;
+               } s1;
+               struct {
+                       uint64_t req_rec_count;
+                       uint64_t resp_sent_count;
+               } s2;
+       } u;
+};
+
+struct icp_qat_fw_init_admin_resp {
+       struct icp_qat_fw_init_admin_resp_hdr init_resp_hdr;
+       union {
+               uint32_t resrvd2;
+               struct {
+                       uint16_t version_minor_num;
+                       uint16_t version_major_num;
+               } s;
+       } u;
+       uint64_t opaque_data;
+       struct icp_qat_fw_init_admin_resp_pars init_resp_pars;
+};
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_OK 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_BLOCKED 1
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS 0
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK 0x1
+#define ICP_QAT_FW_COMN_STATUS_RESRVD_FLD_MASK 0xFE
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_GET(hdr_t) \
+       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(hdr_t.flags)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_HDR_FLAG_SET(hdr_t, val) \
+       ICP_QAT_FW_COMN_HEARTBEAT_FLAG_SET(hdr_t, val)
+
+#define ICP_QAT_FW_COMN_HEARTBEAT_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, \
+                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_BITPOS, \
+                ICP_QAT_FW_COMN_HEARTBEAT_FLAG_MASK)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_la.h b/drivers/crypto/qat/qat_common/icp_qat_fw_la.h
new file mode 100644 (file)
index 0000000..c8d2669
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_FW_LA_H_
+#define _ICP_QAT_FW_LA_H_
+#include "icp_qat_fw.h"
+
+enum icp_qat_fw_la_cmd_id {
+       ICP_QAT_FW_LA_CMD_CIPHER = 0,
+       ICP_QAT_FW_LA_CMD_AUTH = 1,
+       ICP_QAT_FW_LA_CMD_CIPHER_HASH = 2,
+       ICP_QAT_FW_LA_CMD_HASH_CIPHER = 3,
+       ICP_QAT_FW_LA_CMD_TRNG_GET_RANDOM = 4,
+       ICP_QAT_FW_LA_CMD_TRNG_TEST = 5,
+       ICP_QAT_FW_LA_CMD_SSL3_KEY_DERIVE = 6,
+       ICP_QAT_FW_LA_CMD_TLS_V1_1_KEY_DERIVE = 7,
+       ICP_QAT_FW_LA_CMD_TLS_V1_2_KEY_DERIVE = 8,
+       ICP_QAT_FW_LA_CMD_MGF1 = 9,
+       ICP_QAT_FW_LA_CMD_AUTH_PRE_COMP = 10,
+       ICP_QAT_FW_LA_CMD_CIPHER_PRE_COMP = 11,
+       ICP_QAT_FW_LA_CMD_DELIMITER = 12
+};
+
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_ICV_VER_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+#define ICP_QAT_FW_LA_TRNG_STATUS_PASS ICP_QAT_FW_COMN_STATUS_FLAG_OK
+#define ICP_QAT_FW_LA_TRNG_STATUS_FAIL ICP_QAT_FW_COMN_STATUS_FLAG_ERROR
+
+struct icp_qat_fw_la_bulk_req {
+       struct icp_qat_fw_comn_req_hdr comn_hdr;
+       struct icp_qat_fw_comn_req_hdr_cd_pars cd_pars;
+       struct icp_qat_fw_comn_req_mid comn_mid;
+       struct icp_qat_fw_comn_req_rqpars serv_specif_rqpars;
+       struct icp_qat_fw_comn_req_cd_ctrl cd_ctrl;
+};
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_12_OCTETS 1
+#define ICP_QAT_FW_LA_GCM_IV_LEN_NOT_12_OCTETS 0
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS 12
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO 1
+#define QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK 0x1
+#define QAT_LA_GCM_IV_LEN_FLAG_BITPOS 11
+#define QAT_LA_GCM_IV_LEN_FLAG_MASK 0x1
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER 1
+#define ICP_QAT_FW_LA_NO_DIGEST_IN_BUFFER 0
+#define QAT_LA_DIGEST_IN_BUFFER_BITPOS 10
+#define QAT_LA_DIGEST_IN_BUFFER_MASK 0x1
+#define ICP_QAT_FW_LA_SNOW_3G_PROTO 4
+#define ICP_QAT_FW_LA_GCM_PROTO        2
+#define ICP_QAT_FW_LA_CCM_PROTO        1
+#define ICP_QAT_FW_LA_NO_PROTO 0
+#define QAT_LA_PROTO_BITPOS 7
+#define QAT_LA_PROTO_MASK 0x7
+#define ICP_QAT_FW_LA_CMP_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_CMP_AUTH_RES 0
+#define QAT_LA_CMP_AUTH_RES_BITPOS 6
+#define QAT_LA_CMP_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_RET_AUTH_RES 1
+#define ICP_QAT_FW_LA_NO_RET_AUTH_RES 0
+#define QAT_LA_RET_AUTH_RES_BITPOS 5
+#define QAT_LA_RET_AUTH_RES_MASK 0x1
+#define ICP_QAT_FW_LA_UPDATE_STATE 1
+#define ICP_QAT_FW_LA_NO_UPDATE_STATE 0
+#define QAT_LA_UPDATE_STATE_BITPOS 4
+#define QAT_LA_UPDATE_STATE_MASK 0x1
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_CD_SETUP 0
+#define ICP_QAT_FW_CIPH_AUTH_CFG_OFFSET_IN_SHRAM_CP 1
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS 3
+#define QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK 0x1
+#define ICP_QAT_FW_CIPH_IV_64BIT_PTR 0
+#define ICP_QAT_FW_CIPH_IV_16BYTE_DATA 1
+#define QAT_LA_CIPH_IV_FLD_BITPOS 2
+#define QAT_LA_CIPH_IV_FLD_MASK   0x1
+#define ICP_QAT_FW_LA_PARTIAL_NONE 0
+#define ICP_QAT_FW_LA_PARTIAL_START 1
+#define ICP_QAT_FW_LA_PARTIAL_MID 3
+#define ICP_QAT_FW_LA_PARTIAL_END 2
+#define QAT_LA_PARTIAL_BITPOS 0
+#define QAT_LA_PARTIAL_MASK 0x3
+#define ICP_QAT_FW_LA_FLAGS_BUILD(zuc_proto, gcm_iv_len, auth_rslt, proto, \
+       cmp_auth, ret_auth, update_state, \
+       ciph_iv, ciphcfg, partial) \
+       (((zuc_proto & QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK) << \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS) | \
+       ((gcm_iv_len & QAT_LA_GCM_IV_LEN_FLAG_MASK) << \
+       QAT_LA_GCM_IV_LEN_FLAG_BITPOS) | \
+       ((auth_rslt & QAT_LA_DIGEST_IN_BUFFER_MASK) << \
+       QAT_LA_DIGEST_IN_BUFFER_BITPOS) | \
+       ((proto & QAT_LA_PROTO_MASK) << \
+       QAT_LA_PROTO_BITPOS)    | \
+       ((cmp_auth & QAT_LA_CMP_AUTH_RES_MASK) << \
+       QAT_LA_CMP_AUTH_RES_BITPOS) | \
+       ((ret_auth & QAT_LA_RET_AUTH_RES_MASK) << \
+       QAT_LA_RET_AUTH_RES_BITPOS) | \
+       ((update_state & QAT_LA_UPDATE_STATE_MASK) << \
+       QAT_LA_UPDATE_STATE_BITPOS) | \
+       ((ciph_iv & QAT_LA_CIPH_IV_FLD_MASK) << \
+       QAT_LA_CIPH_IV_FLD_BITPOS) | \
+       ((ciphcfg & QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK) << \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS) | \
+       ((partial & QAT_LA_PARTIAL_MASK) << \
+       QAT_LA_PARTIAL_BITPOS))
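+
+/* Example (illustrative): passing proto == ICP_QAT_FW_LA_GCM_PROTO (2)
+ * contributes (2 & 0x7) << 7 == 0x100 to the flags word; each other
+ * argument fills its own field the same way.
+ */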
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PROTO_BITPOS, QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_GET(flags) \
+       QAT_FIELD_GET(flags, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_IV_FLD_BITPOS, \
+       QAT_LA_CIPH_IV_FLD_MASK)
+
+#define ICP_QAT_FW_LA_CIPH_AUTH_CFG_OFFSET_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CIPH_AUTH_CFG_OFFSET_BITPOS, \
+       QAT_LA_CIPH_AUTH_CFG_OFFSET_MASK)
+
+#define ICP_QAT_FW_LA_ZUC_3G_PROTO_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_FW_LA_ZUC_3G_PROTO_FLAG_BITPOS, \
+       QAT_FW_LA_ZUC_3G_PROTO_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_GCM_IV_LEN_FLAG_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_GCM_IV_LEN_FLAG_BITPOS, \
+       QAT_LA_GCM_IV_LEN_FLAG_MASK)
+
+#define ICP_QAT_FW_LA_PROTO_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PROTO_BITPOS, \
+       QAT_LA_PROTO_MASK)
+
+#define ICP_QAT_FW_LA_CMP_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_CMP_AUTH_RES_BITPOS, \
+       QAT_LA_CMP_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_RET_AUTH_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_RET_AUTH_RES_BITPOS, \
+       QAT_LA_RET_AUTH_RES_MASK)
+
+#define ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_DIGEST_IN_BUFFER_BITPOS, \
+       QAT_LA_DIGEST_IN_BUFFER_MASK)
+
+#define ICP_QAT_FW_LA_UPDATE_STATE_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_UPDATE_STATE_BITPOS, \
+       QAT_LA_UPDATE_STATE_MASK)
+
+#define ICP_QAT_FW_LA_PARTIAL_SET(flags, val) \
+       QAT_FIELD_SET(flags, val, QAT_LA_PARTIAL_BITPOS, \
+       QAT_LA_PARTIAL_MASK)
+
+struct icp_qat_fw_cipher_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } s1;
+       } u;
+};
+
+struct icp_qat_fw_cipher_auth_req_hdr_cd_pars {
+       union {
+               struct {
+                       uint64_t content_desc_addr;
+                       uint16_t content_desc_resrvd1;
+                       uint8_t content_desc_params_sz;
+                       uint8_t content_desc_hdr_resrvd2;
+                       uint32_t content_desc_resrvd3;
+               } s;
+               struct {
+                       uint32_t cipher_key_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               } sl;
+       } u;
+};
+
+struct icp_qat_fw_cipher_cd_ctrl_hdr {
+       uint8_t cipher_state_sz;
+       uint8_t cipher_key_sz;
+       uint8_t cipher_cfg_offset;
+       uint8_t next_curr_id;
+       uint8_t cipher_padding_sz;
+       uint8_t resrvd1;
+       uint16_t resrvd2;
+       uint32_t resrvd3[ICP_QAT_FW_NUM_LONGWORDS_3];
+};
+
+struct icp_qat_fw_auth_cd_ctrl_hdr {
+       uint32_t resrvd1;
+       uint8_t resrvd2;
+       uint8_t hash_flags;
+       uint8_t hash_cfg_offset;
+       uint8_t next_curr_id;
+       uint8_t resrvd3;
+       uint8_t outer_prefix_sz;
+       uint8_t final_sz;
+       uint8_t inner_res_sz;
+       uint8_t resrvd4;
+       uint8_t inner_state1_sz;
+       uint8_t inner_state2_offset;
+       uint8_t inner_state2_sz;
+       uint8_t outer_config_offset;
+       uint8_t outer_state1_sz;
+       uint8_t outer_res_sz;
+       uint8_t outer_prefix_offset;
+};
+
+struct icp_qat_fw_cipher_auth_cd_ctrl_hdr {
+       uint8_t cipher_state_sz;
+       uint8_t cipher_key_sz;
+       uint8_t cipher_cfg_offset;
+       uint8_t next_curr_id_cipher;
+       uint8_t cipher_padding_sz;
+       uint8_t hash_flags;
+       uint8_t hash_cfg_offset;
+       uint8_t next_curr_id_auth;
+       uint8_t resrvd1;
+       uint8_t outer_prefix_sz;
+       uint8_t final_sz;
+       uint8_t inner_res_sz;
+       uint8_t resrvd2;
+       uint8_t inner_state1_sz;
+       uint8_t inner_state2_offset;
+       uint8_t inner_state2_sz;
+       uint8_t outer_config_offset;
+       uint8_t outer_state1_sz;
+       uint8_t outer_res_sz;
+       uint8_t outer_prefix_offset;
+};
+
+#define ICP_QAT_FW_AUTH_HDR_FLAG_DO_NESTED 1
+#define ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED 0
+#define ICP_QAT_FW_CCM_GCM_AAD_SZ_MAX  240
+#define ICP_QAT_FW_HASH_REQUEST_PARAMETERS_OFFSET \
+       (sizeof(struct icp_qat_fw_la_cipher_req_params))
+#define ICP_QAT_FW_CIPHER_REQUEST_PARAMETERS_OFFSET (0)
+
+struct icp_qat_fw_la_cipher_req_params {
+       uint32_t cipher_offset;
+       uint32_t cipher_length;
+       union {
+               uint32_t cipher_IV_array[ICP_QAT_FW_NUM_LONGWORDS_4];
+               struct {
+                       uint64_t cipher_IV_ptr;
+                       uint64_t resrvd1;
+               } s;
+       } u;
+};
+
+struct icp_qat_fw_la_auth_req_params {
+       uint32_t auth_off;
+       uint32_t auth_len;
+       union {
+               uint64_t auth_partial_st_prefix;
+               uint64_t aad_adr;
+       } u1;
+       uint64_t auth_res_addr;
+       union {
+               uint8_t inner_prefix_sz;
+               uint8_t aad_sz;
+       } u2;
+       uint8_t resrvd1;
+       uint8_t hash_state_sz;
+       uint8_t auth_res_sz;
+} __packed;
+
+struct icp_qat_fw_la_auth_req_params_resrvd_flds {
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_6];
+       union {
+               uint8_t inner_prefix_sz;
+               uint8_t aad_sz;
+       } u2;
+       uint8_t resrvd1;
+       uint16_t resrvd2;
+};
+
+struct icp_qat_fw_la_resp {
+       struct icp_qat_fw_comn_resp_hdr comn_resp;
+       uint64_t opaque_data;
+       uint32_t resrvd[ICP_QAT_FW_NUM_LONGWORDS_4];
+};
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) & \
+         ICP_QAT_FW_COMN_NEXT_ID_MASK) >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_CIPHER_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_CIPHER_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_cipher = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_cipher) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_GET(cd_ctrl_hdr_t) \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) & ICP_QAT_FW_COMN_NEXT_ID_MASK) \
+       >> (ICP_QAT_FW_COMN_NEXT_ID_BITPOS))
+
+#define ICP_QAT_FW_AUTH_NEXT_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK) | \
+       ((val << ICP_QAT_FW_COMN_NEXT_ID_BITPOS) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK)) }
+
+#define ICP_QAT_FW_AUTH_CURR_ID_GET(cd_ctrl_hdr_t) \
+       (((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_CURR_ID_MASK)
+
+#define ICP_QAT_FW_AUTH_CURR_ID_SET(cd_ctrl_hdr_t, val) \
+{ (cd_ctrl_hdr_t)->next_curr_id_auth = \
+       ((((cd_ctrl_hdr_t)->next_curr_id_auth) \
+       & ICP_QAT_FW_COMN_NEXT_ID_MASK) | \
+       ((val) & ICP_QAT_FW_COMN_CURR_ID_MASK)) }
+
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h b/drivers/crypto/qat/qat_common/icp_qat_fw_loader_handle.h
new file mode 100644 (file)
index 0000000..5e1aa40
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_FW_LOADER_HANDLE_H__
+#define __ICP_QAT_FW_LOADER_HANDLE_H__
+#include "icp_qat_uclo.h"
+
+struct icp_qat_fw_loader_ae_data {
+       unsigned int state;
+       unsigned int ustore_size;
+       unsigned int free_addr;
+       unsigned int free_size;
+       unsigned int live_ctx_mask;
+};
+
+struct icp_qat_fw_loader_hal_handle {
+       struct icp_qat_fw_loader_ae_data aes[ICP_QAT_UCLO_MAX_AE];
+       unsigned int ae_mask;
+       unsigned int slice_mask;
+       unsigned int revision_id;
+       unsigned int ae_max_num;
+       unsigned int upc_mask;
+       unsigned int max_ustore;
+};
+
+struct icp_qat_fw_loader_handle {
+       struct icp_qat_fw_loader_hal_handle *hal_handle;
+       void *obj_handle;
+       void __iomem *hal_sram_addr_v;
+       void __iomem *hal_cap_g_ctl_csr_addr_v;
+       void __iomem *hal_cap_ae_xfer_csr_addr_v;
+       void __iomem *hal_cap_ae_local_csr_addr_v;
+       void __iomem *hal_ep_csr_addr_v;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hal.h b/drivers/crypto/qat/qat_common/icp_qat_hal.h
new file mode 100644 (file)
index 0000000..85b6d24
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_HAL_H
+#define __ICP_QAT_HAL_H
+#include "icp_qat_fw_loader_handle.h"
+
+enum hal_global_csr {
+       MISC_CONTROL = 0x04,
+       ICP_RESET = 0x0c,
+       ICP_GLOBAL_CLK_ENABLE = 0x50
+};
+
+enum hal_ae_csr {
+       USTORE_ADDRESS = 0x000,
+       USTORE_DATA_LOWER = 0x004,
+       USTORE_DATA_UPPER = 0x008,
+       ALU_OUT = 0x010,
+       CTX_ARB_CNTL = 0x014,
+       CTX_ENABLES = 0x018,
+       CC_ENABLE = 0x01c,
+       CSR_CTX_POINTER = 0x020,
+       CTX_STS_INDIRECT = 0x040,
+       ACTIVE_CTX_STATUS = 0x044,
+       CTX_SIG_EVENTS_INDIRECT = 0x048,
+       CTX_SIG_EVENTS_ACTIVE = 0x04c,
+       CTX_WAKEUP_EVENTS_INDIRECT = 0x050,
+       LM_ADDR_0_INDIRECT = 0x060,
+       LM_ADDR_1_INDIRECT = 0x068,
+       INDIRECT_LM_ADDR_0_BYTE_INDEX = 0x0e0,
+       INDIRECT_LM_ADDR_1_BYTE_INDEX = 0x0e8,
+       FUTURE_COUNT_SIGNAL_INDIRECT = 0x078,
+       TIMESTAMP_LOW = 0x0c0,
+       TIMESTAMP_HIGH = 0x0c4,
+       PROFILE_COUNT = 0x144,
+       SIGNATURE_ENABLE = 0x150,
+       AE_MISC_CONTROL = 0x160,
+       LOCAL_CSR_STATUS = 0x180,
+};
+
+#define UA_ECS                      (0x1 << 31)
+#define ACS_ABO_BITPOS              31
+#define ACS_ACNO                    0x7
+#define CE_ENABLE_BITPOS            0x8
+#define CE_LMADDR_0_GLOBAL_BITPOS   16
+#define CE_LMADDR_1_GLOBAL_BITPOS   17
+#define CE_NN_MODE_BITPOS           20
+#define CE_REG_PAR_ERR_BITPOS       25
+#define CE_BREAKPOINT_BITPOS        27
+#define CE_CNTL_STORE_PARITY_ERROR_BITPOS 29
+#define CE_INUSE_CONTEXTS_BITPOS    31
+#define CE_NN_MODE                  (0x1 << CE_NN_MODE_BITPOS)
+#define CE_INUSE_CONTEXTS           (0x1 << CE_INUSE_CONTEXTS_BITPOS)
+#define XCWE_VOLUNTARY              (0x1)
+#define LCS_STATUS          (0x1)
+#define MMC_SHARE_CS_BITPOS         2
+#define GLOBAL_CSR                0xA00
+
+#define SET_CAP_CSR(handle, csr, val) \
+       ADF_CSR_WR(handle->hal_cap_g_ctl_csr_addr_v, csr, val)
+#define GET_CAP_CSR(handle, csr) \
+       ADF_CSR_RD(handle->hal_cap_g_ctl_csr_addr_v, csr)
+#define SET_GLB_CSR(handle, csr, val) SET_CAP_CSR(handle, csr + GLOBAL_CSR, val)
+#define GET_GLB_CSR(handle, csr) GET_CAP_CSR(handle, GLOBAL_CSR + csr)
+#define AE_CSR(handle, ae) \
+       ((handle)->hal_cap_ae_local_csr_addr_v + \
+       (((ae) & (handle)->hal_handle->ae_mask) << 12))
+#define AE_CSR_ADDR(handle, ae, csr) (AE_CSR(handle, ae) + (0x3ff & (csr)))
+#define SET_AE_CSR(handle, ae, csr, val) \
+       ADF_CSR_WR(AE_CSR_ADDR(handle, ae, csr), 0, val)
+#define GET_AE_CSR(handle, ae, csr) ADF_CSR_RD(AE_CSR_ADDR(handle, ae, csr), 0)
+#define AE_XFER(handle, ae) \
+       ((handle)->hal_cap_ae_xfer_csr_addr_v + \
+       (((ae) & (handle)->hal_handle->ae_mask) << 12))
+#define AE_XFER_ADDR(handle, ae, reg) (AE_XFER(handle, ae) + \
+       (((reg) & 0xff) << 2))
+#define SET_AE_XFER(handle, ae, reg, val) \
+       ADF_CSR_WR(AE_XFER_ADDR(handle, ae, reg), 0, val)
+#define SRAM_WRITE(handle, addr, val) \
+       ADF_CSR_WR(handle->hal_sram_addr_v, addr, val)
+#define SRAM_READ(handle, addr) ADF_CSR_RD(handle->hal_sram_addr_v, addr)
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
new file mode 100644 (file)
index 0000000..5031f8c
--- /dev/null
@@ -0,0 +1,305 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ICP_QAT_HW_H_
+#define _ICP_QAT_HW_H_
+
+enum icp_qat_hw_ae_id {
+       ICP_QAT_HW_AE_0 = 0,
+       ICP_QAT_HW_AE_1 = 1,
+       ICP_QAT_HW_AE_2 = 2,
+       ICP_QAT_HW_AE_3 = 3,
+       ICP_QAT_HW_AE_4 = 4,
+       ICP_QAT_HW_AE_5 = 5,
+       ICP_QAT_HW_AE_6 = 6,
+       ICP_QAT_HW_AE_7 = 7,
+       ICP_QAT_HW_AE_8 = 8,
+       ICP_QAT_HW_AE_9 = 9,
+       ICP_QAT_HW_AE_10 = 10,
+       ICP_QAT_HW_AE_11 = 11,
+       ICP_QAT_HW_AE_DELIMITER = 12
+};
+
+enum icp_qat_hw_qat_id {
+       ICP_QAT_HW_QAT_0 = 0,
+       ICP_QAT_HW_QAT_1 = 1,
+       ICP_QAT_HW_QAT_2 = 2,
+       ICP_QAT_HW_QAT_3 = 3,
+       ICP_QAT_HW_QAT_4 = 4,
+       ICP_QAT_HW_QAT_5 = 5,
+       ICP_QAT_HW_QAT_DELIMITER = 6
+};
+
+enum icp_qat_hw_auth_algo {
+       ICP_QAT_HW_AUTH_ALGO_NULL = 0,
+       ICP_QAT_HW_AUTH_ALGO_SHA1 = 1,
+       ICP_QAT_HW_AUTH_ALGO_MD5 = 2,
+       ICP_QAT_HW_AUTH_ALGO_SHA224 = 3,
+       ICP_QAT_HW_AUTH_ALGO_SHA256 = 4,
+       ICP_QAT_HW_AUTH_ALGO_SHA384 = 5,
+       ICP_QAT_HW_AUTH_ALGO_SHA512 = 6,
+       ICP_QAT_HW_AUTH_ALGO_AES_XCBC_MAC = 7,
+       ICP_QAT_HW_AUTH_ALGO_AES_CBC_MAC = 8,
+       ICP_QAT_HW_AUTH_ALGO_AES_F9 = 9,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_128 = 10,
+       ICP_QAT_HW_AUTH_ALGO_GALOIS_64 = 11,
+       ICP_QAT_HW_AUTH_ALGO_KASUMI_F9 = 12,
+       ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2 = 13,
+       ICP_QAT_HW_AUTH_ALGO_ZUC_3G_128_EIA3 = 14,
+       ICP_QAT_HW_AUTH_RESERVED_1 = 15,
+       ICP_QAT_HW_AUTH_RESERVED_2 = 16,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_256 = 17,
+       ICP_QAT_HW_AUTH_RESERVED_3 = 18,
+       ICP_QAT_HW_AUTH_ALGO_SHA3_512 = 19,
+       ICP_QAT_HW_AUTH_ALGO_DELIMITER = 20
+};
+
+enum icp_qat_hw_auth_mode {
+       ICP_QAT_HW_AUTH_MODE0 = 0,
+       ICP_QAT_HW_AUTH_MODE1 = 1,
+       ICP_QAT_HW_AUTH_MODE2 = 2,
+       ICP_QAT_HW_AUTH_MODE_DELIMITER = 3
+};
+
+struct icp_qat_hw_auth_config {
+       uint32_t config;
+       uint32_t reserved;
+};
+
+#define QAT_AUTH_MODE_BITPOS 4
+#define QAT_AUTH_MODE_MASK 0xF
+#define QAT_AUTH_ALGO_BITPOS 0
+#define QAT_AUTH_ALGO_MASK 0xF
+#define QAT_AUTH_CMP_BITPOS 8
+#define QAT_AUTH_CMP_MASK 0x7F
+#define QAT_AUTH_SHA3_PADDING_BITPOS 16
+#define QAT_AUTH_SHA3_PADDING_MASK 0x1
+#define QAT_AUTH_ALGO_SHA3_BITPOS 22
+#define QAT_AUTH_ALGO_SHA3_MASK 0x3
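+/* Pack mode, algorithm, SHA-3 flags and comparator length into the
+ * authentication config word using the positions/masks above */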
+#define ICP_QAT_HW_AUTH_CONFIG_BUILD(mode, algo, cmp_len) \
+       ((((mode) & QAT_AUTH_MODE_MASK) << QAT_AUTH_MODE_BITPOS) | \
+       (((algo) & QAT_AUTH_ALGO_MASK) << QAT_AUTH_ALGO_BITPOS) | \
+       ((((algo) >> 4) & QAT_AUTH_ALGO_SHA3_MASK) << \
+        QAT_AUTH_ALGO_SHA3_BITPOS) | \
+       ((((((algo) == ICP_QAT_HW_AUTH_ALGO_SHA3_256) || \
+       ((algo) == ICP_QAT_HW_AUTH_ALGO_SHA3_512)) ? 1 : 0) \
+       & QAT_AUTH_SHA3_PADDING_MASK) << QAT_AUTH_SHA3_PADDING_BITPOS) | \
+       (((cmp_len) & QAT_AUTH_CMP_MASK) << QAT_AUTH_CMP_BITPOS))
+
+struct icp_qat_hw_auth_counter {
+       __be32 counter;
+       uint32_t reserved;
+};
+
+#define QAT_AUTH_COUNT_MASK 0xFFFFFFFF
+#define QAT_AUTH_COUNT_BITPOS 0
+#define ICP_QAT_HW_AUTH_COUNT_BUILD(val) \
+       (((val) & QAT_AUTH_COUNT_MASK) << QAT_AUTH_COUNT_BITPOS)
+
+struct icp_qat_hw_auth_setup {
+       struct icp_qat_hw_auth_config auth_config;
+       struct icp_qat_hw_auth_counter auth_counter;
+};
+
+#define QAT_HW_DEFAULT_ALIGNMENT 8
+#define QAT_HW_ROUND_UP(val, n) (((val) + ((n) - 1)) & ~((n) - 1))
+#define ICP_QAT_HW_NULL_STATE1_SZ 32
+#define ICP_QAT_HW_MD5_STATE1_SZ 16
+#define ICP_QAT_HW_SHA1_STATE1_SZ 20
+#define ICP_QAT_HW_SHA224_STATE1_SZ 32
+#define ICP_QAT_HW_SHA256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE1_SZ 32
+#define ICP_QAT_HW_SHA384_STATE1_SZ 64
+#define ICP_QAT_HW_SHA512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE1_SZ 64
+#define ICP_QAT_HW_SHA3_224_STATE1_SZ 28
+#define ICP_QAT_HW_SHA3_384_STATE1_SZ 48
+#define ICP_QAT_HW_AES_XCBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_STATE1_SZ 16
+#define ICP_QAT_HW_AES_F9_STATE1_SZ 32
+#define ICP_QAT_HW_KASUMI_F9_STATE1_SZ 16
+#define ICP_QAT_HW_GALOIS_128_STATE1_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE1_SZ 8
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE1_SZ 8
+#define ICP_QAT_HW_NULL_STATE2_SZ 32
+#define ICP_QAT_HW_MD5_STATE2_SZ 16
+#define ICP_QAT_HW_SHA1_STATE2_SZ 20
+#define ICP_QAT_HW_SHA224_STATE2_SZ 32
+#define ICP_QAT_HW_SHA256_STATE2_SZ 32
+#define ICP_QAT_HW_SHA3_256_STATE2_SZ 0
+#define ICP_QAT_HW_SHA384_STATE2_SZ 64
+#define ICP_QAT_HW_SHA512_STATE2_SZ 64
+#define ICP_QAT_HW_SHA3_512_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_224_STATE2_SZ 0
+#define ICP_QAT_HW_SHA3_384_STATE2_SZ 0
+#define ICP_QAT_HW_AES_XCBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CBC_MAC_KEY_SZ 16
+#define ICP_QAT_HW_AES_CCM_CBC_E_CTR0_SZ 16
+#define ICP_QAT_HW_F9_IK_SZ 16
+#define ICP_QAT_HW_F9_FK_SZ 16
+#define ICP_QAT_HW_KASUMI_F9_STATE2_SZ (ICP_QAT_HW_F9_IK_SZ + \
+       ICP_QAT_HW_F9_FK_SZ)
+#define ICP_QAT_HW_AES_F9_STATE2_SZ ICP_QAT_HW_KASUMI_F9_STATE2_SZ
+#define ICP_QAT_HW_SNOW_3G_UIA2_STATE2_SZ 24
+#define ICP_QAT_HW_ZUC_3G_EIA3_STATE2_SZ 32
+#define ICP_QAT_HW_GALOIS_H_SZ 16
+#define ICP_QAT_HW_GALOIS_LEN_A_SZ 8
+#define ICP_QAT_HW_GALOIS_E_CTR0_SZ 16
+
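+/* SHA512 carries the largest hash state, so it doubles as the generic
+ * layout wrapped by icp_qat_hw_auth_algo_blk below */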
+struct icp_qat_hw_auth_sha512 {
+       struct icp_qat_hw_auth_setup inner_setup;
+       uint8_t state1[ICP_QAT_HW_SHA512_STATE1_SZ];
+       struct icp_qat_hw_auth_setup outer_setup;
+       uint8_t state2[ICP_QAT_HW_SHA512_STATE2_SZ];
+};
+
+struct icp_qat_hw_auth_algo_blk {
+       struct icp_qat_hw_auth_sha512 sha;
+};
+
+#define ICP_QAT_HW_GALOIS_LEN_A_BITPOS 0
+#define ICP_QAT_HW_GALOIS_LEN_A_MASK 0xFFFFFFFF
+
+enum icp_qat_hw_cipher_algo {
+       ICP_QAT_HW_CIPHER_ALGO_NULL = 0,
+       ICP_QAT_HW_CIPHER_ALGO_DES = 1,
+       ICP_QAT_HW_CIPHER_ALGO_3DES = 2,
+       ICP_QAT_HW_CIPHER_ALGO_AES128 = 3,
+       ICP_QAT_HW_CIPHER_ALGO_AES192 = 4,
+       ICP_QAT_HW_CIPHER_ALGO_AES256 = 5,
+       ICP_QAT_HW_CIPHER_ALGO_ARC4 = 6,
+       ICP_QAT_HW_CIPHER_ALGO_KASUMI = 7,
+       ICP_QAT_HW_CIPHER_ALGO_SNOW_3G_UEA2 = 8,
+       ICP_QAT_HW_CIPHER_ALGO_ZUC_3G_128_EEA3 = 9,
+       ICP_QAT_HW_CIPHER_DELIMITER = 10
+};
+
+enum icp_qat_hw_cipher_mode {
+       ICP_QAT_HW_CIPHER_ECB_MODE = 0,
+       ICP_QAT_HW_CIPHER_CBC_MODE = 1,
+       ICP_QAT_HW_CIPHER_CTR_MODE = 2,
+       ICP_QAT_HW_CIPHER_F8_MODE = 3,
+       ICP_QAT_HW_CIPHER_XTS_MODE = 6,
+       ICP_QAT_HW_CIPHER_MODE_DELIMITER = 7
+};
+
+struct icp_qat_hw_cipher_config {
+       uint32_t val;
+       uint32_t reserved;
+};
+
+enum icp_qat_hw_cipher_dir {
+       ICP_QAT_HW_CIPHER_ENCRYPT = 0,
+       ICP_QAT_HW_CIPHER_DECRYPT = 1,
+};
+
+enum icp_qat_hw_cipher_convert {
+       ICP_QAT_HW_CIPHER_NO_CONVERT = 0,
+       ICP_QAT_HW_CIPHER_KEY_CONVERT = 1,
+};
+
+#define QAT_CIPHER_MODE_BITPOS 4
+#define QAT_CIPHER_MODE_MASK 0xF
+#define QAT_CIPHER_ALGO_BITPOS 0
+#define QAT_CIPHER_ALGO_MASK 0xF
+#define QAT_CIPHER_CONVERT_BITPOS 9
+#define QAT_CIPHER_CONVERT_MASK 0x1
+#define QAT_CIPHER_DIR_BITPOS 8
+#define QAT_CIPHER_DIR_MASK 0x1
+#define QAT_CIPHER_MODE_F8_KEY_SZ_MULT 2
+#define QAT_CIPHER_MODE_XTS_KEY_SZ_MULT 2
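+/* Pack mode, algorithm, key-convert and direction bits into the cipher
+ * config word */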
+#define ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, algo, convert, dir) \
+       ((((mode) & QAT_CIPHER_MODE_MASK) << QAT_CIPHER_MODE_BITPOS) | \
+       (((algo) & QAT_CIPHER_ALGO_MASK) << QAT_CIPHER_ALGO_BITPOS) | \
+       (((convert) & QAT_CIPHER_CONVERT_MASK) << QAT_CIPHER_CONVERT_BITPOS) | \
+       (((dir) & QAT_CIPHER_DIR_MASK) << QAT_CIPHER_DIR_BITPOS))
+#define ICP_QAT_HW_DES_BLK_SZ 8
+#define ICP_QAT_HW_3DES_BLK_SZ 8
+#define ICP_QAT_HW_NULL_BLK_SZ 8
+#define ICP_QAT_HW_AES_BLK_SZ 16
+#define ICP_QAT_HW_KASUMI_BLK_SZ 8
+#define ICP_QAT_HW_SNOW_3G_BLK_SZ 8
+#define ICP_QAT_HW_ZUC_3G_BLK_SZ 8
+#define ICP_QAT_HW_NULL_KEY_SZ 256
+#define ICP_QAT_HW_DES_KEY_SZ 8
+#define ICP_QAT_HW_3DES_KEY_SZ 24
+#define ICP_QAT_HW_AES_128_KEY_SZ 16
+#define ICP_QAT_HW_AES_192_KEY_SZ 24
+#define ICP_QAT_HW_AES_256_KEY_SZ 32
+#define ICP_QAT_HW_AES_128_F8_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_192_F8_KEY_SZ (ICP_QAT_HW_AES_192_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_F8_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_128_XTS_KEY_SZ (ICP_QAT_HW_AES_128_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_AES_256_XTS_KEY_SZ (ICP_QAT_HW_AES_256_KEY_SZ * \
+       QAT_CIPHER_MODE_XTS_KEY_SZ_MULT)
+#define ICP_QAT_HW_KASUMI_KEY_SZ 16
+#define ICP_QAT_HW_KASUMI_F8_KEY_SZ (ICP_QAT_HW_KASUMI_KEY_SZ * \
+       QAT_CIPHER_MODE_F8_KEY_SZ_MULT)
+#define ICP_QAT_HW_ARC4_KEY_SZ 256
+#define ICP_QAT_HW_SNOW_3G_UEA2_KEY_SZ 16
+#define ICP_QAT_HW_SNOW_3G_UEA2_IV_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_KEY_SZ 16
+#define ICP_QAT_HW_ZUC_3G_EEA3_IV_SZ 16
+#define ICP_QAT_HW_MODE_F8_NUM_REG_TO_CLEAR 2
+#define INIT_SHRAM_CONSTANTS_TABLE_SZ 1024
+
+struct icp_qat_hw_cipher_aes256_f8 {
+       struct icp_qat_hw_cipher_config cipher_config;
+       uint8_t key[ICP_QAT_HW_AES_256_F8_KEY_SZ];
+};
+
+struct icp_qat_hw_cipher_algo_blk {
+       struct icp_qat_hw_cipher_aes256_f8 aes;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/qat/qat_common/icp_qat_uclo.h
new file mode 100644 (file)
index 0000000..2132a8c
--- /dev/null
@@ -0,0 +1,377 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __ICP_QAT_UCLO_H__
+#define __ICP_QAT_UCLO_H__
+
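+/* Limits and chunk identifiers for UOF (microcode object file) images
+ * loaded into the acceleration engines */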
+#define ICP_QAT_AC_C_CPU_TYPE     0x00400000
+#define ICP_QAT_UCLO_MAX_AE       12
+#define ICP_QAT_UCLO_MAX_CTX      8
+#define ICP_QAT_UCLO_MAX_UIMAGE   (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX)
+#define ICP_QAT_UCLO_MAX_USTORE   0x4000
+#define ICP_QAT_UCLO_MAX_XFER_REG 128
+#define ICP_QAT_UCLO_MAX_GPR_REG  128
+#define ICP_QAT_UCLO_MAX_NN_REG   128
+#define ICP_QAT_UCLO_MAX_LMEM_REG 1024
+#define ICP_QAT_UCLO_AE_ALL_CTX   0xff
+#define ICP_QAT_UOF_OBJID_LEN     8
+#define ICP_QAT_UOF_FID 0xc6c2
+#define ICP_QAT_UOF_MAJVER 0x4
+#define ICP_QAT_UOF_MINVER 0x11
+#define ICP_QAT_UOF_NN_MODE_NOTCARE   0xff
+#define ICP_QAT_UOF_OBJS        "UOF_OBJS"
+#define ICP_QAT_UOF_STRT        "UOF_STRT"
+#define ICP_QAT_UOF_GTID        "UOF_GTID"
+#define ICP_QAT_UOF_IMAG        "UOF_IMAG"
+#define ICP_QAT_UOF_IMEM        "UOF_IMEM"
+#define ICP_QAT_UOF_MSEG        "UOF_MSEG"
+#define ICP_QAT_UOF_LOCAL_SCOPE     1
+#define ICP_QAT_UOF_INIT_EXPR               0
+#define ICP_QAT_UOF_INIT_REG                1
+#define ICP_QAT_UOF_INIT_REG_CTX            2
+#define ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP   3
+
+#define ICP_QAT_CTX_MODE(ae_mode) ((ae_mode) & 0xf)
+#define ICP_QAT_NN_MODE(ae_mode) (((ae_mode) >> 0x4) & 0xf)
+#define ICP_QAT_SHARED_USTORE_MODE(ae_mode) (((ae_mode) >> 0xb) & 0x1)
+#define RELOADABLE_CTX_SHARED_MODE(ae_mode) (((ae_mode) >> 0xc) & 0x1)
+
+#define ICP_QAT_LOC_MEM0_MODE(ae_mode) (((ae_mode) >> 0x8) & 0x1)
+#define ICP_QAT_LOC_MEM1_MODE(ae_mode) (((ae_mode) >> 0x9) & 0x1)
+
+enum icp_qat_uof_mem_region {
+       ICP_QAT_UOF_SRAM_REGION = 0x0,
+       ICP_QAT_UOF_LMEM_REGION = 0x3,
+       ICP_QAT_UOF_UMEM_REGION = 0x5
+};
+
+enum icp_qat_uof_regtype {
+       ICP_NO_DEST,
+       ICP_GPA_REL,
+       ICP_GPA_ABS,
+       ICP_GPB_REL,
+       ICP_GPB_ABS,
+       ICP_SR_REL,
+       ICP_SR_RD_REL,
+       ICP_SR_WR_REL,
+       ICP_SR_ABS,
+       ICP_SR_RD_ABS,
+       ICP_SR_WR_ABS,
+       ICP_DR_REL,
+       ICP_DR_RD_REL,
+       ICP_DR_WR_REL,
+       ICP_DR_ABS,
+       ICP_DR_RD_ABS,
+       ICP_DR_WR_ABS,
+       ICP_LMEM,
+       ICP_LMEM0,
+       ICP_LMEM1,
+       ICP_NEIGH_REL,
+};
+
+struct icp_qat_uclo_page {
+       struct icp_qat_uclo_encap_page *encap_page;
+       struct icp_qat_uclo_region *region;
+       unsigned int flags;
+};
+
+struct icp_qat_uclo_region {
+       struct icp_qat_uclo_page *loaded;
+       struct icp_qat_uclo_page *page;
+};
+
+struct icp_qat_uclo_aeslice {
+       struct icp_qat_uclo_region *region;
+       struct icp_qat_uclo_page *page;
+       struct icp_qat_uclo_page *cur_page[ICP_QAT_UCLO_MAX_CTX];
+       struct icp_qat_uclo_encapme *encap_image;
+       unsigned int ctx_mask_assigned;
+       unsigned int new_uaddr[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uclo_aedata {
+       unsigned int slice_num;
+       unsigned int eff_ustore_size;
+       struct icp_qat_uclo_aeslice ae_slices[ICP_QAT_UCLO_MAX_CTX];
+};
+
+struct icp_qat_uof_encap_obj {
+       char *beg_uof;
+       struct icp_qat_uof_objhdr *obj_hdr;
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+       struct icp_qat_uof_varmem_seg *var_mem_seg;
+};
+
+struct icp_qat_uclo_encap_uwblock {
+       unsigned int start_addr;
+       unsigned int words_num;
+       uint64_t micro_words;
+};
+
+struct icp_qat_uclo_encap_page {
+       unsigned int def_page;
+       unsigned int page_region;
+       unsigned int beg_addr_v;
+       unsigned int beg_addr_p;
+       unsigned int micro_words_num;
+       unsigned int uwblock_num;
+       struct icp_qat_uclo_encap_uwblock *uwblock;
+};
+
+struct icp_qat_uclo_encapme {
+       struct icp_qat_uof_image *img_ptr;
+       struct icp_qat_uclo_encap_page *page;
+       unsigned int ae_reg_num;
+       struct icp_qat_uof_ae_reg *ae_reg;
+       unsigned int init_regsym_num;
+       struct icp_qat_uof_init_regsym *init_regsym;
+       unsigned int sbreak_num;
+       struct icp_qat_uof_sbreak *sbreak;
+       unsigned int uwords_num;
+};
+
+struct icp_qat_uclo_init_mem_table {
+       unsigned int entry_num;
+       struct icp_qat_uof_initmem *init_mem;
+};
+
+struct icp_qat_uclo_objhdr {
+       char *file_buff;
+       unsigned int checksum;
+       unsigned int size;
+};
+
+struct icp_qat_uof_strtable {
+       unsigned int table_len;
+       unsigned int reserved;
+       uint64_t strings;
+};
+
+struct icp_qat_uclo_objhandle {
+       unsigned int prod_type;
+       unsigned int prod_rev;
+       struct icp_qat_uclo_objhdr *obj_hdr;
+       struct icp_qat_uof_encap_obj encap_uof_obj;
+       struct icp_qat_uof_strtable str_table;
+       struct icp_qat_uclo_encapme ae_uimage[ICP_QAT_UCLO_MAX_UIMAGE];
+       struct icp_qat_uclo_aedata ae_data[ICP_QAT_UCLO_MAX_AE];
+       struct icp_qat_uclo_init_mem_table init_mem_tab;
+       struct icp_qat_uof_batch_init *lm_init_tab[ICP_QAT_UCLO_MAX_AE];
+       struct icp_qat_uof_batch_init *umem_init_tab[ICP_QAT_UCLO_MAX_AE];
+       int uimage_num;
+       int uword_in_bytes;
+       int global_inited;
+       unsigned int ae_num;
+       unsigned int ustore_phy_size;
+       void *obj_buf;
+       uint64_t *uword_buf;
+};
+
+struct icp_qat_uof_uword_block {
+       unsigned int start_addr;
+       unsigned int words_num;
+       unsigned int uword_offset;
+       unsigned int reserved;
+};
+
+struct icp_qat_uof_filehdr {
+       unsigned short file_id;
+       unsigned short reserved1;
+       char min_ver;
+       char maj_ver;
+       unsigned short reserved2;
+       unsigned short max_chunks;
+       unsigned short num_chunks;
+};
+
+struct icp_qat_uof_filechunkhdr {
+       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+       unsigned int checksum;
+       unsigned int offset;
+       unsigned int size;
+};
+
+struct icp_qat_uof_objhdr {
+       unsigned int cpu_type;
+       unsigned short min_cpu_ver;
+       unsigned short max_cpu_ver;
+       short max_chunks;
+       short num_chunks;
+       unsigned int reserved1;
+       unsigned int reserved2;
+};
+
+struct icp_qat_uof_chunkhdr {
+       char chunk_id[ICP_QAT_UOF_OBJID_LEN];
+       unsigned int offset;
+       unsigned int size;
+};
+
+struct icp_qat_uof_memvar_attr {
+       unsigned int offset_in_byte;
+       unsigned int value;
+};
+
+struct icp_qat_uof_initmem {
+       unsigned int sym_name;
+       char region;
+       char scope;
+       unsigned short reserved1;
+       unsigned int addr;
+       unsigned int num_in_bytes;
+       unsigned int val_attr_num;
+};
+
+struct icp_qat_uof_init_regsym {
+       unsigned int sym_name;
+       char init_type;
+       char value_type;
+       char reg_type;
+       unsigned char ctx;
+       unsigned int reg_addr;
+       unsigned int value;
+};
+
+struct icp_qat_uof_varmem_seg {
+       unsigned int sram_base;
+       unsigned int sram_size;
+       unsigned int sram_alignment;
+       unsigned int sdram_base;
+       unsigned int sdram_size;
+       unsigned int sdram_alignment;
+       unsigned int sdram1_base;
+       unsigned int sdram1_size;
+       unsigned int sdram1_alignment;
+       unsigned int scratch_base;
+       unsigned int scratch_size;
+       unsigned int scratch_alignment;
+};
+
+struct icp_qat_uof_gtid {
+       char tool_id[ICP_QAT_UOF_OBJID_LEN];
+       int tool_ver;
+       unsigned int reserved1;
+       unsigned int reserved2;
+};
+
+struct icp_qat_uof_sbreak {
+       unsigned int page_num;
+       unsigned int virt_uaddr;
+       unsigned char sbreak_type;
+       unsigned char reg_type;
+       unsigned short reserved1;
+       unsigned int addr_offset;
+       unsigned int reg_addr;
+};
+
+struct icp_qat_uof_code_page {
+       unsigned int page_region;
+       unsigned int page_num;
+       unsigned char def_page;
+       unsigned char reserved2;
+       unsigned short reserved1;
+       unsigned int beg_addr_v;
+       unsigned int beg_addr_p;
+       unsigned int neigh_reg_tab_offset;
+       unsigned int uc_var_tab_offset;
+       unsigned int imp_var_tab_offset;
+       unsigned int imp_expr_tab_offset;
+       unsigned int code_area_offset;
+};
+
+struct icp_qat_uof_image {
+       unsigned int img_name;
+       unsigned int ae_assigned;
+       unsigned int ctx_assigned;
+       unsigned int cpu_type;
+       unsigned int entry_address;
+       unsigned int fill_pattern[2];
+       unsigned int reloadable_size;
+       unsigned char sensitivity;
+       unsigned char reserved;
+       unsigned short ae_mode;
+       unsigned short max_ver;
+       unsigned short min_ver;
+       unsigned short image_attrib;
+       unsigned short reserved2;
+       unsigned short page_region_num;
+       unsigned short numpages;
+       unsigned int reg_tab_offset;
+       unsigned int init_reg_sym_tab;
+       unsigned int sbreak_tab;
+       unsigned int app_metadata;
+};
+
+struct icp_qat_uof_objtable {
+       unsigned int entry_num;
+};
+
+struct icp_qat_uof_ae_reg {
+       unsigned int name;
+       unsigned int vis_name;
+       unsigned short type;
+       unsigned short addr;
+       unsigned short access_mode;
+       unsigned char visible;
+       unsigned char reserved1;
+       unsigned short ref_count;
+       unsigned short reserved2;
+       unsigned int xo_id;
+};
+
+struct icp_qat_uof_code_area {
+       unsigned int micro_words_num;
+       unsigned int uword_block_tab;
+};
+
+struct icp_qat_uof_batch_init {
+       unsigned int ae;
+       unsigned int addr;
+       unsigned int *value;
+       unsigned int size;
+       struct icp_qat_uof_batch_init *next;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
new file mode 100644 (file)
index 0000000..59df488
--- /dev/null
@@ -0,0 +1,1038 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/crypto.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/authenc.h>
+#include <crypto/rng.h>
+#include <linux/dma-mapping.h>
+#include "adf_accel_devices.h"
+#include "adf_transport.h"
+#include "adf_common_drv.h"
+#include "qat_crypto.h"
+#include "icp_qat_hw.h"
+#include "icp_qat_fw.h"
+#include "icp_qat_fw_la.h"
+
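+/* CBC cipher config words: encryption uses the key as supplied, while
+ * decryption asks the hardware to convert it to the decrypt key schedule */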
+#define QAT_AES_HW_CONFIG_ENC(alg) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+                       ICP_QAT_HW_CIPHER_NO_CONVERT, \
+                       ICP_QAT_HW_CIPHER_ENCRYPT)
+
+#define QAT_AES_HW_CONFIG_DEC(alg) \
+       ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
+                       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+                       ICP_QAT_HW_CIPHER_DECRYPT)
+
+static atomic_t active_dev;
+
+struct qat_alg_buf {
+       uint32_t len;
+       uint32_t resrvd;
+       uint64_t addr;
+} __packed;
+
+struct qat_alg_buf_list {
+       uint64_t resrvd;
+       uint32_t num_bufs;
+       uint32_t num_mapped_bufs;
+       struct qat_alg_buf bufers[];
+} __packed __aligned(64);
+
+/* Common content descriptor */
+struct qat_alg_cd {
+       union {
+               struct qat_enc { /* Encrypt content desc */
+                       struct icp_qat_hw_cipher_algo_blk cipher;
+                       struct icp_qat_hw_auth_algo_blk hash;
+               } qat_enc_cd;
+               struct qat_dec { /* Decrypt content desc */
+                       struct icp_qat_hw_auth_algo_blk hash;
+                       struct icp_qat_hw_cipher_algo_blk cipher;
+               } qat_dec_cd;
+       };
+} __aligned(64);
+
+#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
+
+struct qat_auth_state {
+       uint8_t data[MAX_AUTH_STATE_SIZE];
+} __aligned(64);
+
+struct qat_alg_session_ctx {
+       struct qat_alg_cd *enc_cd;
+       dma_addr_t enc_cd_paddr;
+       struct qat_alg_cd *dec_cd;
+       dma_addr_t dec_cd_paddr;
+       struct qat_auth_state *auth_hw_state_enc;
+       dma_addr_t auth_state_enc_paddr;
+       struct qat_auth_state *auth_hw_state_dec;
+       dma_addr_t auth_state_dec_paddr;
+       struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
+       struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
+       struct qat_crypto_instance *inst;
+       struct crypto_tfm *tfm;
+       struct crypto_shash *hash_tfm;
+       enum icp_qat_hw_auth_algo qat_hash_alg;
+       uint8_t salt[AES_BLOCK_SIZE];
+       spinlock_t lock;        /* protects qat_alg_session_ctx struct */
+};
+
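+/* Physical package id of the current CPU, used to pick a crypto
+ * instance close to the caller */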
+static int get_current_node(void)
+{
+       return cpu_data(current_thread_info()->cpu).phys_proc_id;
+}
+
+static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
+{
+       switch (qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               return ICP_QAT_HW_SHA1_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               return ICP_QAT_HW_SHA256_STATE1_SZ;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               return ICP_QAT_HW_SHA512_STATE1_SZ;
+       default:
+               return -EFAULT;
+       }
+}
+
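+/* Precompute the HMAC inner and outer partial hashes and store them
+ * big-endian in the hardware hash state block */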
+static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
+                                 struct qat_alg_session_ctx *ctx,
+                                 const uint8_t *auth_key,
+                                 unsigned int auth_keylen, uint8_t *auth_state)
+{
+       struct {
+               struct shash_desc shash;
+               char ctx[crypto_shash_descsize(ctx->hash_tfm)];
+       } desc;
+       struct sha1_state sha1;
+       struct sha256_state sha256;
+       struct sha512_state sha512;
+       int block_size = crypto_shash_blocksize(ctx->hash_tfm);
+       int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
+       uint8_t *ipad = auth_state;
+       uint8_t *opad = ipad + block_size;
+       __be32 *hash_state_out;
+       __be64 *hash512_state_out;
+       int i, offset;
+
+       desc.shash.tfm = ctx->hash_tfm;
+       desc.shash.flags = 0x0;
+
+       if (auth_keylen > block_size) {
+               char buff[SHA512_BLOCK_SIZE];
+               int ret = crypto_shash_digest(&desc.shash, auth_key,
+                                             auth_keylen, buff);
+               if (ret)
+                       return ret;
+
+               memcpy(ipad, buff, digest_size);
+               memcpy(opad, buff, digest_size);
+               memset(ipad + digest_size, 0, block_size - digest_size);
+               memset(opad + digest_size, 0, block_size - digest_size);
+       } else {
+               memcpy(ipad, auth_key, auth_keylen);
+               memcpy(opad, auth_key, auth_keylen);
+               memset(ipad + auth_keylen, 0, block_size - auth_keylen);
+               memset(opad + auth_keylen, 0, block_size - auth_keylen);
+       }
+
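+       /* Turn the padded key into the HMAC ipad/opad blocks (RFC 2104) */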
+       for (i = 0; i < block_size; i++) {
+               char *ipad_ptr = ipad + i;
+               char *opad_ptr = opad + i;
+               *ipad_ptr ^= 0x36;
+               *opad_ptr ^= 0x5C;
+       }
+
+       if (crypto_shash_init(&desc.shash))
+               return -EFAULT;
+
+       if (crypto_shash_update(&desc.shash, ipad, block_size))
+               return -EFAULT;
+
+       hash_state_out = (__be32 *)hash->sha.state1;
+       hash512_state_out = (__be64 *)hash_state_out;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               if (crypto_shash_export(&desc.shash, &sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(*(sha1.state + i));
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               if (crypto_shash_export(&desc.shash, &sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(*(sha256.state + i));
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               if (crypto_shash_export(&desc.shash, &sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+                       *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+               break;
+       default:
+               return -EFAULT;
+       }
+
+       if (crypto_shash_init(&desc.shash))
+               return -EFAULT;
+
+       if (crypto_shash_update(&desc.shash, opad, block_size))
+               return -EFAULT;
+
+       offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
+       hash_state_out = (__be32 *)(hash->sha.state1 + offset);
+       hash512_state_out = (__be64 *)hash_state_out;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               if (crypto_shash_export(&desc.shash, &sha1))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(*(sha1.state + i));
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               if (crypto_shash_export(&desc.shash, &sha256))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
+                       *hash_state_out = cpu_to_be32(*(sha256.state + i));
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               if (crypto_shash_export(&desc.shash, &sha512))
+                       return -EFAULT;
+               for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
+                       *hash512_state_out = cpu_to_be64(*(sha512.state + i));
+               break;
+       default:
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
+{
+       header->hdr_flags =
+               ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
+       header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
+       header->comn_req_flags =
+               ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
+                                           QAT_COMN_PTR_TYPE_SGL);
+       ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
+       ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
+                                 ICP_QAT_FW_LA_PARTIAL_NONE);
+       ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
+                                          ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
+       ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
+                               ICP_QAT_FW_LA_NO_PROTO);
+       ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
+                                      ICP_QAT_FW_LA_NO_UPDATE_STATE);
+}
+
+static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
+                                   int alg, struct crypto_authenc_keys *keys)
+{
+       struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+       unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+       struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
+       struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
+       struct icp_qat_hw_auth_algo_blk *hash =
+               (struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
+               sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       void *ptr = &req_tmpl->cd_ctrl;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+       struct icp_qat_fw_la_auth_req_params *auth_param =
+               (struct icp_qat_fw_la_auth_req_params *)
+               ((char *)&req_tmpl->serv_specif_rqpars +
+                sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+       /* CD setup */
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
+       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+       hash->sha.inner_setup.auth_config.config =
+               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                                            ctx->qat_hash_alg, digestsize);
+       hash->sha.inner_setup.auth_counter.counter =
+               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
+                                  (uint8_t *)ctx->auth_hw_state_enc))
+               return -EFAULT;
+
+       /* Request setup */
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_RET_AUTH_RES);
+       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
+       cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+       /* Cipher CD config setup */
+       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cipher_cd_ctrl->cipher_cfg_offset = 0;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       /* Auth CD config setup */
+       hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
+       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+       hash_cd_ctrl->inner_res_sz = digestsize;
+       hash_cd_ctrl->final_sz = digestsize;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               hash_cd_ctrl->inner_state1_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+               hash_cd_ctrl->inner_state2_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+               break;
+       default:
+               break;
+       }
+       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+                       ((sizeof(struct icp_qat_hw_auth_setup) +
+                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+       auth_param->u1.auth_partial_st_prefix = ctx->auth_state_enc_paddr +
+                       sizeof(struct icp_qat_hw_auth_counter) +
+                       round_up(hash_cd_ctrl->inner_state1_sz, 8);
+       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+       return 0;
+}
+
+static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
+                                   int alg, struct crypto_authenc_keys *keys)
+{
+       struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
+       unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
+       struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
+       struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
+       struct icp_qat_hw_cipher_algo_blk *cipher =
+               (struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
+               sizeof(struct icp_qat_hw_auth_setup) +
+               roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
+       struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
+       struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
+       struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
+       void *ptr = &req_tmpl->cd_ctrl;
+       struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
+       struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
+       struct icp_qat_fw_la_auth_req_params *auth_param =
+               (struct icp_qat_fw_la_auth_req_params *)
+               ((char *)&req_tmpl->serv_specif_rqpars +
+               sizeof(struct icp_qat_fw_la_cipher_req_params));
+
+       /* CD setup */
+       cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
+       memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
+       hash->sha.inner_setup.auth_config.config =
+               ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
+                                            ctx->qat_hash_alg,
+                                            digestsize);
+       hash->sha.inner_setup.auth_counter.counter =
+               cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));
+
+       if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen,
+                                  (uint8_t *)ctx->auth_hw_state_dec))
+               return -EFAULT;
+
+       /* Request setup */
+       qat_alg_init_common_hdr(header);
+       header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+       ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_NO_RET_AUTH_RES);
+       ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
+                                  ICP_QAT_FW_LA_CMP_AUTH_RES);
+       cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+       cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;
+
+       /* Cipher CD config setup */
+       cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
+       cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+       cipher_cd_ctrl->cipher_cfg_offset =
+               (sizeof(struct icp_qat_hw_auth_setup) +
+                roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
+       ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+
+       /* Auth CD config setup */
+       hash_cd_ctrl->hash_cfg_offset = 0;
+       hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
+       hash_cd_ctrl->inner_res_sz = digestsize;
+       hash_cd_ctrl->final_sz = digestsize;
+
+       switch (ctx->qat_hash_alg) {
+       case ICP_QAT_HW_AUTH_ALGO_SHA1:
+               hash_cd_ctrl->inner_state1_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
+               hash_cd_ctrl->inner_state2_sz =
+                       round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA256:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
+               break;
+       case ICP_QAT_HW_AUTH_ALGO_SHA512:
+               hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
+               hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
+               break;
+       default:
+               break;
+       }
+
+       hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
+                       ((sizeof(struct icp_qat_hw_auth_setup) +
+                        round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
+       auth_param->u1.auth_partial_st_prefix = ctx->auth_state_dec_paddr +
+                       sizeof(struct icp_qat_hw_auth_counter) +
+                       round_up(hash_cd_ctrl->inner_state1_sz, 8);
+       auth_param->auth_res_sz = digestsize;
+       ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
+       ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+       return 0;
+}
+
+static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
+                                const uint8_t *key, unsigned int keylen)
+{
+       struct crypto_authenc_keys keys;
+       int alg;
+
+       if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
+               return -EFAULT;
+
+       if (crypto_authenc_extractkeys(&keys, key, keylen))
+               goto bad_key;
+
+       switch (keys.enckeylen) {
+       case AES_KEYSIZE_128:
+               alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+               break;
+       case AES_KEYSIZE_192:
+               alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+               break;
+       case AES_KEYSIZE_256:
+               alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+               break;
+       default:
+               goto bad_key;
+       }
+
+       if (qat_alg_init_enc_session(ctx, alg, &keys))
+               goto error;
+
+       if (qat_alg_init_dec_session(ctx, alg, &keys))
+               goto error;
+
+       return 0;
+bad_key:
+       crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+       return -EINVAL;
+error:
+       return -EFAULT;
+}
+
+static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
+                         unsigned int keylen)
+{
+       struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
+       struct device *dev;
+
+       spin_lock(&ctx->lock);
+       if (ctx->enc_cd) {
+               /* rekeying */
+               dev = &GET_DEV(ctx->inst->accel_dev);
+               memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
+               memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
+               memset(ctx->auth_hw_state_enc, 0,
+                      sizeof(struct qat_auth_state));
+               memset(ctx->auth_hw_state_dec, 0,
+                      sizeof(struct qat_auth_state));
+               memset(&ctx->enc_fw_req_tmpl, 0,
+                      sizeof(struct icp_qat_fw_la_bulk_req));
+               memset(&ctx->dec_fw_req_tmpl, 0,
+                      sizeof(struct icp_qat_fw_la_bulk_req));
+       } else {
+               /* new key */
+               int node = get_current_node();
+               struct qat_crypto_instance *inst =
+                               qat_crypto_get_instance_node(node);
+               if (!inst) {
+                       spin_unlock(&ctx->lock);
+                       return -EINVAL;
+               }
+
+               dev = &GET_DEV(inst->accel_dev);
+               ctx->inst = inst;
+               ctx->enc_cd = dma_zalloc_coherent(dev,
+                                                 sizeof(struct qat_alg_cd),
+                                                 &ctx->enc_cd_paddr,
+                                                 GFP_ATOMIC);
+               if (!ctx->enc_cd) {
+                       spin_unlock(&ctx->lock);
+                       return -ENOMEM;
+               }
+               ctx->dec_cd = dma_zalloc_coherent(dev,
+                                                 sizeof(struct qat_alg_cd),
+                                                 &ctx->dec_cd_paddr,
+                                                 GFP_ATOMIC);
+               if (!ctx->dec_cd) {
+                       spin_unlock(&ctx->lock);
+                       goto out_free_enc;
+               }
+               ctx->auth_hw_state_enc =
+                       dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
+                                           &ctx->auth_state_enc_paddr,
+                                           GFP_ATOMIC);
+               if (!ctx->auth_hw_state_enc) {
+                       spin_unlock(&ctx->lock);
+                       goto out_free_dec;
+               }
+               ctx->auth_hw_state_dec =
+                       dma_zalloc_coherent(dev, sizeof(struct qat_auth_state),
+                                           &ctx->auth_state_dec_paddr,
+                                           GFP_ATOMIC);
+               if (!ctx->auth_hw_state_dec) {
+                       spin_unlock(&ctx->lock);
+                       goto out_free_auth_enc;
+               }
+       }
+       spin_unlock(&ctx->lock);
+       if (qat_alg_init_sessions(ctx, key, keylen))
+               goto out_free_all;
+
+       return 0;
+
+out_free_all:
+       dma_free_coherent(dev, sizeof(struct qat_auth_state),
+                         ctx->auth_hw_state_dec, ctx->auth_state_dec_paddr);
+       ctx->auth_hw_state_dec = NULL;
+out_free_auth_enc:
+       dma_free_coherent(dev, sizeof(struct qat_auth_state),
+                         ctx->auth_hw_state_enc, ctx->auth_state_enc_paddr);
+       ctx->auth_hw_state_enc = NULL;
+out_free_dec:
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->dec_cd, ctx->dec_cd_paddr);
+       ctx->dec_cd = NULL;
+out_free_enc:
+       dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                         ctx->enc_cd, ctx->enc_cd_paddr);
+       ctx->enc_cd = NULL;
+       return -ENOMEM;
+}
+
+static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
+                             struct qat_crypto_request *qat_req)
+{
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       struct qat_alg_buf_list *bl = qat_req->buf.bl;
+       struct qat_alg_buf_list *blout = qat_req->buf.blout;
+       dma_addr_t blp = qat_req->buf.blp;
+       dma_addr_t blpout = qat_req->buf.bloutp;
+       size_t sz = qat_req->buf.sz;
+       int i, bufs = bl->num_bufs;
+
+       for (i = 0; i < bl->num_bufs; i++)
+               dma_unmap_single(dev, bl->bufers[i].addr,
+                                bl->bufers[i].len, DMA_BIDIRECTIONAL);
+
+       dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+       kfree(bl);
+       if (blp != blpout) {
+               /* For an out-of-place operation, unmap only the data buffers */
+               int bufless = bufs - blout->num_mapped_bufs;
+
+               for (i = bufless; i < bufs; i++) {
+                       dma_unmap_single(dev, blout->bufers[i].addr,
+                                        blout->bufers[i].len,
+                                        DMA_BIDIRECTIONAL);
+               }
+               dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
+               kfree(blout);
+       }
+}
+
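+/* Flatten the assoc, iv and src/dst scatterlists into the flat buffer
+ * lists the firmware consumes, DMA-mapping each element individually */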
+static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
+                              struct scatterlist *assoc,
+                              struct scatterlist *sgl,
+                              struct scatterlist *sglout, uint8_t *iv,
+                              uint8_t ivlen,
+                              struct qat_crypto_request *qat_req)
+{
+       struct device *dev = &GET_DEV(inst->accel_dev);
+       int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+       struct qat_alg_buf_list *bufl;
+       struct qat_alg_buf_list *buflout = NULL;
+       dma_addr_t blp;
+       dma_addr_t bloutp = 0;
+       struct scatterlist *sg;
+       size_t sz = sizeof(struct qat_alg_buf_list) +
+                       ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+
+       if (unlikely(!n))
+               return -EINVAL;
+
+       bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node);
+       if (unlikely(!bufl))
+               return -ENOMEM;
+
+       blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev, blp)))
+               goto err;
+
+       for_each_sg(assoc, sg, assoc_n, i) {
+               bufl->bufers[bufs].addr = dma_map_single(dev,
+                                                        sg_virt(sg),
+                                                        sg->length,
+                                                        DMA_BIDIRECTIONAL);
+               bufl->bufers[bufs].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+                       goto err;
+               bufs++;
+       }
+       bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
+                                                DMA_BIDIRECTIONAL);
+       bufl->bufers[bufs].len = ivlen;
+       if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+               goto err;
+       bufs++;
+
+       for_each_sg(sgl, sg, n, i) {
+               int y = i + bufs;
+
+               bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+                                                     sg->length,
+                                                     DMA_BIDIRECTIONAL);
+               bufl->bufers[y].len = sg->length;
+               if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
+                       goto err;
+       }
+       bufl->num_bufs = n + bufs;
+       qat_req->buf.bl = bufl;
+       qat_req->buf.blp = blp;
+       qat_req->buf.sz = sz;
+       /* Handle out of place operation */
+       if (sgl != sglout) {
+               struct qat_alg_buf *bufers;
+
+               buflout = kmalloc_node(sz, GFP_ATOMIC,
+                                      inst->accel_dev->numa_node);
+               if (unlikely(!buflout))
+                       goto err;
+               bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
+               if (unlikely(dma_mapping_error(dev, bloutp)))
+                       goto err;
+               bufers = buflout->bufers;
+               /* For an out-of-place operation, DMA map only the data and
+                * reuse the assoc and iv mappings */
+               for (i = 0; i < bufs; i++) {
+                       bufers[i].len = bufl->bufers[i].len;
+                       bufers[i].addr = bufl->bufers[i].addr;
+               }
+               for_each_sg(sglout, sg, n, i) {
+                       int y = i + bufs;
+
+                       bufers[y].addr = dma_map_single(dev, sg_virt(sg),
+                                                       sg->length,
+                                                       DMA_BIDIRECTIONAL);
+                       buflout->bufers[y].len = sg->length;
+                       if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
+                               goto err;
+               }
+               buflout->num_bufs = n + bufs;
+               buflout->num_mapped_bufs = n;
+               qat_req->buf.blout = buflout;
+               qat_req->buf.bloutp = bloutp;
+       } else {
+               /* Otherwise set the src and dst to the same address */
+               qat_req->buf.bloutp = qat_req->buf.blp;
+       }
+       return 0;
+err:
+       dev_err(dev, "Failed to map buf for dma\n");
+       for_each_sg(sgl, sg, n + bufs, i) {
+               if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
+                       dma_unmap_single(dev, bufl->bufers[i].addr,
+                                        bufl->bufers[i].len,
+                                        DMA_BIDIRECTIONAL);
+               }
+       }
+       if (!dma_mapping_error(dev, blp))
+               dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
+       kfree(bufl);
+       if (sgl != sglout && buflout) {
+               for_each_sg(sglout, sg, n, i) {
+                       int y = i + bufs;
+
+                       if (!dma_mapping_error(dev, buflout->bufers[y].addr))
+                               dma_unmap_single(dev, buflout->bufers[y].addr,
+                                                buflout->bufers[y].len,
+                                                DMA_BIDIRECTIONAL);
+               }
+               if (!dma_mapping_error(dev, bloutp))
+                       dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
+               kfree(buflout);
+       }
+       return -ENOMEM;
+}
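
The mapper above adds the IV as the first buffer-list entry, DMA-maps every scatterlist segment, and on any failure unwinds only the mappings that succeeded before returning -ENOMEM. A minimal userspace sketch of that unwind idiom, with acquire()/release() as hypothetical stand-ins for dma_map_single()/dma_unmap_single():

/* Unwind-on-failure sketch; all names here are hypothetical, not
 * part of the QAT driver. */
#include <stdio.h>
#include <stdlib.h>

static void *acquire(int i)
{
        /* Fail deliberately on the fourth resource to exercise the path. */
        return (i == 3) ? NULL : malloc(16);
}

static void release(void *p)
{
        free(p);
}

static int map_all(void *res[], int n)
{
        int i;

        for (i = 0; i < n; i++) {
                res[i] = acquire(i);
                if (!res[i])
                        goto err;
        }
        return 0;
err:
        /* Release only what was successfully acquired. */
        while (--i >= 0)
                release(res[i]);
        return -1;
}

int main(void)
{
        void *res[8] = { NULL };

        printf("map_all: %d\n", map_all(res, 8));       /* prints -1 */
        return 0;
}
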
+
+void qat_alg_callback(void *resp)
+{
+       struct icp_qat_fw_la_resp *qat_resp = resp;
+       struct qat_crypto_request *qat_req =
+                               (void *)(__force long)qat_resp->opaque_data;
+       struct qat_alg_session_ctx *ctx = qat_req->ctx;
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct aead_request *areq = qat_req->areq;
+       uint8_t stat_field = qat_resp->comn_resp.comn_status;
+       int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_field);
+
+       qat_alg_free_bufl(inst, qat_req);
+       if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+               res = -EBADMSG;
+       areq->base.complete(&areq->base, res);
+}
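
The completion path recovers the request pointer that was stashed in the 64-bit opaque_data field of the firmware message at submit time; the __force casts only silence sparse on the integer/pointer conversion. A self-contained sketch of the same round trip (struct names here are hypothetical):

/* Carrying a request pointer through an opaque 64-bit field, as done
 * with comn_mid.opaque_data above. */
#include <assert.h>
#include <stdint.h>

struct fw_msg {
        uint64_t opaque_data;   /* travels to the device and back */
};

struct request {
        int id;
};

static void submit(struct fw_msg *msg, struct request *req)
{
        msg->opaque_data = (uint64_t)(uintptr_t)req;
}

static struct request *complete_msg(const struct fw_msg *msg)
{
        return (struct request *)(uintptr_t)msg->opaque_data;
}

int main(void)
{
        struct request req = { .id = 42 };
        struct fw_msg msg;

        submit(&msg, &req);
        assert(complete_msg(&msg)->id == 42);   /* pointer survives round trip */
        return 0;
}
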
+
+static int qat_alg_dec(struct aead_request *areq)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+       int digest_size = crypto_aead_crt(aead_tfm)->authsize;
+       int ret, ctr = 0;
+
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+                                 areq->iv, AES_BLOCK_SIZE, qat_req);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->dec_fw_req_tmpl;
+       qat_req->ctx = ctx;
+       qat_req->areq = areq;
+       qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       cipher_param->cipher_length = areq->cryptlen - digest_size;
+       cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+       memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
+       auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen +
+                               cipher_param->cipher_length + AES_BLOCK_SIZE;
+       do {
+               ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+       } while (ret == -EAGAIN && ctr++ < 10);
+
+       if (ret == -EAGAIN) {
+               qat_alg_free_bufl(ctx->inst, qat_req);
+               return -EBUSY;
+       }
+       return -EINPROGRESS;
+}
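
Submission retries the ring put up to ten times on -EAGAIN before unmapping the buffers and reporting -EBUSY; the encrypt path below uses the same loop. A generic sketch of that bounded retry, with try_send() as a stub standing in for adf_send_message():

/* Bounded-retry submit; try_send() is hypothetical. */
#include <errno.h>
#include <stdio.h>

static int attempts;

static int try_send(void)
{
        /* Pretend the ring is full for the first two attempts. */
        return (attempts++ < 2) ? -EAGAIN : 0;
}

static int submit_with_retry(void)
{
        int ret, ctr = 0;

        do {
                ret = try_send();
        } while (ret == -EAGAIN && ctr++ < 10);

        if (ret == -EAGAIN)
                return -EBUSY;  /* give up: caller must clean up */
        return ret;
}

int main(void)
{
        printf("submit: %d\n", submit_with_retry());    /* prints 0 */
        return 0;
}
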
+
+static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
+                               int enc_iv)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_request *qat_req = aead_request_ctx(areq);
+       struct icp_qat_fw_la_cipher_req_params *cipher_param;
+       struct icp_qat_fw_la_auth_req_params *auth_param;
+       struct icp_qat_fw_la_bulk_req *msg;
+       int ret, ctr = 0;
+
+       ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst,
+                                 iv, AES_BLOCK_SIZE, qat_req);
+       if (unlikely(ret))
+               return ret;
+
+       msg = &qat_req->req;
+       *msg = ctx->enc_fw_req_tmpl;
+       qat_req->ctx = ctx;
+       qat_req->areq = areq;
+       qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+       qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+       qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+       cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+       auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
+
+       if (enc_iv) {
+               cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
+               cipher_param->cipher_offset = areq->assoclen;
+       } else {
+               memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
+               cipher_param->cipher_length = areq->cryptlen;
+               cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
+       }
+       auth_param->auth_off = 0;
+       auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;
+
+       do {
+               ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+       } while (ret == -EAGAIN && ctr++ < 10);
+
+       if (ret == -EAGAIN) {
+               qat_alg_free_bufl(ctx->inst, qat_req);
+               return -EBUSY;
+       }
+       return -EINPROGRESS;
+}
+
+static int qat_alg_enc(struct aead_request *areq)
+{
+       return qat_alg_enc_internal(areq, areq->iv, 0);
+}
+
+static int qat_alg_genivenc(struct aead_givcrypt_request *req)
+{
+       struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
+       struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+       __be64 seq;
+
+       memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
+       seq = cpu_to_be64(req->seq);
+       memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
+              &seq, sizeof(uint64_t));
+       return qat_alg_enc_internal(&req->areq, req->giv, 1);
+}
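
The givencrypt path derives the IV from the per-session salt, overwriting the last eight bytes with the big-endian request sequence number. The same construction in userspace, assuming glibc's htobe64() from <endian.h> as the counterpart of the kernel's cpu_to_be64():

/* IV construction as in qat_alg_genivenc: salt with be64(seq) in the
 * last 8 bytes of a 16-byte IV. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define IV_SIZE 16

static void build_iv(uint8_t iv[IV_SIZE], const uint8_t salt[IV_SIZE],
                     uint64_t seq)
{
        uint64_t be_seq = htobe64(seq);

        memcpy(iv, salt, IV_SIZE);
        memcpy(iv + IV_SIZE - sizeof(be_seq), &be_seq, sizeof(be_seq));
}

int main(void)
{
        uint8_t salt[IV_SIZE] = { 0xaa }, iv[IV_SIZE];
        int i;

        build_iv(iv, salt, 1);
        for (i = 0; i < IV_SIZE; i++)
                printf("%02x", iv[i]);  /* last byte prints as 01 */
        printf("\n");
        return 0;
}
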
+
+static int qat_alg_init(struct crypto_tfm *tfm,
+                       enum icp_qat_hw_auth_algo hash, const char *hash_name)
+{
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       memset(ctx, 0, sizeof(*ctx));
+       ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
+       if (IS_ERR(ctx->hash_tfm))
+               return -EFAULT;
+       spin_lock_init(&ctx->lock);
+       ctx->qat_hash_alg = hash;
+       tfm->crt_aead.reqsize = sizeof(struct aead_request) +
+                               sizeof(struct qat_crypto_request);
+       ctx->tfm = tfm;
+       return 0;
+}
+
+static int qat_alg_sha1_init(struct crypto_tfm *tfm)
+{
+       return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+}
+
+static int qat_alg_sha256_init(struct crypto_tfm *tfm)
+{
+       return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+}
+
+static int qat_alg_sha512_init(struct crypto_tfm *tfm)
+{
+       return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+}
+
+static void qat_alg_exit(struct crypto_tfm *tfm)
+{
+       struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qat_crypto_instance *inst = ctx->inst;
+       struct device *dev;
+
+       if (!IS_ERR(ctx->hash_tfm))
+               crypto_free_shash(ctx->hash_tfm);
+
+       if (!inst)
+               return;
+
+       dev = &GET_DEV(inst->accel_dev);
+       if (ctx->enc_cd)
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->enc_cd, ctx->enc_cd_paddr);
+       if (ctx->dec_cd)
+               dma_free_coherent(dev, sizeof(struct qat_alg_cd),
+                                 ctx->dec_cd, ctx->dec_cd_paddr);
+       if (ctx->auth_hw_state_enc)
+               dma_free_coherent(dev, sizeof(struct qat_auth_state),
+                                 ctx->auth_hw_state_enc,
+                                 ctx->auth_state_enc_paddr);
+
+       if (ctx->auth_hw_state_dec)
+               dma_free_coherent(dev, sizeof(struct qat_auth_state),
+                                 ctx->auth_hw_state_dec,
+                                 ctx->auth_state_dec_paddr);
+
+       qat_crypto_put_instance(inst);
+}
+
+static struct crypto_alg qat_algs[] = { {
+       .cra_name = "authenc(hmac(sha1),cbc(aes))",
+       .cra_driver_name = "qat_aes_cbc_hmac_sha1",
+       .cra_priority = 4001,
+       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+       .cra_blocksize = AES_BLOCK_SIZE,
+       .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+       .cra_alignmask = 0,
+       .cra_type = &crypto_aead_type,
+       .cra_module = THIS_MODULE,
+       .cra_init = qat_alg_sha1_init,
+       .cra_exit = qat_alg_exit,
+       .cra_u = {
+               .aead = {
+                       .setkey = qat_alg_setkey,
+                       .decrypt = qat_alg_dec,
+                       .encrypt = qat_alg_enc,
+                       .givencrypt = qat_alg_genivenc,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA1_DIGEST_SIZE,
+               },
+       },
+}, {
+       .cra_name = "authenc(hmac(sha256),cbc(aes))",
+       .cra_driver_name = "qat_aes_cbc_hmac_sha256",
+       .cra_priority = 4001,
+       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+       .cra_blocksize = AES_BLOCK_SIZE,
+       .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+       .cra_alignmask = 0,
+       .cra_type = &crypto_aead_type,
+       .cra_module = THIS_MODULE,
+       .cra_init = qat_alg_sha256_init,
+       .cra_exit = qat_alg_exit,
+       .cra_u = {
+               .aead = {
+                       .setkey = qat_alg_setkey,
+                       .decrypt = qat_alg_dec,
+                       .encrypt = qat_alg_enc,
+                       .givencrypt = qat_alg_genivenc,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA256_DIGEST_SIZE,
+               },
+       },
+}, {
+       .cra_name = "authenc(hmac(sha512),cbc(aes))",
+       .cra_driver_name = "qat_aes_cbc_hmac_sha512",
+       .cra_priority = 4001,
+       .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+       .cra_blocksize = AES_BLOCK_SIZE,
+       .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+       .cra_alignmask = 0,
+       .cra_type = &crypto_aead_type,
+       .cra_module = THIS_MODULE,
+       .cra_init = qat_alg_sha512_init,
+       .cra_exit = qat_alg_exit,
+       .cra_u = {
+               .aead = {
+                       .setkey = qat_alg_setkey,
+                       .decrypt = qat_alg_dec,
+                       .encrypt = qat_alg_enc,
+                       .givencrypt = qat_alg_genivenc,
+                       .ivsize = AES_BLOCK_SIZE,
+                       .maxauthsize = SHA512_DIGEST_SIZE,
+               },
+       },
+} };
+
+int qat_algs_register(void)
+{
+       if (atomic_add_return(1, &active_dev) == 1) {
+               int i;
+
+               for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
+                       qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
+                                               CRYPTO_ALG_ASYNC;
+               return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       }
+       return 0;
+}
+
+int qat_algs_unregister(void)
+{
+       if (atomic_sub_return(1, &active_dev) == 0)
+               return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
+       return 0;
+}
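
Registration is gated on the active_dev counter so the algorithms are registered exactly once, when the first accelerator comes up, and unregistered when the last one goes away. A C11 stdatomic sketch of that first-up/last-down gating:

/* First-up/last-down gating with an atomic counter, mirroring
 * qat_algs_register()/qat_algs_unregister(). Userspace sketch. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int active_dev;

static void do_register(void)   { puts("register algs"); }
static void do_unregister(void) { puts("unregister algs"); }

static void dev_up(void)
{
        if (atomic_fetch_add(&active_dev, 1) + 1 == 1)
                do_register();          /* only the first device registers */
}

static void dev_down(void)
{
        if (atomic_fetch_sub(&active_dev, 1) - 1 == 0)
                do_unregister();        /* only the last device unregisters */
}

int main(void)
{
        dev_up();       /* register algs */
        dev_up();       /* no-op */
        dev_down();     /* no-op */
        dev_down();     /* unregister algs */
        return 0;
}
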
+
+int qat_algs_init(void)
+{
+       atomic_set(&active_dev, 0);
+       crypto_get_default_rng();
+       return 0;
+}
+
+void qat_algs_exit(void)
+{
+       crypto_put_default_rng();
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
new file mode 100644 (file)
index 0000000..0d59bcb
--- /dev/null
@@ -0,0 +1,284 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "adf_transport.h"
+#include "adf_cfg.h"
+#include "adf_cfg_strings.h"
+#include "qat_crypto.h"
+#include "icp_qat_fw.h"
+
+#define SEC ADF_KERNEL_SEC
+
+static struct service_hndl qat_crypto;
+
+void qat_crypto_put_instance(struct qat_crypto_instance *inst)
+{
+       if (atomic_sub_return(1, &inst->refctr) == 0)
+               adf_dev_put(inst->accel_dev);
+}
+
+static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
+{
+       struct qat_crypto_instance *inst;
+       struct list_head *list_ptr, *tmp;
+       int i;
+
+       list_for_each_safe(list_ptr, tmp, &accel_dev->crypto_list) {
+               inst = list_entry(list_ptr, struct qat_crypto_instance, list);
+
+               for (i = 0; i < atomic_read(&inst->refctr); i++)
+                       qat_crypto_put_instance(inst);
+
+               if (inst->sym_tx)
+                       adf_remove_ring(inst->sym_tx);
+
+               if (inst->sym_rx)
+                       adf_remove_ring(inst->sym_rx);
+
+               if (inst->pke_tx)
+                       adf_remove_ring(inst->pke_tx);
+
+               if (inst->pke_rx)
+                       adf_remove_ring(inst->pke_rx);
+
+               if (inst->rnd_tx)
+                       adf_remove_ring(inst->rnd_tx);
+
+               if (inst->rnd_rx)
+                       adf_remove_ring(inst->rnd_rx);
+
+               list_del(list_ptr);
+               kfree(inst);
+       }
+       return 0;
+}
+
+struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
+{
+       struct adf_accel_dev *accel_dev = NULL;
+       struct qat_crypto_instance *inst_best = NULL;
+       struct list_head *itr;
+       unsigned long best = ~0;
+
+       list_for_each(itr, adf_devmgr_get_head()) {
+               accel_dev = list_entry(itr, struct adf_accel_dev, list);
+               if (accel_dev->numa_node == node && adf_dev_started(accel_dev))
+                       break;
+               accel_dev = NULL;
+       }
+       if (!accel_dev) {
+               pr_err("QAT: Could not find device on give node\n");
+               accel_dev = adf_devmgr_get_first();
+       }
+       if (!accel_dev || !adf_dev_started(accel_dev))
+               return NULL;
+
+       list_for_each(itr, &accel_dev->crypto_list) {
+               struct qat_crypto_instance *inst;
+               unsigned long cur;
+
+               inst = list_entry(itr, struct qat_crypto_instance, list);
+               cur = atomic_read(&inst->refctr);
+               if (best > cur) {
+                       inst_best = inst;
+                       best = cur;
+               }
+       }
+       if (inst_best) {
+               if (atomic_add_return(1, &inst_best->refctr) == 1) {
+                       if (adf_dev_get(accel_dev)) {
+                               atomic_dec(&inst_best->refctr);
+                               pr_err("QAT: Could increment dev refctr\n");
+                               return NULL;
+                       }
+               }
+       }
+       return inst_best;
+}
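
Instance lookup prefers a started device on the caller's node and then takes the crypto instance with the smallest reference count. The core of that scan, reduced to a standalone array walk (the real code iterates a kernel list_head; these types are hypothetical):

/* Least-loaded selection, as in the refctr scan above. */
#include <stdio.h>

struct instance {
        int id;
        unsigned long refctr;
};

static struct instance *pick_least_loaded(struct instance *inst, int n)
{
        struct instance *inst_best = NULL;
        unsigned long best = ~0UL;
        int i;

        for (i = 0; i < n; i++) {
                if (inst[i].refctr < best) {
                        inst_best = &inst[i];
                        best = inst[i].refctr;
                }
        }
        return inst_best;       /* NULL only if n == 0 */
}

int main(void)
{
        struct instance insts[] = { { 0, 3 }, { 1, 1 }, { 2, 2 } };

        printf("picked %d\n", pick_least_loaded(insts, 3)->id); /* prints 1 */
        return 0;
}
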
+
+static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
+{
+       int i;
+       unsigned long bank;
+       unsigned long num_inst, num_msg_sym, num_msg_asym;
+       int msg_size;
+       struct qat_crypto_instance *inst;
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+       strlcpy(key, ADF_NUM_CY, sizeof(key));
+
+       if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+               return -EFAULT;
+
+       if (kstrtoul(val, 0, &num_inst))
+               return -EFAULT;
+
+       for (i = 0; i < num_inst; i++) {
+               inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
+                                   accel_dev->numa_node);
+               if (!inst)
+                       goto err;
+
+               list_add_tail(&inst->list, &accel_dev->crypto_list);
+               inst->id = i;
+               atomic_set(&inst->refctr, 0);
+               inst->accel_dev = accel_dev;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+               if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+                       goto err;
+
+               if (kstrtoul(val, 10, &bank))
+                       goto err;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+                       goto err;
+
+               if (kstrtoul(val, 10, &num_msg_sym))
+                       goto err;
+               num_msg_sym = num_msg_sym >> 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
+                       goto err;
+
+               if (kstrtoul(val, 10, &num_msg_asym))
+                       goto err;
+               num_msg_asym = num_msg_asym >> 1;
+
+               msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+                                   msg_size, key, NULL, 0, &inst->sym_tx))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+                                   msg_size, key, NULL, 0, &inst->rnd_tx))
+                       goto err;
+
+               msg_size = msg_size >> 1;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+                                   msg_size, key, NULL, 0, &inst->pke_tx))
+                       goto err;
+
+               msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
+                                   msg_size, key, qat_alg_callback, 0,
+                                   &inst->sym_rx))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+                                   msg_size, key, qat_alg_callback, 0,
+                                   &inst->rnd_rx))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
+                                   msg_size, key, qat_alg_callback, 0,
+                                   &inst->pke_rx))
+                       goto err;
+       }
+       return 0;
+err:
+       qat_crypto_free_instances(accel_dev);
+       return -ENOMEM;
+}
+
+static int qat_crypto_init(struct adf_accel_dev *accel_dev)
+{
+       if (qat_crypto_create_instances(accel_dev))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
+{
+       return qat_crypto_free_instances(accel_dev);
+}
+
+static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
+                                   enum adf_event event)
+{
+       int ret;
+
+       switch (event) {
+       case ADF_EVENT_INIT:
+               ret = qat_crypto_init(accel_dev);
+               break;
+       case ADF_EVENT_SHUTDOWN:
+               ret = qat_crypto_shutdown(accel_dev);
+               break;
+       case ADF_EVENT_RESTARTING:
+       case ADF_EVENT_RESTARTED:
+       case ADF_EVENT_START:
+       case ADF_EVENT_STOP:
+       default:
+               ret = 0;
+       }
+       return ret;
+}
+
+int qat_crypto_register(void)
+{
+       memset(&qat_crypto, 0, sizeof(qat_crypto));
+       qat_crypto.event_hld = qat_crypto_event_handler;
+       qat_crypto.name = "qat_crypto";
+       return adf_service_register(&qat_crypto);
+}
+
+int qat_crypto_unregister(void)
+{
+       return adf_service_unregister(&qat_crypto);
+}
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
new file mode 100644 (file)
index 0000000..ab8468d
--- /dev/null
@@ -0,0 +1,83 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _QAT_CRYPTO_INSTANCE_H_
+#define _QAT_CRYPTO_INSTANCE_H_
+
+#include <linux/list.h>
+#include <linux/slab.h>
+#include "adf_accel_devices.h"
+#include "icp_qat_fw_la.h"
+
+struct qat_crypto_instance {
+       struct adf_etr_ring_data *sym_tx;
+       struct adf_etr_ring_data *sym_rx;
+       struct adf_etr_ring_data *pke_tx;
+       struct adf_etr_ring_data *pke_rx;
+       struct adf_etr_ring_data *rnd_tx;
+       struct adf_etr_ring_data *rnd_rx;
+       struct adf_accel_dev *accel_dev;
+       struct list_head list;
+       unsigned long state;
+       int id;
+       atomic_t refctr;
+};
+
+struct qat_crypto_request_buffs {
+       struct qat_alg_buf_list *bl;
+       dma_addr_t blp;
+       struct qat_alg_buf_list *blout;
+       dma_addr_t bloutp;
+       size_t sz;
+};
+
+struct qat_crypto_request {
+       struct icp_qat_fw_la_bulk_req req;
+       struct qat_alg_session_ctx *ctx;
+       struct aead_request *areq;
+       struct qat_crypto_request_buffs buf;
+};
+#endif
diff --git a/drivers/crypto/qat/qat_common/qat_hal.c b/drivers/crypto/qat/qat_common/qat_hal.c
new file mode 100644 (file)
index 0000000..9b8a315
--- /dev/null
@@ -0,0 +1,1393 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_uclo.h"
+
+#define BAD_REGADDR               0xffff
+#define MAX_RETRY_TIMES           10000
+#define INIT_CTX_ARB_VALUE        0x0
+#define INIT_CTX_ENABLE_VALUE     0x0
+#define INIT_PC_VALUE             0x0
+#define INIT_WAKEUP_EVENTS_VALUE  0x1
+#define INIT_SIG_EVENTS_VALUE     0x1
+#define INIT_CCENABLE_VALUE       0x2000
+#define RST_CSR_QAT_LSB           20
+#define RST_CSR_AE_LSB            0
+#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
+
+#define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+       (~(1 << CE_REG_PAR_ERR_BITPOS)))
+#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
+       (inst = ((inst & 0xFFFF00C03FFull) | \
+               ((((const_val) << 12) & 0x0FF00000ull) | \
+               (((const_val) << 10) & 0x0003FC00ull))))
+#define INSERT_IMMED_GPRB_CONST(inst, const_val) \
+       (inst = ((inst & 0xFFFF00FFF00ull) | \
+               ((((const_val) << 12) & 0x0FF00000ull) | \
+               (((const_val) <<  0) & 0x000000FFull))))
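
Reading the masks, INSERT_IMMED_GPRA_CONST patches a 16-bit immediate into a 44-bit microword: the low byte lands in bits 10..17 and the high byte in bits 20..27, with the surrounding opcode bits preserved. A standalone round-trip check of that encoding:

/* Round-trip check of the GPRA immediate encoding above. */
#include <assert.h>
#include <stdint.h>

#define INSERT_IMMED_GPRA_CONST(inst, const_val) \
        (inst = ((inst & 0xFFFF00C03FFull) | \
                ((((const_val) << 12) & 0x0FF00000ull) | \
                (((const_val) << 10) & 0x0003FC00ull))))

static uint16_t extract_immed_gpra(uint64_t inst)
{
        /* high byte from bits 20..27, low byte from bits 10..17 */
        return (uint16_t)((((inst >> 20) & 0xff) << 8) |
                          ((inst >> 10) & 0xff));
}

int main(void)
{
        uint64_t inst = 0x0F0400C0000ull;       /* a uword from inst_4b[] */

        INSERT_IMMED_GPRA_CONST(inst, 0xABCDull);
        assert(extract_immed_gpra(inst) == 0xABCD);     /* round-trips */
        return 0;
}
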
+
+#define AE(handle, ae) handle->hal_handle->aes[ae]
+
+static const uint64_t inst_4b[] = {
+       0x0F0400C0000ull, 0x0F4400C0000ull, 0x0F040000300ull, 0x0F440000300ull,
+       0x0FC066C0000ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0A021000000ull
+};
+
+static const uint64_t inst[] = {
+       0x0F0000C0000ull, 0x0F000000380ull, 0x0D805000011ull, 0x0FC082C0300ull,
+       0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0A0643C0000ull, 0x0BAC0000301ull, 0x0D802000101ull, 0x0F0000C0001ull,
+       0x0FC066C0001ull, 0x0F0000C0300ull, 0x0F0000C0300ull, 0x0F0000C0300ull,
+       0x0F000400300ull, 0x0A0610C0000ull, 0x0BAC0000301ull, 0x0D804400101ull,
+       0x0A0580C0000ull, 0x0A0581C0000ull, 0x0A0582C0000ull, 0x0A0583C0000ull,
+       0x0A0584C0000ull, 0x0A0585C0000ull, 0x0A0586C0000ull, 0x0A0587C0000ull,
+       0x0A0588C0000ull, 0x0A0589C0000ull, 0x0A058AC0000ull, 0x0A058BC0000ull,
+       0x0A058CC0000ull, 0x0A058DC0000ull, 0x0A058EC0000ull, 0x0A058FC0000ull,
+       0x0A05C0C0000ull, 0x0A05C1C0000ull, 0x0A05C2C0000ull, 0x0A05C3C0000ull,
+       0x0A05C4C0000ull, 0x0A05C5C0000ull, 0x0A05C6C0000ull, 0x0A05C7C0000ull,
+       0x0A05C8C0000ull, 0x0A05C9C0000ull, 0x0A05CAC0000ull, 0x0A05CBC0000ull,
+       0x0A05CCC0000ull, 0x0A05CDC0000ull, 0x0A05CEC0000ull, 0x0A05CFC0000ull,
+       0x0A0400C0000ull, 0x0B0400C0000ull, 0x0A0401C0000ull, 0x0B0401C0000ull,
+       0x0A0402C0000ull, 0x0B0402C0000ull, 0x0A0403C0000ull, 0x0B0403C0000ull,
+       0x0A0404C0000ull, 0x0B0404C0000ull, 0x0A0405C0000ull, 0x0B0405C0000ull,
+       0x0A0406C0000ull, 0x0B0406C0000ull, 0x0A0407C0000ull, 0x0B0407C0000ull,
+       0x0A0408C0000ull, 0x0B0408C0000ull, 0x0A0409C0000ull, 0x0B0409C0000ull,
+       0x0A040AC0000ull, 0x0B040AC0000ull, 0x0A040BC0000ull, 0x0B040BC0000ull,
+       0x0A040CC0000ull, 0x0B040CC0000ull, 0x0A040DC0000ull, 0x0B040DC0000ull,
+       0x0A040EC0000ull, 0x0B040EC0000ull, 0x0A040FC0000ull, 0x0B040FC0000ull,
+       0x0D81581C010ull, 0x0E000010000ull, 0x0E000010000ull,
+};
+
+void qat_hal_set_live_ctx(struct icp_qat_fw_loader_handle *handle,
+                         unsigned char ae, unsigned int ctx_mask)
+{
+       AE(handle, ae).live_ctx_mask = ctx_mask;
+}
+
+#define CSR_RETRY_TIMES 500
+static int qat_hal_rd_ae_csr(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned int csr,
+                            unsigned int *value)
+{
+       unsigned int iterations = CSR_RETRY_TIMES;
+
+       do {
+               *value = GET_AE_CSR(handle, ae, csr);
+               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+                       return 0;
+       } while (iterations--);
+
+       pr_err("QAT: Read CSR timeout\n");
+       return -EFAULT;
+}
+
+static int qat_hal_wr_ae_csr(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned int csr,
+                            unsigned int value)
+{
+       unsigned int iterations = CSR_RETRY_TIMES;
+
+       do {
+               SET_AE_CSR(handle, ae, csr, value);
+               if (!(GET_AE_CSR(handle, ae, LOCAL_CSR_STATUS) & LCS_STATUS))
+                       return 0;
+       } while (iterations--);
+
+       pr_err("QAT: Write CSR Timeout\n");
+       return -EFAULT;
+}
+
+static void qat_hal_get_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+                                    unsigned char ae, unsigned char ctx,
+                                    unsigned int *events)
+{
+       unsigned int cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+       qat_hal_rd_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT, events);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_wait_cycles(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int cycles,
+                              int chk_inactive)
+{
+       unsigned int base_cnt = 0, cur_cnt = 0;
+       unsigned int csr = (1 << ACS_ABO_BITPOS);
+       int times = MAX_RETRY_TIMES;
+       int elapsed_cycles = 0;
+
+       qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &base_cnt);
+       base_cnt &= 0xffff;
+       while ((int)cycles > elapsed_cycles && times--) {
+               if (chk_inactive)
+                       qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &csr);
+
+               qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT, &cur_cnt);
+               cur_cnt &= 0xffff;
+               elapsed_cycles = cur_cnt - base_cnt;
+
+               if (elapsed_cycles < 0)
+                       elapsed_cycles += 0x10000;
+
+               /* ensure at least 8 cycles have elapsed in wait_cycles */
+               if (elapsed_cycles >= 8 && !(csr & (1 << ACS_ABO_BITPOS)))
+                       return 0;
+       }
+       if (!times) {
+               pr_err("QAT: wait_num_cycles time out\n");
+               return -EFAULT;
+       }
+       return 0;
+}
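
PROFILE_COUNT is a free-running 16-bit counter, so the elapsed-cycle computation above adds 0x10000 whenever the delta goes negative. The wraparound-safe delta in isolation:

/* Wraparound-safe delta on a 16-bit free-running counter, as in
 * qat_hal_wait_cycles() above. */
#include <assert.h>

static int elapsed16(unsigned int base, unsigned int cur)
{
        int d = (int)(cur & 0xffff) - (int)(base & 0xffff);

        if (d < 0)
                d += 0x10000;   /* counter wrapped past 0xffff */
        return d;
}

int main(void)
{
        assert(elapsed16(0x0010, 0x0018) == 8);         /* no wrap */
        assert(elapsed16(0xfffc, 0x0004) == 8);         /* wrapped */
        return 0;
}
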
+
+#define CLR_BIT(wrd, bit) (wrd & ~(1 << bit))
+#define SET_BIT(wrd, bit) (wrd | 1 << bit)
+
+int qat_hal_set_ae_ctx_mode(struct icp_qat_fw_loader_handle *handle,
+                           unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       if ((mode != 4) && (mode != 8)) {
+               pr_err("QAT: bad ctx mode=%d\n", mode);
+               return -EINVAL;
+       }
+
+       /* Set the acceleration engine context mode to either four or eight */
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+       csr = IGNORE_W1C_MASK & csr;
+       new_csr = (mode == 4) ?
+               SET_BIT(csr, CE_INUSE_CONTEXTS_BITPOS) :
+               CLR_BIT(csr, CE_INUSE_CONTEXTS_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+       return 0;
+}
+
+int qat_hal_set_ae_nn_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+       csr &= IGNORE_W1C_MASK;
+
+       new_csr = (mode) ?
+               SET_BIT(csr, CE_NN_MODE_BITPOS) :
+               CLR_BIT(csr, CE_NN_MODE_BITPOS);
+
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+
+       return 0;
+}
+
+int qat_hal_set_ae_lm_mode(struct icp_qat_fw_loader_handle *handle,
+                          unsigned char ae, enum icp_qat_uof_regtype lm_type,
+                          unsigned char mode)
+{
+       unsigned int csr, new_csr;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr);
+       csr &= IGNORE_W1C_MASK;
+       switch (lm_type) {
+       case ICP_LMEM0:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_0_GLOBAL_BITPOS);
+               break;
+       case ICP_LMEM1:
+               new_csr = (mode) ?
+                       SET_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS) :
+                       CLR_BIT(csr, CE_LMADDR_1_GLOBAL_BITPOS);
+               break;
+       default:
+               pr_err("QAT: lmType = 0x%x\n", lm_type);
+               return -EINVAL;
+       }
+
+       if (new_csr != csr)
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, new_csr);
+       return 0;
+}
+
+static unsigned short qat_hal_get_reg_addr(unsigned int type,
+                                          unsigned short reg_num)
+{
+       unsigned short reg_addr;
+
+       switch (type) {
+       case ICP_GPA_ABS:
+       case ICP_GPB_ABS:
+               reg_addr = 0x80 | (reg_num & 0x7f);
+               break;
+       case ICP_GPA_REL:
+       case ICP_GPB_REL:
+               reg_addr = reg_num & 0x1f;
+               break;
+       case ICP_SR_RD_REL:
+       case ICP_SR_WR_REL:
+       case ICP_SR_REL:
+               reg_addr = 0x180 | (reg_num & 0x1f);
+               break;
+       case ICP_SR_ABS:
+               reg_addr = 0x140 | ((reg_num & 0x3) << 1);
+               break;
+       case ICP_DR_RD_REL:
+       case ICP_DR_WR_REL:
+       case ICP_DR_REL:
+               reg_addr = 0x1c0 | (reg_num & 0x1f);
+               break;
+       case ICP_DR_ABS:
+               reg_addr = 0x100 | ((reg_num & 0x3) << 1);
+               break;
+       case ICP_NEIGH_REL:
+               reg_addr = 0x280 | (reg_num & 0x1f);
+               break;
+       case ICP_LMEM0:
+               reg_addr = 0x200;
+               break;
+       case ICP_LMEM1:
+               reg_addr = 0x220;
+               break;
+       case ICP_NO_DEST:
+               reg_addr = 0x300 | (reg_num & 0xff);
+               break;
+       default:
+               reg_addr = BAD_REGADDR;
+               break;
+       }
+       return reg_addr;
+}
+
+void qat_hal_reset(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int ae_reset_csr;
+
+       ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+       ae_reset_csr |= handle->hal_handle->ae_mask << RST_CSR_AE_LSB;
+       ae_reset_csr |= handle->hal_handle->slice_mask << RST_CSR_QAT_LSB;
+       SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+}
+
+static void qat_hal_wr_indr_csr(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned int ctx_mask,
+                               unsigned int ae_csr, unsigned int csr_val)
+{
+       unsigned int ctx, cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, ae_csr, csr_val);
+       }
+
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_rd_indr_csr(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned char ctx,
+                               unsigned int ae_csr, unsigned int *csr_val)
+{
+       unsigned int cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+       qat_hal_rd_ae_csr(handle, ae, ae_csr, csr_val);
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_sig_event(struct icp_qat_fw_loader_handle *handle,
+                                 unsigned char ae, unsigned int ctx_mask,
+                                 unsigned int events)
+{
+       unsigned int ctx, cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_INDIRECT, events);
+       }
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static void qat_hal_put_wakeup_event(struct icp_qat_fw_loader_handle *handle,
+                                    unsigned char ae, unsigned int ctx_mask,
+                                    unsigned int events)
+{
+       unsigned int ctx, cur_ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CSR_CTX_POINTER, &cur_ctx);
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!(ctx_mask & (1 << ctx)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, ctx);
+               qat_hal_wr_ae_csr(handle, ae, CTX_WAKEUP_EVENTS_INDIRECT,
+                                 events);
+       }
+       qat_hal_wr_ae_csr(handle, ae, CSR_CTX_POINTER, cur_ctx);
+}
+
+static int qat_hal_check_ae_alive(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int base_cnt, cur_cnt;
+       unsigned char ae;
+       unsigned int times = MAX_RETRY_TIMES;
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+
+               qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
+                                 (unsigned int *)&base_cnt);
+               base_cnt &= 0xffff;
+
+               do {
+                       qat_hal_rd_ae_csr(handle, ae, PROFILE_COUNT,
+                                         (unsigned int *)&cur_cnt);
+                       cur_cnt &= 0xffff;
+               } while (times-- && (cur_cnt == base_cnt));
+
+               if (!times) {
+                       pr_err("QAT: AE%d is inactive!!\n", ae);
+                       return -EFAULT;
+               }
+       }
+
+       return 0;
+}
+
+static void qat_hal_reset_timestamp(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int misc_ctl;
+       unsigned char ae;
+
+       /* stop the timestamp timers */
+       misc_ctl = GET_GLB_CSR(handle, MISC_CONTROL);
+       if (misc_ctl & MC_TIMESTAMP_ENABLE)
+               SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl &
+                           (~MC_TIMESTAMP_ENABLE));
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_LOW, 0);
+               qat_hal_wr_ae_csr(handle, ae, TIMESTAMP_HIGH, 0);
+       }
+       /* start timestamp timers */
+       SET_GLB_CSR(handle, MISC_CONTROL, misc_ctl | MC_TIMESTAMP_ENABLE);
+}
+
+#define ESRAM_AUTO_TINIT (1 << 2)
+#define ESRAM_AUTO_TINIT_DONE (1 << 3)
+#define ESRAM_AUTO_INIT_USED_CYCLES (1640)
+#define ESRAM_AUTO_INIT_CSR_OFFSET 0xC1C
+static int qat_hal_init_esram(struct icp_qat_fw_loader_handle *handle)
+{
+       void __iomem *csr_addr = handle->hal_ep_csr_addr_v +
+                                ESRAM_AUTO_INIT_CSR_OFFSET;
+       unsigned int csr_val, times = 30;
+
+       csr_val = ADF_CSR_RD(csr_addr, 0);
+       if ((csr_val & ESRAM_AUTO_TINIT) && (csr_val & ESRAM_AUTO_TINIT_DONE))
+               return 0;
+
+       csr_val = ADF_CSR_RD(csr_addr, 0);
+       csr_val |= ESRAM_AUTO_TINIT;
+       ADF_CSR_WR(csr_addr, 0, csr_val);
+
+       do {
+               qat_hal_wait_cycles(handle, 0, ESRAM_AUTO_INIT_USED_CYCLES, 0);
+               csr_val = ADF_CSR_RD(csr_addr, 0);
+       } while (!(csr_val & ESRAM_AUTO_TINIT_DONE) && times--);
+       if (!times) {
+               pr_err("QAT: Failed to init eSRAM\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+#define SHRAM_INIT_CYCLES 2060
+int qat_hal_clr_reset(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned int ae_reset_csr;
+       unsigned char ae;
+       unsigned int clk_csr;
+       unsigned int times = 100;
+       unsigned int csr;
+
+       /* write to the reset csr */
+       ae_reset_csr = GET_GLB_CSR(handle, ICP_RESET);
+       ae_reset_csr &= ~(handle->hal_handle->ae_mask << RST_CSR_AE_LSB);
+       ae_reset_csr &= ~(handle->hal_handle->slice_mask << RST_CSR_QAT_LSB);
+       do {
+               SET_GLB_CSR(handle, ICP_RESET, ae_reset_csr);
+               if (!(times--))
+                       goto out_err;
+               csr = GET_GLB_CSR(handle, ICP_RESET);
+       } while ((handle->hal_handle->ae_mask |
+                (handle->hal_handle->slice_mask << RST_CSR_QAT_LSB)) & csr);
+       /* enable clock */
+       clk_csr = GET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE);
+       clk_csr |= handle->hal_handle->ae_mask << 0;
+       clk_csr |= handle->hal_handle->slice_mask << 20;
+       SET_GLB_CSR(handle, ICP_GLOBAL_CLK_ENABLE, clk_csr);
+       if (qat_hal_check_ae_alive(handle))
+               goto out_err;
+
+       /* Set undefined power-up/reset states to reasonable default values */
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+                                 INIT_CTX_ENABLE_VALUE);
+               qat_hal_wr_indr_csr(handle, ae, ICP_QAT_UCLO_AE_ALL_CTX,
+                                   CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+               qat_hal_put_wakeup_event(handle, ae,
+                                        ICP_QAT_UCLO_AE_ALL_CTX,
+                                        INIT_WAKEUP_EVENTS_VALUE);
+               qat_hal_put_sig_event(handle, ae,
+                                     ICP_QAT_UCLO_AE_ALL_CTX,
+                                     INIT_SIG_EVENTS_VALUE);
+       }
+       if (qat_hal_init_esram(handle))
+               goto out_err;
+       if (qat_hal_wait_cycles(handle, 0, SHRAM_INIT_CYCLES, 0))
+               goto out_err;
+       qat_hal_reset_timestamp(handle);
+
+       return 0;
+out_err:
+       pr_err("QAT: failed to get device out of reset\n");
+       return -EFAULT;
+}
+
+static void qat_hal_disable_ctx(struct icp_qat_fw_loader_handle *handle,
+                               unsigned char ae, unsigned int ctx_mask)
+{
+       unsigned int ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+       ctx &= IGNORE_W1C_MASK &
+               (~((ctx_mask & ICP_QAT_UCLO_AE_ALL_CTX) << CE_ENABLE_BITPOS));
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static uint64_t qat_hal_parity_64bit(uint64_t word)
+{
+       word ^= word >> 1;
+       word ^= word >> 2;
+       word ^= word >> 4;
+       word ^= word >> 8;
+       word ^= word >> 16;
+       word ^= word >> 32;
+       return word & 1;
+}
+
+static uint64_t qat_hal_set_uword_ecc(uint64_t uword)
+{
+       uint64_t bit0_mask = 0xff800007fffULL, bit1_mask = 0x1f801ff801fULL,
+               bit2_mask = 0xe387e0781e1ULL, bit3_mask = 0x7cb8e388e22ULL,
+               bit4_mask = 0xaf5b2c93244ULL, bit5_mask = 0xf56d5525488ULL,
+               bit6_mask = 0xdaf69a46910ULL;
+
+       /* clear the ecc bits */
+       uword &= ~(0x7fULL << 0x2C);
+       uword |= qat_hal_parity_64bit(bit0_mask & uword) << 0x2C;
+       uword |= qat_hal_parity_64bit(bit1_mask & uword) << 0x2D;
+       uword |= qat_hal_parity_64bit(bit2_mask & uword) << 0x2E;
+       uword |= qat_hal_parity_64bit(bit3_mask & uword) << 0x2F;
+       uword |= qat_hal_parity_64bit(bit4_mask & uword) << 0x30;
+       uword |= qat_hal_parity_64bit(bit5_mask & uword) << 0x31;
+       uword |= qat_hal_parity_64bit(bit6_mask & uword) << 0x32;
+       return uword;
+}
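
Each of the seven ECC bits is the parity of the microword ANDed with a fixed mask, and parity is computed by XOR-folding the 64-bit word down to one bit. A sketch checking the fold against a popcount reference (__builtin_popcountll is a GCC/Clang builtin, standing in for the kernel's hweight64):

/* XOR-fold parity of a 64-bit word, as in qat_hal_parity_64bit(). */
#include <assert.h>
#include <stdint.h>

static uint64_t parity64(uint64_t w)
{
        w ^= w >> 1;
        w ^= w >> 2;
        w ^= w >> 4;
        w ^= w >> 8;
        w ^= w >> 16;
        w ^= w >> 32;
        return w & 1;   /* 1 if an odd number of bits are set */
}

int main(void)
{
        uint64_t v = 0xff800007fffull;  /* bit0_mask from above */

        assert(parity64(v) == (uint64_t)(__builtin_popcountll(v) & 1));
        assert(parity64(0) == 0);
        assert(parity64(1) == 1);
        return 0;
}
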
+
+void qat_hal_wr_uwords(struct icp_qat_fw_loader_handle *handle,
+                      unsigned char ae, unsigned int uaddr,
+                      unsigned int words_num, uint64_t *uword)
+{
+       unsigned int ustore_addr;
+       unsigned int i;
+
+       qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+       uaddr |= UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       for (i = 0; i < words_num; i++) {
+               unsigned int uwrd_lo, uwrd_hi;
+               uint64_t tmp;
+
+               tmp = qat_hal_set_uword_ecc(uword[i]);
+               uwrd_lo = (unsigned int)(tmp & 0xffffffff);
+               uwrd_hi = (unsigned int)(tmp >> 0x20);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       }
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
+
+static void qat_hal_enable_ctx(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int ctx_mask)
+{
+       unsigned int ctx;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx);
+       ctx &= IGNORE_W1C_MASK;
+       ctx_mask &= (ctx & CE_INUSE_CONTEXTS) ? 0x55 : 0xFF;
+       ctx |= (ctx_mask << CE_ENABLE_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx);
+}
+
+static int qat_hal_clear_gpr(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned char ae;
+       unsigned int ctx_mask = ICP_QAT_UCLO_AE_ALL_CTX;
+       int times = MAX_RETRY_TIMES;
+       unsigned int csr_val = 0;
+       unsigned short reg;
+       unsigned int savctx = 0;
+       int ret = 0;
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+               for (reg = 0; reg < ICP_QAT_UCLO_MAX_GPR_REG; reg++) {
+                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_SR_RD_ABS,
+                                            reg, 0);
+                       qat_hal_init_rd_xfer(handle, ae, 0, ICP_DR_RD_ABS,
+                                            reg, 0);
+               }
+               qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+               csr_val &= ~(1 << MMC_SHARE_CS_BITPOS);
+               qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, csr_val);
+               qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &csr_val);
+               csr_val &= IGNORE_W1C_MASK;
+               csr_val |= CE_NN_MODE;
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, csr_val);
+               qat_hal_wr_uwords(handle, ae, 0, ARRAY_SIZE(inst),
+                                 (uint64_t *)inst);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, 0);
+               qat_hal_put_wakeup_event(handle, ae, ctx_mask, XCWE_VOLUNTARY);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask,
+                                   CTX_SIG_EVENTS_INDIRECT, 0);
+               qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+               qat_hal_enable_ctx(handle, ae, ctx_mask);
+       }
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!(handle->hal_handle->ae_mask & (1 << ae)))
+                       continue;
+               /* wait for AE to finish */
+               do {
+                       ret = qat_hal_wait_cycles(handle, ae, 20, 1);
+               } while (ret && times--);
+
+               if (!times) {
+                       pr_err("QAT: clear GPR of AE %d failed", ae);
+                       return -EINVAL;
+               }
+               qat_hal_disable_ctx(handle, ae, ctx_mask);
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 savctx & ACS_ACNO);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES,
+                                 INIT_CTX_ENABLE_VALUE);
+               qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                                   handle->hal_handle->upc_mask &
+                                   INIT_PC_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, INIT_CTX_ARB_VALUE);
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, INIT_CCENABLE_VALUE);
+               qat_hal_put_wakeup_event(handle, ae, ctx_mask,
+                                        INIT_WAKEUP_EVENTS_VALUE);
+               qat_hal_put_sig_event(handle, ae, ctx_mask,
+                                     INIT_SIG_EVENTS_VALUE);
+       }
+       return 0;
+}
+
+#define ICP_DH895XCC_AE_OFFSET      0x20000
+#define ICP_DH895XCC_CAP_OFFSET     (ICP_DH895XCC_AE_OFFSET + 0x10000)
+#define LOCAL_TO_XFER_REG_OFFSET    0x800
+#define ICP_DH895XCC_EP_OFFSET      0x3a000
+#define ICP_DH895XCC_PMISC_BAR 1
+int qat_hal_init(struct adf_accel_dev *accel_dev)
+{
+       unsigned char ae;
+       unsigned int max_en_ae_id = 0;
+       struct icp_qat_fw_loader_handle *handle;
+       struct adf_accel_pci *pci_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct adf_bar *bar = &pci_info->pci_bars[ADF_DH895XCC_PMISC_BAR];
+
+       handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+       if (!handle)
+               return -ENOMEM;
+
+       handle->hal_cap_g_ctl_csr_addr_v = bar->virt_addr +
+                                               ICP_DH895XCC_CAP_OFFSET;
+       handle->hal_cap_ae_xfer_csr_addr_v = bar->virt_addr +
+                                               ICP_DH895XCC_AE_OFFSET;
+       handle->hal_ep_csr_addr_v = bar->virt_addr + ICP_DH895XCC_EP_OFFSET;
+       handle->hal_cap_ae_local_csr_addr_v =
+               handle->hal_cap_ae_xfer_csr_addr_v + LOCAL_TO_XFER_REG_OFFSET;
+
+       handle->hal_handle = kzalloc(sizeof(*handle->hal_handle), GFP_KERNEL);
+       if (!handle->hal_handle)
+               goto out_hal_handle;
+       handle->hal_handle->revision_id = accel_dev->accel_pci_dev.revid;
+       handle->hal_handle->ae_mask = hw_data->ae_mask;
+       handle->hal_handle->slice_mask = hw_data->accel_mask;
+       /* create AE objects */
+       handle->hal_handle->upc_mask = 0x1ffff;
+       handle->hal_handle->max_ustore = 0x4000;
+       for (ae = 0; ae < ICP_QAT_UCLO_MAX_AE; ae++) {
+               if (!(hw_data->ae_mask & (1 << ae)))
+                       continue;
+               handle->hal_handle->aes[ae].free_addr = 0;
+               handle->hal_handle->aes[ae].free_size =
+                   handle->hal_handle->max_ustore;
+               handle->hal_handle->aes[ae].ustore_size =
+                   handle->hal_handle->max_ustore;
+               handle->hal_handle->aes[ae].live_ctx_mask =
+                                               ICP_QAT_UCLO_AE_ALL_CTX;
+               max_en_ae_id = ae;
+       }
+       handle->hal_handle->ae_max_num = max_en_ae_id + 1;
+       /* take all AEs out of reset */
+       if (qat_hal_clr_reset(handle)) {
+               pr_err("QAT: qat_hal_clr_reset error\n");
+               goto out_err;
+       }
+       if (qat_hal_clear_gpr(handle))
+               goto out_err;
+       /* Set SIGNATURE_ENABLE[0] to 0x1 in order to enable ALU_OUT csr */
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               unsigned int csr_val = 0;
+
+               if (!(hw_data->ae_mask & (1 << ae)))
+                       continue;
+               qat_hal_rd_ae_csr(handle, ae, SIGNATURE_ENABLE, &csr_val);
+               csr_val |= 0x1;
+               qat_hal_wr_ae_csr(handle, ae, SIGNATURE_ENABLE, csr_val);
+       }
+       accel_dev->fw_loader->fw_loader = handle;
+       return 0;
+
+out_err:
+       kfree(handle->hal_handle);
+out_hal_handle:
+       kfree(handle);
+       return -EFAULT;
+}
+
+void qat_hal_deinit(struct icp_qat_fw_loader_handle *handle)
+{
+       if (!handle)
+               return;
+       kfree(handle->hal_handle);
+       kfree(handle);
+}
+
+void qat_hal_start(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                  unsigned int ctx_mask)
+{
+       qat_hal_put_wakeup_event(handle, ae, (~ctx_mask) &
+                                ICP_QAT_UCLO_AE_ALL_CTX, 0x10000);
+       qat_hal_enable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_stop(struct icp_qat_fw_loader_handle *handle, unsigned char ae,
+                 unsigned int ctx_mask)
+{
+       qat_hal_disable_ctx(handle, ae, ctx_mask);
+}
+
+void qat_hal_set_pc(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned int ctx_mask, unsigned int upc)
+{
+       qat_hal_wr_indr_csr(handle, ae, ctx_mask, CTX_STS_INDIRECT,
+                           handle->hal_handle->upc_mask & upc);
+}
+
+static void qat_hal_get_uwords(struct icp_qat_fw_loader_handle *handle,
+                              unsigned char ae, unsigned int uaddr,
+                              unsigned int words_num, uint64_t *uword)
+{
+       unsigned int i, uwrd_lo, uwrd_hi;
+       unsigned int ustore_addr, misc_control;
+
+       qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &misc_control);
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL,
+                         misc_control & 0xfffffffb);
+       qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+       uaddr |= UA_ECS;
+       for (i = 0; i < words_num; i++) {
+               qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+               uaddr++;
+               qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_LOWER, &uwrd_lo);
+               qat_hal_rd_ae_csr(handle, ae, USTORE_DATA_UPPER, &uwrd_hi);
+               uword[i] = uwrd_hi;
+               uword[i] = (uword[i] << 0x20) | uwrd_lo;
+       }
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, misc_control);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
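+
+/*
+ * Each ustore word is read back as two 32-bit CSR halves; the << 0x20
+ * above reassembles them as (USTORE_DATA_UPPER << 32) | USTORE_DATA_LOWER.
+ */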
+
+void qat_hal_wr_umem(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned int uaddr,
+                    unsigned int words_num, unsigned int *data)
+{
+       unsigned int i, ustore_addr;
+
+       qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+       uaddr |= UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       for (i = 0; i < words_num; i++) {
+               unsigned int uwrd_lo, uwrd_hi, tmp;
+
+               uwrd_lo = ((data[i] & 0xfff0000) << 4) | (0x3 << 18) |
+                         ((data[i] & 0xff00) << 2) |
+                         (0x3 << 8) | (data[i] & 0xff);
+               uwrd_hi = (0xf << 4) | ((data[i] & 0xf0000000) >> 28);
+               uwrd_hi |= (hweight32(data[i] & 0xffff) & 0x1) << 8;
+               tmp = ((data[i] >> 0x10) & 0xffff);
+               uwrd_hi |= (hweight32(tmp) & 0x1) << 9;
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+               qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       }
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+}
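+
+/*
+ * The packing above interleaves the 32 data bits with fixed control bits
+ * and one parity bit per 16-bit half (the hweight32() & 0x1 terms). For
+ * example, data = 0x00010000 sets bit 9 of the upper word because the
+ * high half has an odd population count.
+ */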
+
+#define MAX_EXEC_INST 100
+static int qat_hal_exec_micro_inst(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  uint64_t *micro_inst, unsigned int inst_num,
+                                  int code_off, unsigned int max_cycle,
+                                  unsigned int *endpc)
+{
+       uint64_t savuwords[MAX_EXEC_INST];
+       unsigned int ind_lm_addr0, ind_lm_addr1;
+       unsigned int ind_lm_addr_byte0, ind_lm_addr_byte1;
+       unsigned int ind_cnt_sig;
+       unsigned int ind_sig, act_sig;
+       unsigned int csr_val = 0, newcsr_val;
+       unsigned int savctx;
+       unsigned int savcc, wakeup_events, savpc;
+       unsigned int ctxarb_ctl, ctx_enables;
+
+       if ((inst_num > handle->hal_handle->max_ustore) || !micro_inst) {
+               pr_err("QAT: invalid instruction num %d\n", inst_num);
+               return -EINVAL;
+       }
+       /* save current context */
+       qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_0_INDIRECT, &ind_lm_addr0);
+       qat_hal_rd_indr_csr(handle, ae, ctx, LM_ADDR_1_INDIRECT, &ind_lm_addr1);
+       qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_0_BYTE_INDEX,
+                           &ind_lm_addr_byte0);
+       qat_hal_rd_indr_csr(handle, ae, ctx, INDIRECT_LM_ADDR_1_BYTE_INDEX,
+                           &ind_lm_addr_byte1);
+       if (inst_num <= MAX_EXEC_INST)
+               qat_hal_get_uwords(handle, ae, 0, inst_num, savuwords);
+       qat_hal_get_wakeup_event(handle, ae, ctx, &wakeup_events);
+       qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT, &savpc);
+       savpc &= handle->hal_handle->upc_mask;
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       ctx_enables &= IGNORE_W1C_MASK;
+       qat_hal_rd_ae_csr(handle, ae, CC_ENABLE, &savcc);
+       qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+       qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_ctl);
+       qat_hal_rd_indr_csr(handle, ae, ctx, FUTURE_COUNT_SIGNAL_INDIRECT,
+                           &ind_cnt_sig);
+       qat_hal_rd_indr_csr(handle, ae, ctx, CTX_SIG_EVENTS_INDIRECT, &ind_sig);
+       qat_hal_rd_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, &act_sig);
+       /* execute the micro code */
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       qat_hal_wr_uwords(handle, ae, 0, inst_num, micro_inst);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT, 0);
+       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, ctx & ACS_ACNO);
+       if (code_off)
+               qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc & 0xffffdfff);
+       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), XCWE_VOLUNTARY);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_SIG_EVENTS_INDIRECT, 0);
+       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, 0);
+       qat_hal_enable_ctx(handle, ae, (1 << ctx));
+       /* wait for the micro code to finish */
+       if (qat_hal_wait_cycles(handle, ae, max_cycle, 1) != 0)
+               return -EFAULT;
+       if (endpc) {
+               unsigned int ctx_status;
+
+               qat_hal_rd_indr_csr(handle, ae, ctx, CTX_STS_INDIRECT,
+                                   &ctx_status);
+               *endpc = ctx_status & handle->hal_handle->upc_mask;
+       }
+       /* restore the saved context */
+       qat_hal_disable_ctx(handle, ae, (1 << ctx));
+       if (inst_num <= MAX_EXEC_INST)
+               qat_hal_wr_uwords(handle, ae, 0, inst_num, savuwords);
+       qat_hal_put_wakeup_event(handle, ae, (1 << ctx), wakeup_events);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx), CTX_STS_INDIRECT,
+                           handle->hal_handle->upc_mask & savpc);
+       qat_hal_rd_ae_csr(handle, ae, AE_MISC_CONTROL, &csr_val);
+       newcsr_val = CLR_BIT(csr_val, MMC_SHARE_CS_BITPOS);
+       qat_hal_wr_ae_csr(handle, ae, AE_MISC_CONTROL, newcsr_val);
+       qat_hal_wr_ae_csr(handle, ae, CC_ENABLE, savcc);
+       qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS, savctx & ACS_ACNO);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_ctl);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           LM_ADDR_0_INDIRECT, ind_lm_addr0);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           LM_ADDR_1_INDIRECT, ind_lm_addr1);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           INDIRECT_LM_ADDR_0_BYTE_INDEX, ind_lm_addr_byte0);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           INDIRECT_LM_ADDR_1_BYTE_INDEX, ind_lm_addr_byte1);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           FUTURE_COUNT_SIGNAL_INDIRECT, ind_cnt_sig);
+       qat_hal_wr_indr_csr(handle, ae, (1 << ctx),
+                           CTX_SIG_EVENTS_INDIRECT, ind_sig);
+       qat_hal_wr_ae_csr(handle, ae, CTX_SIG_EVENTS_ACTIVE, act_sig);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+       return 0;
+}
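+
+/*
+ * qat_hal_exec_micro_inst() follows a save/execute/restore pattern: every
+ * piece of context state the injected snippet could clobber (LM addresses,
+ * wakeup events, PC, condition codes, arbiter control, signal state and
+ * the first inst_num ustore words) is saved up front, the snippet runs on
+ * the one requested context, and the saved state is then written back.
+ */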
+
+static int qat_hal_rd_rel_reg(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             enum icp_qat_uof_regtype reg_type,
+                             unsigned short reg_num, unsigned int *data)
+{
+       unsigned int savctx, uaddr, uwrd_lo, uwrd_hi;
+       unsigned int ctxarb_cntl, ustore_addr, ctx_enables;
+       unsigned short reg_addr;
+       int status = 0;
+       uint64_t insts, savuword;
+
+       reg_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (reg_addr == BAD_REGADDR) {
+               pr_err("QAT: bad regaddr=0x%x\n", reg_addr);
+               return -EINVAL;
+       }
+       switch (reg_type) {
+       case ICP_GPA_REL:
+               insts = 0xA070000000ull | (reg_addr & 0x3ff);
+               break;
+       default:
+               insts = (uint64_t)0xA030000000ull | ((reg_addr & 0x3ff) << 10);
+               break;
+       }
+       qat_hal_rd_ae_csr(handle, ae, ACTIVE_CTX_STATUS, &savctx);
+       qat_hal_rd_ae_csr(handle, ae, CTX_ARB_CNTL, &ctxarb_cntl);
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       ctx_enables &= IGNORE_W1C_MASK;
+       if (ctx != (savctx & ACS_ACNO))
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 ctx & ACS_ACNO);
+       qat_hal_get_uwords(handle, ae, 0, 1, &savuword);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       qat_hal_rd_ae_csr(handle, ae, USTORE_ADDRESS, &ustore_addr);
+       uaddr = UA_ECS;
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       insts = qat_hal_set_uword_ecc(insts);
+       uwrd_lo = (unsigned int)(insts & 0xffffffff);
+       uwrd_hi = (unsigned int)(insts >> 0x20);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_LOWER, uwrd_lo);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_DATA_UPPER, uwrd_hi);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, uaddr);
+       /* delay for at least 8 cycles */
+       qat_hal_wait_cycles(handle, ae, 0x8, 0);
+       /*
+        * read ALU output
+        * the instruction should have been executed
+        * prior to clearing the ECS in putUwords
+        */
+       qat_hal_rd_ae_csr(handle, ae, ALU_OUT, data);
+       qat_hal_wr_ae_csr(handle, ae, USTORE_ADDRESS, ustore_addr);
+       qat_hal_wr_uwords(handle, ae, 0, 1, &savuword);
+       if (ctx != (savctx & ACS_ACNO))
+               qat_hal_wr_ae_csr(handle, ae, ACTIVE_CTX_STATUS,
+                                 savctx & ACS_ACNO);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ARB_CNTL, ctxarb_cntl);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+
+       return status;
+}
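+
+/*
+ * Reading a relative register works by injecting a single ALU move at
+ * uword 0, waiting at least 8 cycles and then sampling the ALU_OUT CSR,
+ * which the init path above made readable by setting SIGNATURE_ENABLE[0].
+ */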
+
+static int qat_hal_wr_rel_reg(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             enum icp_qat_uof_regtype reg_type,
+                             unsigned short reg_num, unsigned int data)
+{
+       unsigned short src_hiaddr, src_lowaddr, dest_addr, data16hi, data16lo;
+       uint64_t insts[] = {
+               0x0F440000000ull,
+               0x0F040000000ull,
+               0x0F0000C0300ull,
+               0x0E000010000ull
+       };
+       const int num_inst = ARRAY_SIZE(insts), code_off = 1;
+       const int imm_w1 = 0, imm_w0 = 1;
+
+       dest_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (dest_addr == BAD_REGADDR) {
+               pr_err("QAT: bad destAddr=0x%x\n", dest_addr);
+               return -EINVAL;
+       }
+
+       data16lo = 0xffff & data;
+       data16hi = 0xffff & (data >> 0x10);
+       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+                                         (0xff & data16hi));
+       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST, (unsigned short)
+                                          (0xff & data16lo));
+       switch (reg_type) {
+       case ICP_GPA_REL:
+               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+                   ((src_hiaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+                   ((src_lowaddr & 0x3ff) << 10) | (dest_addr & 0x3ff);
+               break;
+       default:
+               insts[imm_w1] = insts[imm_w1] | ((data16hi >> 8) << 20) |
+                   ((dest_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+
+               insts[imm_w0] = insts[imm_w0] | ((data16lo >> 8) << 20) |
+                   ((dest_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+               break;
+       }
+
+       return qat_hal_exec_micro_inst(handle, ae, ctx, insts, num_inst,
+                                      code_off, num_inst * 0x5, NULL);
+}
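+
+/*
+ * Two of the four micro words above carry the immediate halves of the
+ * value: the high byte of each 16-bit half goes into bits 20-27 of the
+ * instruction (the << 20 fixups), while the low byte is carried in the
+ * source operand field via the ICP_NO_DEST address lookup.
+ */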
+
+int qat_hal_get_ins_num(void)
+{
+       return ARRAY_SIZE(inst_4b);
+}
+
+static int qat_hal_concat_micro_code(uint64_t *micro_inst,
+                                    unsigned int inst_num, unsigned int size,
+                                    unsigned int addr, unsigned int *value)
+{
+       int i, val_indx;
+       unsigned int cur_value;
+       const uint64_t *inst_arr;
+       int fixup_offset;
+       int usize = 0;
+       int orig_num;
+
+       orig_num = inst_num;
+       val_indx = 0;
+       cur_value = value[val_indx++];
+       inst_arr = inst_4b;
+       usize = ARRAY_SIZE(inst_4b);
+       fixup_offset = inst_num;
+       for (i = 0; i < usize; i++)
+               micro_inst[inst_num++] = inst_arr[i];
+       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], (addr));
+       fixup_offset++;
+       INSERT_IMMED_GPRA_CONST(micro_inst[fixup_offset], 0);
+       fixup_offset++;
+       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0));
+       fixup_offset++;
+       INSERT_IMMED_GPRB_CONST(micro_inst[fixup_offset], (cur_value >> 0x10));
+
+       return inst_num - orig_num;
+}
+
+static int qat_hal_exec_micro_init_lm(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned char ae, unsigned char ctx,
+                                     int *pfirst_exec, uint64_t *micro_inst,
+                                     unsigned int inst_num)
+{
+       int stat = 0;
+       unsigned int gpra0 = 0, gpra1 = 0, gpra2 = 0;
+       unsigned int gprb0 = 0, gprb1 = 0;
+
+       if (*pfirst_exec) {
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, &gpra0);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, &gpra1);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, &gpra2);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, &gprb0);
+               qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, &gprb1);
+               *pfirst_exec = 0;
+       }
+       stat = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, inst_num, 1,
+                                      inst_num * 0x5, NULL);
+       if (stat != 0)
+               return -EFAULT;
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0, gpra0);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x1, gpra1);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPA_REL, 0x2, gpra2);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0, gprb0);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, 0x1, gprb1);
+
+       return 0;
+}
+
+int qat_hal_batch_wr_lm(struct icp_qat_fw_loader_handle *handle,
+                       unsigned char ae,
+                       struct icp_qat_uof_batch_init *lm_init_header)
+{
+       struct icp_qat_uof_batch_init *plm_init;
+       uint64_t *micro_inst_arry;
+       int micro_inst_num;
+       int alloc_inst_size;
+       int first_exec = 1;
+       int stat = 0;
+
+       plm_init = lm_init_header->next;
+       alloc_inst_size = lm_init_header->size;
+       if ((unsigned int)alloc_inst_size > handle->hal_handle->max_ustore)
+               alloc_inst_size = handle->hal_handle->max_ustore;
+       micro_inst_arry = kmalloc_array(alloc_inst_size, sizeof(uint64_t),
+                                       GFP_KERNEL);
+       if (!micro_inst_arry)
+               return -ENOMEM;
+       micro_inst_num = 0;
+       while (plm_init) {
+               unsigned int addr, *value, size;
+
+               ae = plm_init->ae;
+               addr = plm_init->addr;
+               value = plm_init->value;
+               size = plm_init->size;
+               micro_inst_num += qat_hal_concat_micro_code(micro_inst_arry,
+                                                           micro_inst_num,
+                                                           size, addr, value);
+               plm_init = plm_init->next;
+       }
+       /* execute the micro code */
+       if (micro_inst_arry && (micro_inst_num > 0)) {
+               micro_inst_arry[micro_inst_num++] = 0x0E000010000ull;
+               stat = qat_hal_exec_micro_init_lm(handle, ae, 0, &first_exec,
+                                                 micro_inst_arry,
+                                                 micro_inst_num);
+       }
+       kfree(micro_inst_arry);
+       return stat;
+}
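+
+/*
+ * The batch writer above concatenates one inst_4b-sized snippet per list
+ * entry, appends the same terminating micro word the other snippets here
+ * end with (0x0E000010000ull) and executes the whole program once, so a
+ * long LM init list costs a single execute/restore round trip.
+ */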
+
+static int qat_hal_put_rel_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  enum icp_qat_uof_regtype reg_type,
+                                  unsigned short reg_num, unsigned int val)
+{
+       int status = 0;
+       unsigned int reg_addr;
+       unsigned int ctx_enables;
+       unsigned short mask;
+       unsigned short dr_offset = 0x10;
+
+       status = qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       if (CE_INUSE_CONTEXTS & ctx_enables) {
+               if (ctx & 0x1) {
+                       pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+                       return -EINVAL;
+               }
+               mask = 0x1f;
+               dr_offset = 0x20;
+       } else {
+               mask = 0x0f;
+       }
+       if (reg_num & ~mask)
+               return -EINVAL;
+       reg_addr = reg_num + (ctx << 0x5);
+       switch (reg_type) {
+       case ICP_SR_RD_REL:
+       case ICP_SR_REL:
+               SET_AE_XFER(handle, ae, reg_addr, val);
+               break;
+       case ICP_DR_RD_REL:
+       case ICP_DR_REL:
+               SET_AE_XFER(handle, ae, (reg_addr + dr_offset), val);
+               break;
+       default:
+               status = -EINVAL;
+               break;
+       }
+       return status;
+}
+
+static int qat_hal_put_rel_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae, unsigned char ctx,
+                                  enum icp_qat_uof_regtype reg_type,
+                                  unsigned short reg_num, unsigned int data)
+{
+       unsigned int gprval, ctx_enables;
+       unsigned short src_hiaddr, src_lowaddr, gpr_addr, xfr_addr, data16hi,
+           data16low;
+       unsigned short reg_mask;
+       int status = 0;
+       uint64_t micro_inst[] = {
+               0x0F440000000ull,
+               0x0F040000000ull,
+               0x0A000000000ull,
+               0x0F0000C0300ull,
+               0x0E000010000ull
+       };
+       const int num_inst = ARRAY_SIZE(micro_inst), code_off = 1;
+       const unsigned short gprnum = 0, dly = num_inst * 0x5;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       if (CE_INUSE_CONTEXTS & ctx_enables) {
+               if (ctx & 0x1) {
+                       pr_err("QAT: bad 4-ctx mode, ctx=0x%x\n", ctx);
+                       return -EINVAL;
+               }
+               reg_mask = (unsigned short)~0x1f;
+       } else {
+               reg_mask = (unsigned short)~0xf;
+       }
+       if (reg_num & reg_mask)
+               return -EINVAL;
+       xfr_addr = qat_hal_get_reg_addr(reg_type, reg_num);
+       if (xfr_addr == BAD_REGADDR) {
+               pr_err("QAT: bad xfrAddr=0x%x\n", xfr_addr);
+               return -EINVAL;
+       }
+       qat_hal_rd_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, &gprval);
+       gpr_addr = qat_hal_get_reg_addr(ICP_GPB_REL, gprnum);
+       data16low = 0xffff & data;
+       data16hi = 0xffff & (data >> 0x10);
+       src_hiaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+                                         (unsigned short)(0xff & data16hi));
+       src_lowaddr = qat_hal_get_reg_addr(ICP_NO_DEST,
+                                          (unsigned short)(0xff & data16low));
+       micro_inst[0] = micro_inst[0x0] | ((data16hi >> 8) << 20) |
+           ((gpr_addr & 0x3ff) << 10) | (src_hiaddr & 0x3ff);
+       micro_inst[1] = micro_inst[0x1] | ((data16low >> 8) << 20) |
+           ((gpr_addr & 0x3ff) << 10) | (src_lowaddr & 0x3ff);
+       micro_inst[0x2] = micro_inst[0x2] |
+           ((xfr_addr & 0x3ff) << 20) | ((gpr_addr & 0x3ff) << 10);
+       status = qat_hal_exec_micro_inst(handle, ae, ctx, micro_inst, num_inst,
+                                        code_off, dly, NULL);
+       qat_hal_wr_rel_reg(handle, ae, ctx, ICP_GPB_REL, gprnum, gprval);
+       return status;
+}
+
+static int qat_hal_put_rel_nn(struct icp_qat_fw_loader_handle *handle,
+                             unsigned char ae, unsigned char ctx,
+                             unsigned short nn, unsigned int val)
+{
+       unsigned int ctx_enables;
+       int stat = 0;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       ctx_enables &= IGNORE_W1C_MASK;
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables | CE_NN_MODE);
+
+       stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, ICP_NEIGH_REL, nn, val);
+       qat_hal_wr_ae_csr(handle, ae, CTX_ENABLES, ctx_enables);
+       return stat;
+}
+
+static int qat_hal_convert_abs_to_rel(struct icp_qat_fw_loader_handle
+                                     *handle, unsigned char ae,
+                                     unsigned short absreg_num,
+                                     unsigned short *relreg,
+                                     unsigned char *ctx)
+{
+       unsigned int ctx_enables;
+
+       qat_hal_rd_ae_csr(handle, ae, CTX_ENABLES, &ctx_enables);
+       if (ctx_enables & CE_INUSE_CONTEXTS) {
+               /* 4-ctx mode */
+               *relreg = absreg_num & 0x1F;
+               *ctx = (absreg_num >> 0x4) & 0x6;
+       } else {
+               /* 8-ctx mode */
+               *relreg = absreg_num & 0x0F;
+               *ctx = (absreg_num >> 0x4) & 0x7;
+       }
+       return 0;
+}
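+
+/*
+ * Worked example: absolute register 0x13 maps to relative register 3 of
+ * context 1 in 8-ctx mode, but to relative register 0x13 of context 0 in
+ * 4-ctx mode, where only even context numbers survive the & 0x6 mask.
+ */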
+
+int qat_hal_init_gpr(struct icp_qat_fw_loader_handle *handle,
+                    unsigned char ae, unsigned char ctx_mask,
+                    enum icp_qat_uof_regtype reg_type,
+                    unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_GPR_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 1;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_wr_rel_reg(handle, ae, ctx, type, reg, regdata);
+               if (stat) {
+                       pr_err("QAT: write gpr fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_wr_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned char ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 3;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_put_rel_wr_xfer(handle, ae, ctx, type, reg,
+                                              regdata);
+               if (stat) {
+                       pr_err("QAT: write wr xfer fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_rd_xfer(struct icp_qat_fw_loader_handle *handle,
+                        unsigned char ae, unsigned char ctx_mask,
+                        enum icp_qat_uof_regtype reg_type,
+                        unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned short reg;
+       unsigned char ctx = 0;
+       enum icp_qat_uof_regtype type;
+
+       if (reg_num >= ICP_QAT_UCLO_MAX_XFER_REG)
+               return -EINVAL;
+
+       do {
+               if (ctx_mask == 0) {
+                       qat_hal_convert_abs_to_rel(handle, ae, reg_num, &reg,
+                                                  &ctx);
+                       type = reg_type - 3;
+               } else {
+                       reg = reg_num;
+                       type = reg_type;
+                       if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+                               continue;
+               }
+               stat = qat_hal_put_rel_rd_xfer(handle, ae, ctx, type, reg,
+                                              regdata);
+               if (stat) {
+                       pr_err("QAT: write rd xfer fail\n");
+                       return -EINVAL;
+               }
+       } while (ctx_mask && (ctx++ < ICP_QAT_UCLO_MAX_CTX));
+
+       return 0;
+}
+
+int qat_hal_init_nn(struct icp_qat_fw_loader_handle *handle,
+                   unsigned char ae, unsigned char ctx_mask,
+                   unsigned short reg_num, unsigned int regdata)
+{
+       int stat = 0;
+       unsigned char ctx;
+
+       if (ctx_mask == 0)
+               return -EINVAL;
+
+       for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++) {
+               if (!test_bit(ctx, (unsigned long *)&ctx_mask))
+                       continue;
+               stat = qat_hal_put_rel_nn(handle, ae, ctx, reg_num, regdata);
+               if (stat) {
+                       pr_err("QAT: write neigh error\n");
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
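+
+/*
+ * The qat_hal_init_{gpr,wr_xfer,rd_xfer}() entry points above share one
+ * convention: a zero ctx_mask means reg_num is an absolute register
+ * number, which is first converted to a (relative reg, ctx) pair and the
+ * matching *_REL register type (the -1/-3 offsets); a non-zero mask
+ * writes the same relative register in every selected context.
+ * qat_hal_init_nn() always needs a non-zero mask.
+ */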
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
new file mode 100644 (file)
index 0000000..1e27f9f
--- /dev/null
@@ -0,0 +1,1181 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+
+#include "adf_accel_devices.h"
+#include "adf_common_drv.h"
+#include "icp_qat_uclo.h"
+#include "icp_qat_hal.h"
+#include "icp_qat_fw_loader_handle.h"
+
+#define UWORD_CPYBUF_SIZE 1024
+#define INVLD_UWORD 0xffffffffffull
+#define PID_MINOR_REV 0xf
+#define PID_MAJOR_REV (0xf << 4)
+
+static int qat_uclo_init_ae_data(struct icp_qat_uclo_objhandle *obj_handle,
+                                unsigned int ae, unsigned int image_num)
+{
+       struct icp_qat_uclo_aedata *ae_data;
+       struct icp_qat_uclo_encapme *encap_image;
+       struct icp_qat_uclo_page *page = NULL;
+       struct icp_qat_uclo_aeslice *ae_slice = NULL;
+
+       ae_data = &obj_handle->ae_data[ae];
+       encap_image = &obj_handle->ae_uimage[image_num];
+       ae_slice = &ae_data->ae_slices[ae_data->slice_num];
+       ae_slice->encap_image = encap_image;
+
+       if (encap_image->img_ptr) {
+               ae_slice->ctx_mask_assigned =
+                                       encap_image->img_ptr->ctx_assigned;
+               ae_data->eff_ustore_size = obj_handle->ustore_phy_size;
+       } else {
+               ae_slice->ctx_mask_assigned = 0;
+       }
+       ae_slice->region = kzalloc(sizeof(*ae_slice->region), GFP_KERNEL);
+       if (!ae_slice->region)
+               return -ENOMEM;
+       ae_slice->page = kzalloc(sizeof(*ae_slice->page), GFP_KERNEL);
+       if (!ae_slice->page)
+               goto out_err;
+       page = ae_slice->page;
+       page->encap_page = encap_image->page;
+       ae_slice->page->region = ae_slice->region;
+       ae_data->slice_num++;
+       return 0;
+out_err:
+       kfree(ae_slice->region);
+       ae_slice->region = NULL;
+       return -ENOMEM;
+}
+
+static int qat_uclo_free_ae_data(struct icp_qat_uclo_aedata *ae_data)
+{
+       unsigned int i;
+
+       if (!ae_data) {
+               pr_err("QAT: bad argument, ae_data is NULL\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < ae_data->slice_num; i++) {
+               kfree(ae_data->ae_slices[i].region);
+               ae_data->ae_slices[i].region = NULL;
+               kfree(ae_data->ae_slices[i].page);
+               ae_data->ae_slices[i].page = NULL;
+       }
+       return 0;
+}
+
+static char *qat_uclo_get_string(struct icp_qat_uof_strtable *str_table,
+                                unsigned int str_offset)
+{
+       if ((!str_table->table_len) || (str_offset > str_table->table_len))
+               return NULL;
+       return (char *)(((unsigned long)(str_table->strings)) + str_offset);
+}
+
+static int qat_uclo_check_format(struct icp_qat_uof_filehdr *hdr)
+{
+       int maj = hdr->maj_ver & 0xff;
+       int min = hdr->min_ver & 0xff;
+
+       if (hdr->file_id != ICP_QAT_UOF_FID) {
+               pr_err("QAT: Invalid header 0x%x\n", hdr->file_id);
+               return -EINVAL;
+       }
+       if (min != ICP_QAT_UOF_MINVER || maj != ICP_QAT_UOF_MAJVER) {
+               pr_err("QAT: bad UOF version, major 0x%x, minor 0x%x\n",
+                      maj, min);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void qat_uclo_wr_sram_by_words(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned int addr, unsigned int *val,
+                                     unsigned int num_in_bytes)
+{
+       unsigned int outval;
+       unsigned char *ptr = (unsigned char *)val;
+
+       while (num_in_bytes) {
+               memcpy(&outval, ptr, 4);
+               SRAM_WRITE(handle, addr, outval);
+               num_in_bytes -= 4;
+               ptr += 4;
+               addr += 4;
+       }
+}
+
+static void qat_uclo_wr_umem_by_words(struct icp_qat_fw_loader_handle *handle,
+                                     unsigned char ae, unsigned int addr,
+                                     unsigned int *val,
+                                     unsigned int num_in_bytes)
+{
+       unsigned int outval;
+       unsigned char *ptr = (unsigned char *)val;
+
+       addr >>= 0x2; /* convert to uword address */
+
+       while (num_in_bytes) {
+               memcpy(&outval, ptr, 4);
+               qat_hal_wr_umem(handle, ae, addr++, 1, &outval);
+               num_in_bytes -= 4;
+               ptr += 4;
+       }
+}
+
+static void qat_uclo_batch_wr_umem(struct icp_qat_fw_loader_handle *handle,
+                                  unsigned char ae,
+                                  struct icp_qat_uof_batch_init
+                                  *umem_init_header)
+{
+       struct icp_qat_uof_batch_init *umem_init;
+
+       if (!umem_init_header)
+               return;
+       umem_init = umem_init_header->next;
+       while (umem_init) {
+               unsigned int addr, *value, size;
+
+               ae = umem_init->ae;
+               addr = umem_init->addr;
+               value = umem_init->value;
+               size = umem_init->size;
+               qat_uclo_wr_umem_by_words(handle, ae, addr, value, size);
+               umem_init = umem_init->next;
+       }
+}
+
+static void
+qat_uclo_cleanup_batch_init_list(struct icp_qat_fw_loader_handle *handle,
+                                struct icp_qat_uof_batch_init **base)
+{
+       struct icp_qat_uof_batch_init *umem_init;
+
+       umem_init = *base;
+       while (umem_init) {
+               struct icp_qat_uof_batch_init *pre;
+
+               pre = umem_init;
+               umem_init = umem_init->next;
+               kfree(pre);
+       }
+       *base = NULL;
+}
+
+static int qat_uclo_parse_num(char *str, unsigned int *num)
+{
+       char buf[16] = {0};
+       unsigned long ae = 0;
+       int i;
+
+       strncpy(buf, str, 15);
+       for (i = 0; i < 16; i++) {
+               if (!isdigit(buf[i])) {
+                       buf[i] = '\0';
+                       break;
+               }
+       }
+       if (kstrtoul(buf, 10, &ae))
+               return -EFAULT;
+
+       *num = (unsigned int)ae;
+       return 0;
+}
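+
+/*
+ * Example: a symbol name such as "23abc" parses to 23 - the local copy
+ * is cut at the first non-digit before kstrtoul() runs.
+ */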
+
+static int qat_uclo_fetch_initmem_ae(struct icp_qat_fw_loader_handle *handle,
+                                    struct icp_qat_uof_initmem *init_mem,
+                                    unsigned int size_range, unsigned int *ae)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       char *str;
+
+       if ((init_mem->addr + init_mem->num_in_bytes) > (size_range << 0x2)) {
+               pr_err("QAT: initmem is out of range\n");
+               return -EINVAL;
+       }
+       if (init_mem->scope != ICP_QAT_UOF_LOCAL_SCOPE) {
+               pr_err("QAT: Memory scope for init_mem error\n");
+               return -EINVAL;
+       }
+       str = qat_uclo_get_string(&obj_handle->str_table, init_mem->sym_name);
+       if (!str) {
+               pr_err("QAT: AE name assigned in UOF init table is NULL\n");
+               return -EINVAL;
+       }
+       if (qat_uclo_parse_num(str, ae)) {
+               pr_err("QAT: Parse num for AE number failed\n");
+               return -EINVAL;
+       }
+       if (*ae >= ICP_QAT_UCLO_MAX_AE) {
+               pr_err("QAT: ae %d out of range\n", *ae);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_create_batch_init_list(struct icp_qat_fw_loader_handle
+                                          *handle, struct icp_qat_uof_initmem
+                                          *init_mem, unsigned int ae,
+                                          struct icp_qat_uof_batch_init
+                                          **init_tab_base)
+{
+       struct icp_qat_uof_batch_init *init_header, *tail;
+       struct icp_qat_uof_batch_init *mem_init, *tail_old;
+       struct icp_qat_uof_memvar_attr *mem_val_attr;
+       unsigned int i, flag = 0;
+
+       mem_val_attr =
+               (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+               sizeof(struct icp_qat_uof_initmem));
+
+       init_header = *init_tab_base;
+       if (!init_header) {
+               init_header = kzalloc(sizeof(*init_header), GFP_KERNEL);
+               if (!init_header)
+                       return -ENOMEM;
+               init_header->size = 1;
+               *init_tab_base = init_header;
+               flag = 1;
+       }
+       tail_old = init_header;
+       while (tail_old->next)
+               tail_old = tail_old->next;
+       tail = tail_old;
+       for (i = 0; i < init_mem->val_attr_num; i++) {
+               mem_init = kzalloc(sizeof(*mem_init), GFP_KERNEL);
+               if (!mem_init)
+                       goto out_err;
+               mem_init->ae = ae;
+               mem_init->addr = init_mem->addr + mem_val_attr->offset_in_byte;
+               mem_init->value = &mem_val_attr->value;
+               mem_init->size = 4;
+               mem_init->next = NULL;
+               tail->next = mem_init;
+               tail = mem_init;
+               init_header->size += qat_hal_get_ins_num();
+               mem_val_attr++;
+       }
+       return 0;
+out_err:
+       while (tail_old) {
+               mem_init = tail_old->next;
+               kfree(tail_old);
+               tail_old = mem_init;
+       }
+       if (flag)
+               kfree(*init_tab_base);
+       return -ENOMEM;
+}
+
+static int qat_uclo_init_lmem_seg(struct icp_qat_fw_loader_handle *handle,
+                                 struct icp_qat_uof_initmem *init_mem)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae;
+
+       if (qat_uclo_fetch_initmem_ae(handle, init_mem,
+                                     ICP_QAT_UCLO_MAX_LMEM_REG, &ae))
+               return -EINVAL;
+       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+                                           &obj_handle->lm_init_tab[ae]))
+               return -EINVAL;
+       return 0;
+}
+
+static int qat_uclo_init_umem_seg(struct icp_qat_fw_loader_handle *handle,
+                                 struct icp_qat_uof_initmem *init_mem)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae, ustore_size, uaddr, i;
+
+       ustore_size = obj_handle->ustore_phy_size;
+       if (qat_uclo_fetch_initmem_ae(handle, init_mem, ustore_size, &ae))
+               return -EINVAL;
+       if (qat_uclo_create_batch_init_list(handle, init_mem, ae,
+                                           &obj_handle->umem_init_tab[ae]))
+               return -EINVAL;
+       /* set the highest ustore address referenced */
+       uaddr = (init_mem->addr + init_mem->num_in_bytes) >> 0x2;
+       for (i = 0; i < obj_handle->ae_data[ae].slice_num; i++) {
+               if (obj_handle->ae_data[ae].ae_slices[i].
+                   encap_image->uwords_num < uaddr)
+                       obj_handle->ae_data[ae].ae_slices[i].
+                       encap_image->uwords_num = uaddr;
+       }
+       return 0;
+}
+
+#define ICP_DH895XCC_PESRAM_BAR_SIZE 0x80000
+static int qat_uclo_init_ae_memory(struct icp_qat_fw_loader_handle *handle,
+                                  struct icp_qat_uof_initmem *init_mem)
+{
+       unsigned int i;
+       struct icp_qat_uof_memvar_attr *mem_val_attr;
+
+       mem_val_attr =
+               (struct icp_qat_uof_memvar_attr *)((unsigned long)init_mem +
+               sizeof(struct icp_qat_uof_initmem));
+
+       switch (init_mem->region) {
+       case ICP_QAT_UOF_SRAM_REGION:
+               if ((init_mem->addr + init_mem->num_in_bytes) >
+                   ICP_DH895XCC_PESRAM_BAR_SIZE) {
+                       pr_err("QAT: initmem on SRAM is out of range\n");
+                       return -EINVAL;
+               }
+               for (i = 0; i < init_mem->val_attr_num; i++) {
+                       qat_uclo_wr_sram_by_words(handle,
+                                                 init_mem->addr +
+                                                 mem_val_attr->offset_in_byte,
+                                                 &mem_val_attr->value, 4);
+                       mem_val_attr++;
+               }
+               break;
+       case ICP_QAT_UOF_LMEM_REGION:
+               if (qat_uclo_init_lmem_seg(handle, init_mem))
+                       return -EINVAL;
+               break;
+       case ICP_QAT_UOF_UMEM_REGION:
+               if (qat_uclo_init_umem_seg(handle, init_mem))
+                       return -EINVAL;
+               break;
+       default:
+               pr_err("QAT: initmem region error. region type=0x%x\n",
+                      init_mem->region);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_init_ustore(struct icp_qat_fw_loader_handle *handle,
+                               struct icp_qat_uclo_encapme *image)
+{
+       unsigned int i;
+       struct icp_qat_uclo_encap_page *page;
+       struct icp_qat_uof_image *uof_image;
+       unsigned char ae;
+       unsigned int ustore_size;
+       unsigned int patt_pos;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       uint64_t *fill_data;
+
+       uof_image = image->img_ptr;
+       fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
+                           GFP_KERNEL);
+       if (!fill_data)
+               return -ENOMEM;
+       for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
+               memcpy(&fill_data[i], &uof_image->fill_pattern,
+                      sizeof(uint64_t));
+       page = image->page;
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
+                       continue;
+               ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
+               patt_pos = page->beg_addr_p + page->micro_words_num;
+
+               qat_hal_wr_uwords(handle, (unsigned char)ae, 0,
+                                 page->beg_addr_p, &fill_data[0]);
+               qat_hal_wr_uwords(handle, (unsigned char)ae, patt_pos,
+                                 ustore_size - patt_pos + 1,
+                                 &fill_data[page->beg_addr_p]);
+       }
+       kfree(fill_data);
+       return 0;
+}
+
+static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
+{
+       int i, ae;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
+
+       for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
+               if (initmem->num_in_bytes) {
+                       if (qat_uclo_init_ae_memory(handle, initmem))
+                               return -EINVAL;
+               }
+               initmem = (struct icp_qat_uof_initmem *)((unsigned long)(
+                       (unsigned long)initmem +
+                       sizeof(struct icp_qat_uof_initmem)) +
+                       (sizeof(struct icp_qat_uof_memvar_attr) *
+                       initmem->val_attr_num));
+       }
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (qat_hal_batch_wr_lm(handle, ae,
+                                       obj_handle->lm_init_tab[ae])) {
+                       pr_err("QAT: fail to batch init lmem for AE %d\n", ae);
+                       return -EINVAL;
+               }
+               qat_uclo_cleanup_batch_init_list(handle,
+                                                &obj_handle->lm_init_tab[ae]);
+               qat_uclo_batch_wr_umem(handle, ae,
+                                      obj_handle->umem_init_tab[ae]);
+               qat_uclo_cleanup_batch_init_list(handle,
+                                                &obj_handle->
+                                                umem_init_tab[ae]);
+       }
+       return 0;
+}
+
+static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
+                                char *chunk_id, void *cur)
+{
+       int i;
+       struct icp_qat_uof_chunkhdr *chunk_hdr =
+           (struct icp_qat_uof_chunkhdr *)
+           ((unsigned long)obj_hdr + sizeof(struct icp_qat_uof_objhdr));
+
+       for (i = 0; i < obj_hdr->num_chunks; i++) {
+               if ((cur < (void *)&chunk_hdr[i]) &&
+                   !strncmp(chunk_hdr[i].chunk_id, chunk_id,
+                            ICP_QAT_UOF_OBJID_LEN)) {
+                       return &chunk_hdr[i];
+               }
+       }
+       return NULL;
+}
+
+static unsigned int qat_uclo_calc_checksum(unsigned int reg, int ch)
+{
+       int i;
+       unsigned int topbit = 1 << 0xF;
+       unsigned int inbyte = (unsigned int)((reg >> 0x18) ^ ch);
+
+       reg ^= inbyte << 0x8;
+       for (i = 0; i < 0x8; i++) {
+               if (reg & topbit)
+                       reg = (reg << 1) ^ 0x1021;
+               else
+                       reg <<= 1;
+       }
+       return reg & 0xFFFF;
+}
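+
+/*
+ * This is the CRC-16/CCITT step (polynomial 0x1021) applied byte-wise;
+ * chained over a buffer with a zero seed by the helper below, it yields
+ * the per-chunk checksums stored in the UOF file header.
+ */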
+
+static unsigned int qat_uclo_calc_str_checksum(char *ptr, int num)
+{
+       unsigned int chksum = 0;
+
+       if (ptr)
+               while (num--)
+                       chksum = qat_uclo_calc_checksum(chksum, *ptr++);
+       return chksum;
+}
+
+static struct icp_qat_uclo_objhdr *
+qat_uclo_map_chunk(char *buf, struct icp_qat_uof_filehdr *file_hdr,
+                  char *chunk_id)
+{
+       struct icp_qat_uof_filechunkhdr *file_chunk;
+       struct icp_qat_uclo_objhdr *obj_hdr;
+       char *chunk;
+       int i;
+
+       file_chunk = (struct icp_qat_uof_filechunkhdr *)
+               (buf + sizeof(struct icp_qat_uof_filehdr));
+       for (i = 0; i < file_hdr->num_chunks; i++) {
+               if (!strncmp(file_chunk->chunk_id, chunk_id,
+                            ICP_QAT_UOF_OBJID_LEN)) {
+                       chunk = buf + file_chunk->offset;
+                       if (file_chunk->checksum != qat_uclo_calc_str_checksum(
+                               chunk, file_chunk->size))
+                               break;
+                       obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
+                       if (!obj_hdr)
+                               break;
+                       obj_hdr->file_buff = chunk;
+                       obj_hdr->checksum = file_chunk->checksum;
+                       obj_hdr->size = file_chunk->size;
+                       return obj_hdr;
+               }
+               file_chunk++;
+       }
+       return NULL;
+}
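+
+/*
+ * A chunk whose stored checksum disagrees with the CRC computed over its
+ * bytes is rejected (NULL return) before any object header is allocated.
+ */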
+
+static unsigned int
+qat_uclo_check_image_compat(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                           struct icp_qat_uof_image *image)
+{
+       struct icp_qat_uof_objtable *uc_var_tab, *imp_var_tab, *imp_expr_tab;
+       struct icp_qat_uof_objtable *neigh_reg_tab;
+       struct icp_qat_uof_code_page *code_page;
+
+       code_page = (struct icp_qat_uof_code_page *)
+                       ((char *)image + sizeof(struct icp_qat_uof_image));
+       uc_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+                    code_page->uc_var_tab_offset);
+       imp_var_tab = (struct icp_qat_uof_objtable *)(encap_uof_obj->beg_uof +
+                     code_page->imp_var_tab_offset);
+       imp_expr_tab = (struct icp_qat_uof_objtable *)
+                      (encap_uof_obj->beg_uof +
+                      code_page->imp_expr_tab_offset);
+       if (uc_var_tab->entry_num || imp_var_tab->entry_num ||
+           imp_expr_tab->entry_num) {
+               pr_err("QAT: UOF can't contain imported variables\n");
+               return -EINVAL;
+       }
+       neigh_reg_tab = (struct icp_qat_uof_objtable *)
+                       (encap_uof_obj->beg_uof +
+                       code_page->neigh_reg_tab_offset);
+       if (neigh_reg_tab->entry_num) {
+               pr_err("QAT: UOF can't contain shared control store feature\n");
+               return -EINVAL;
+       }
+       if (image->numpages > 1) {
+               pr_err("QAT: UOF can't contain multiple pages\n");
+               return -EINVAL;
+       }
+       if (ICP_QAT_SHARED_USTORE_MODE(image->ae_mode)) {
+               pr_err("QAT: UOF can't use shared control store feature\n");
+               return -EFAULT;
+       }
+       if (RELOADABLE_CTX_SHARED_MODE(image->ae_mode)) {
+               pr_err("QAT: UOF can't use reloadable feature\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
+                                    *encap_uof_obj,
+                                    struct icp_qat_uof_image *img,
+                                    struct icp_qat_uclo_encap_page *page)
+{
+       struct icp_qat_uof_code_page *code_page;
+       struct icp_qat_uof_code_area *code_area;
+       struct icp_qat_uof_objtable *uword_block_tab;
+       struct icp_qat_uof_uword_block *uwblock;
+       int i;
+
+       code_page = (struct icp_qat_uof_code_page *)
+                       ((char *)img + sizeof(struct icp_qat_uof_image));
+       page->def_page = code_page->def_page;
+       page->page_region = code_page->page_region;
+       page->beg_addr_v = code_page->beg_addr_v;
+       page->beg_addr_p = code_page->beg_addr_p;
+       code_area = (struct icp_qat_uof_code_area *)(encap_uof_obj->beg_uof +
+                                               code_page->code_area_offset);
+       page->micro_words_num = code_area->micro_words_num;
+       uword_block_tab = (struct icp_qat_uof_objtable *)
+                         (encap_uof_obj->beg_uof +
+                         code_area->uword_block_tab);
+       page->uwblock_num = uword_block_tab->entry_num;
+       uwblock = (struct icp_qat_uof_uword_block *)((char *)uword_block_tab +
+                       sizeof(struct icp_qat_uof_objtable));
+       page->uwblock = (struct icp_qat_uclo_encap_uwblock *)uwblock;
+       for (i = 0; i < uword_block_tab->entry_num; i++)
+               page->uwblock[i].micro_words =
+               (unsigned long)encap_uof_obj->beg_uof + uwblock[i].uword_offset;
+}
+
+static int qat_uclo_map_uimage(struct icp_qat_uclo_objhandle *obj_handle,
+                              struct icp_qat_uclo_encapme *ae_uimage,
+                              int max_image)
+{
+       int i, j;
+       struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
+       struct icp_qat_uof_image *image;
+       struct icp_qat_uof_objtable *ae_regtab;
+       struct icp_qat_uof_objtable *init_reg_sym_tab;
+       struct icp_qat_uof_objtable *sbreak_tab;
+       struct icp_qat_uof_encap_obj *encap_uof_obj =
+                                       &obj_handle->encap_uof_obj;
+
+       for (j = 0; j < max_image; j++) {
+               chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+                                               ICP_QAT_UOF_IMAG, chunk_hdr);
+               if (!chunk_hdr)
+                       break;
+               image = (struct icp_qat_uof_image *)(encap_uof_obj->beg_uof +
+                                                    chunk_hdr->offset);
+               ae_regtab = (struct icp_qat_uof_objtable *)
+                          (image->reg_tab_offset +
+                          obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+               ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
+                       (((char *)ae_regtab) +
+                       sizeof(struct icp_qat_uof_objtable));
+               init_reg_sym_tab = (struct icp_qat_uof_objtable *)
+                                  (image->init_reg_sym_tab +
+                                  obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+               ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
+                       (((char *)init_reg_sym_tab) +
+                       sizeof(struct icp_qat_uof_objtable));
+               sbreak_tab = (struct icp_qat_uof_objtable *)
+                       (image->sbreak_tab + obj_handle->obj_hdr->file_buff);
+               ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+               ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
+                                     (((char *)sbreak_tab) +
+                                     sizeof(struct icp_qat_uof_objtable));
+               ae_uimage[j].img_ptr = image;
+               if (qat_uclo_check_image_compat(encap_uof_obj, image))
+                       goto out_err;
+               ae_uimage[j].page =
+                       kzalloc(sizeof(struct icp_qat_uclo_encap_page),
+                               GFP_KERNEL);
+               if (!ae_uimage[j].page)
+                       goto out_err;
+               qat_uclo_map_image_page(encap_uof_obj, image,
+                                       ae_uimage[j].page);
+       }
+       return j;
+out_err:
+       for (i = 0; i < j; i++)
+               kfree(ae_uimage[i].page);
+       return 0;
+}
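+
+/*
+ * Note: on success this returns the number of images mapped (j); any
+ * compatibility or allocation failure frees the pages mapped so far and
+ * returns 0, so a zero return always means no usable image.
+ */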
+
+static int qat_uclo_map_ae(struct icp_qat_fw_loader_handle *handle, int max_ae)
+{
+       int i, ae;
+       int mflag = 0;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+       for (ae = 0; ae <= max_ae; ae++) {
+               if (!test_bit(ae,
+                             (unsigned long *)&handle->hal_handle->ae_mask))
+                       continue;
+               for (i = 0; i < obj_handle->uimage_num; i++) {
+                       if (!test_bit(ae, (unsigned long *)
+                       &obj_handle->ae_uimage[i].img_ptr->ae_assigned))
+                               continue;
+                       mflag = 1;
+                       if (qat_uclo_init_ae_data(obj_handle, ae, i))
+                               return -EINVAL;
+               }
+       }
+       if (!mflag) {
+               pr_err("QAT: uimage not assigned to any enabled AE\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static struct icp_qat_uof_strtable *
+qat_uclo_map_str_table(struct icp_qat_uclo_objhdr *obj_hdr,
+                      char *tab_name, struct icp_qat_uof_strtable *str_table)
+{
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+       chunk_hdr = qat_uclo_find_chunk((struct icp_qat_uof_objhdr *)
+                                       obj_hdr->file_buff, tab_name, NULL);
+       if (chunk_hdr) {
+               int hdr_size;
+
+               memcpy(&str_table->table_len, obj_hdr->file_buff +
+                      chunk_hdr->offset, sizeof(str_table->table_len));
+               hdr_size = (char *)&str_table->strings - (char *)str_table;
+               str_table->strings = (unsigned long)obj_hdr->file_buff +
+                                       chunk_hdr->offset + hdr_size;
+               return str_table;
+       }
+       return NULL;
+}
+
+static void
+qat_uclo_map_initmem_table(struct icp_qat_uof_encap_obj *encap_uof_obj,
+                          struct icp_qat_uclo_init_mem_table *init_mem_tab)
+{
+       struct icp_qat_uof_chunkhdr *chunk_hdr;
+
+       chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
+                                       ICP_QAT_UOF_IMEM, NULL);
+       if (chunk_hdr) {
+               memmove(&init_mem_tab->entry_num, encap_uof_obj->beg_uof +
+                       chunk_hdr->offset, sizeof(unsigned int));
+               init_mem_tab->init_mem = (struct icp_qat_uof_initmem *)
+               (encap_uof_obj->beg_uof + chunk_hdr->offset +
+               sizeof(unsigned int));
+       }
+}
+
+static int qat_uclo_check_uof_compat(struct icp_qat_uclo_objhandle *obj_handle)
+{
+       unsigned int maj_ver, prod_type = obj_handle->prod_type;
+
+       if (!(prod_type & obj_handle->encap_uof_obj.obj_hdr->cpu_type)) {
+               pr_err("QAT: UOF type 0x%x doesn't match the platform 0x%x\n",
+                      obj_handle->encap_uof_obj.obj_hdr->cpu_type, prod_type);
+               return -EINVAL;
+       }
+       maj_ver = obj_handle->prod_rev & 0xff;
+       if ((obj_handle->encap_uof_obj.obj_hdr->max_cpu_ver < maj_ver) ||
+           (obj_handle->encap_uof_obj.obj_hdr->min_cpu_ver > maj_ver)) {
+               pr_err("QAT: UOF majVer 0x%x out of range\n", maj_ver);
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static int qat_uclo_init_reg(struct icp_qat_fw_loader_handle *handle,
+                            unsigned char ae, unsigned char ctx_mask,
+                            enum icp_qat_uof_regtype reg_type,
+                            unsigned short reg_addr, unsigned int value)
+{
+       switch (reg_type) {
+       case ICP_GPA_ABS:
+       case ICP_GPB_ABS:
+               ctx_mask = 0;
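+               /* fall through */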
+       case ICP_GPA_REL:
+       case ICP_GPB_REL:
+               return qat_hal_init_gpr(handle, ae, ctx_mask, reg_type,
+                                       reg_addr, value);
+       case ICP_SR_ABS:
+       case ICP_DR_ABS:
+       case ICP_SR_RD_ABS:
+       case ICP_DR_RD_ABS:
+               ctx_mask = 0;
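+               /* fall through */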
+       case ICP_SR_REL:
+       case ICP_DR_REL:
+       case ICP_SR_RD_REL:
+       case ICP_DR_RD_REL:
+               return qat_hal_init_rd_xfer(handle, ae, ctx_mask, reg_type,
+                                           reg_addr, value);
+       case ICP_SR_WR_ABS:
+       case ICP_DR_WR_ABS:
+               ctx_mask = 0;
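+               /* fall through */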
+       case ICP_SR_WR_REL:
+       case ICP_DR_WR_REL:
+               return qat_hal_init_wr_xfer(handle, ae, ctx_mask, reg_type,
+                                           reg_addr, value);
+       case ICP_NEIGH_REL:
+               return qat_hal_init_nn(handle, ae, ctx_mask, reg_addr, value);
+       default:
+               pr_err("QAT: UOF uses unsupported reg type 0x%x\n", reg_type);
+               return -EFAULT;
+       }
+}
+
+static int qat_uclo_init_reg_sym(struct icp_qat_fw_loader_handle *handle,
+                                unsigned int ae,
+                                struct icp_qat_uclo_encapme *encap_ae)
+{
+       unsigned int i;
+       unsigned char ctx_mask;
+       struct icp_qat_uof_init_regsym *init_regsym;
+
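+       /* eight-context images drive all eight ctxs (0xff); four-context
+        * images use only the even ctxs (0x55) */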
+       if (ICP_QAT_CTX_MODE(encap_ae->img_ptr->ae_mode) ==
+           ICP_QAT_UCLO_MAX_CTX)
+               ctx_mask = 0xff;
+       else
+               ctx_mask = 0x55;
+
+       for (i = 0; i < encap_ae->init_regsym_num; i++) {
+               unsigned int exp_res;
+
+               init_regsym = &encap_ae->init_regsym[i];
+               exp_res = init_regsym->value;
+               switch (init_regsym->init_type) {
+               case ICP_QAT_UOF_INIT_REG:
+                       qat_uclo_init_reg(handle, ae, ctx_mask,
+                                         (enum icp_qat_uof_regtype)
+                                         init_regsym->reg_type,
+                                         (unsigned short)init_regsym->reg_addr,
+                                         exp_res);
+                       break;
+               case ICP_QAT_UOF_INIT_REG_CTX:
+                       /* check if ctx is appropriate for the ctxMode */
+                       if (!((1 << init_regsym->ctx) & ctx_mask)) {
+                               pr_err("QAT: invalid ctx num = 0x%x\n",
+                                      init_regsym->ctx);
+                               return -EINVAL;
+                       }
+                       qat_uclo_init_reg(handle, ae,
+                                         (unsigned char)
+                                         (1 << init_regsym->ctx),
+                                         (enum icp_qat_uof_regtype)
+                                         init_regsym->reg_type,
+                                         (unsigned short)init_regsym->reg_addr,
+                                         exp_res);
+                       break;
+               case ICP_QAT_UOF_INIT_EXPR:
+                       pr_err("QAT: INIT_EXPR feature not supported\n");
+                       return -EINVAL;
+               case ICP_QAT_UOF_INIT_EXPR_ENDIAN_SWAP:
+                       pr_err("QAT: INIT_EXPR_ENDIAN_SWAP feature not supported\n");
+                       return -EINVAL;
+               default:
+                       break;
+               }
+       }
+       return 0;
+}
+
+static int qat_uclo_init_globals(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int s, ae;
+
+       if (obj_handle->global_inited)
+               return 0;
+       if (obj_handle->init_mem_tab.entry_num) {
+               if (qat_uclo_init_memory(handle)) {
+                       pr_err("QAT: initialize memory failed\n");
+                       return -EINVAL;
+               }
+       }
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+                       if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+                               continue;
+                       if (qat_uclo_init_reg_sym(handle, ae,
+                                                 obj_handle->ae_data[ae].
+                                                 ae_slices[s].encap_image))
+                               return -EINVAL;
+               }
+       }
+       obj_handle->global_inited = 1;
+       return 0;
+}
+
+static int qat_uclo_set_ae_mode(struct icp_qat_fw_loader_handle *handle)
+{
+       unsigned char ae, nn_mode, s;
+       struct icp_qat_uof_image *uof_image;
+       struct icp_qat_uclo_aedata *ae_data;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!test_bit(ae,
+                             (unsigned long *)&handle->hal_handle->ae_mask))
+                       continue;
+               ae_data = &obj_handle->ae_data[ae];
+               for (s = 0; s < min_t(unsigned int, ae_data->slice_num,
+                                     ICP_QAT_UCLO_MAX_CTX); s++) {
+                       if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
+                               continue;
+                       uof_image = ae_data->ae_slices[s].encap_image->img_ptr;
+                       if (qat_hal_set_ae_ctx_mode(handle, ae,
+                                                   (char)ICP_QAT_CTX_MODE
+                                                   (uof_image->ae_mode))) {
+                               pr_err("QAT: qat_hal_set_ae_ctx_mode error\n");
+                               return -EFAULT;
+                       }
+                       nn_mode = ICP_QAT_NN_MODE(uof_image->ae_mode);
+                       if (qat_hal_set_ae_nn_mode(handle, ae, nn_mode)) {
+                               pr_err("QAT: qat_hal_set_ae_nn_mode error\n");
+                               return -EFAULT;
+                       }
+                       if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM0,
+                                                  (char)ICP_QAT_LOC_MEM0_MODE
+                                                  (uof_image->ae_mode))) {
+                               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM0 error\n");
+                               return -EFAULT;
+                       }
+                       if (qat_hal_set_ae_lm_mode(handle, ae, ICP_LMEM1,
+                                                  (char)ICP_QAT_LOC_MEM1_MODE
+                                                  (uof_image->ae_mode))) {
+                               pr_err("QAT: qat_hal_set_ae_lm_mode LMEM1 error\n");
+                               return -EFAULT;
+                       }
+               }
+       }
+       return 0;
+}
+
+static void qat_uclo_init_uword_num(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       struct icp_qat_uclo_encapme *image;
+       int a;
+
+       for (a = 0; a < obj_handle->uimage_num; a++) {
+               image = &obj_handle->ae_uimage[a];
+               image->uwords_num = image->page->beg_addr_p +
+                                       image->page->micro_words_num;
+       }
+}
+
+static int qat_uclo_parse_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ae;
+
+       obj_handle->uword_buf = kcalloc(UWORD_CPYBUF_SIZE, sizeof(uint64_t),
+                                       GFP_KERNEL);
+       if (!obj_handle->uword_buf)
+               return -ENOMEM;
+       obj_handle->encap_uof_obj.beg_uof = obj_handle->obj_hdr->file_buff;
+       obj_handle->encap_uof_obj.obj_hdr = (struct icp_qat_uof_objhdr *)
+                                            obj_handle->obj_hdr->file_buff;
+       obj_handle->uword_in_bytes = 6;
+       obj_handle->prod_type = ICP_QAT_AC_C_CPU_TYPE;
+       obj_handle->prod_rev = PID_MAJOR_REV |
+                       (PID_MINOR_REV & handle->hal_handle->revision_id);
+       if (qat_uclo_check_uof_compat(obj_handle)) {
+               pr_err("QAT: UOF incompatible\n");
+               return -EINVAL;
+       }
+       obj_handle->ustore_phy_size = ICP_QAT_UCLO_MAX_USTORE;
+       if (!obj_handle->obj_hdr->file_buff ||
+           !qat_uclo_map_str_table(obj_handle->obj_hdr, ICP_QAT_UOF_STRT,
+                                   &obj_handle->str_table)) {
+               pr_err("QAT: UOF has no effective images\n");
+               goto out_err;
+       }
+       obj_handle->uimage_num =
+               qat_uclo_map_uimage(obj_handle, obj_handle->ae_uimage,
+                                   ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX);
+       if (!obj_handle->uimage_num)
+               goto out_err;
+       if (qat_uclo_map_ae(handle, handle->hal_handle->ae_max_num)) {
+               pr_err("QAT: Bad object\n");
+               goto out_check_uof_aemask_err;
+       }
+       qat_uclo_init_uword_num(handle);
+       qat_uclo_map_initmem_table(&obj_handle->encap_uof_obj,
+                                  &obj_handle->init_mem_tab);
+       if (qat_uclo_set_ae_mode(handle))
+               goto out_check_uof_aemask_err;
+       return 0;
+out_check_uof_aemask_err:
+       for (ae = 0; ae < obj_handle->uimage_num; ae++)
+               kfree(obj_handle->ae_uimage[ae].page);
+out_err:
+       kfree(obj_handle->uword_buf);
+       return -EFAULT;
+}
+
+int qat_uclo_map_uof_obj(struct icp_qat_fw_loader_handle *handle,
+                        void *addr_ptr, int mem_size)
+{
+       struct icp_qat_uof_filehdr *filehdr;
+       struct icp_qat_uclo_objhandle *objhdl;
+
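+       /* ae_mask must have a bit for every possible AE */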
+       BUILD_BUG_ON(ICP_QAT_UCLO_MAX_AE >=
+                    (sizeof(handle->hal_handle->ae_mask) * 8));
+
+       if (!handle || !addr_ptr || mem_size < 24)
+               return -EINVAL;
+       objhdl = kzalloc(sizeof(*objhdl), GFP_KERNEL);
+       if (!objhdl)
+               return -ENOMEM;
+       objhdl->obj_buf = kmemdup(addr_ptr, mem_size, GFP_KERNEL);
+       if (!objhdl->obj_buf)
+               goto out_objbuf_err;
+       filehdr = (struct icp_qat_uof_filehdr *)objhdl->obj_buf;
+       if (qat_uclo_check_format(filehdr))
+               goto out_objhdr_err;
+       objhdl->obj_hdr = qat_uclo_map_chunk((char *)objhdl->obj_buf, filehdr,
+                                            ICP_QAT_UOF_OBJS);
+       if (!objhdl->obj_hdr) {
+               pr_err("QAT: object file chunk is null\n");
+               goto out_objhdr_err;
+       }
+       handle->obj_handle = objhdl;
+       if (qat_uclo_parse_uof_obj(handle))
+               goto out_overlay_obj_err;
+       return 0;
+
+out_overlay_obj_err:
+       handle->obj_handle = NULL;
+       kfree(objhdl->obj_hdr);
+out_objhdr_err:
+       kfree(objhdl->obj_buf);
+out_objbuf_err:
+       kfree(objhdl);
+       return -ENOMEM;
+}
+
+void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int a;
+
+       if (!obj_handle)
+               return;
+
+       kfree(obj_handle->uword_buf);
+       for (a = 0; a < obj_handle->uimage_num; a++)
+               kfree(obj_handle->ae_uimage[a].page);
+
+       for (a = 0; a < handle->hal_handle->ae_max_num; a++)
+               qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
+
+       kfree(obj_handle->obj_hdr);
+       kfree(obj_handle->obj_buf);
+       kfree(obj_handle);
+       handle->obj_handle = NULL;
+}
+
+static void qat_uclo_fill_uwords(struct icp_qat_uclo_objhandle *obj_handle,
+                                struct icp_qat_uclo_encap_page *encap_page,
+                                uint64_t *uword, unsigned int addr_p,
+                                unsigned int raddr, uint64_t fill)
+{
+       uint64_t uwrd = 0;
+       unsigned int i;
+
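+       /* look the uword up in the page's uwblocks; words outside any
+        * block, or marked invalid, take the caller's fill pattern */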
+       if (!encap_page) {
+               *uword = fill;
+               return;
+       }
+       for (i = 0; i < encap_page->uwblock_num; i++) {
+               if (raddr >= encap_page->uwblock[i].start_addr &&
+                   raddr <= encap_page->uwblock[i].start_addr +
+                   encap_page->uwblock[i].words_num - 1) {
+                       raddr -= encap_page->uwblock[i].start_addr;
+                       raddr *= obj_handle->uword_in_bytes;
+                       memcpy(&uwrd, (void *)(((unsigned long)
+                              encap_page->uwblock[i].micro_words) + raddr),
+                              obj_handle->uword_in_bytes);
+                       uwrd = uwrd & 0xbffffffffffull;
+               }
+       }
+       *uword = uwrd;
+       if (*uword == INVLD_UWORD)
+               *uword = fill;
+}
+
+static void qat_uclo_wr_uimage_raw_page(struct icp_qat_fw_loader_handle *handle,
+                                       struct icp_qat_uclo_encap_page
+                                       *encap_page, unsigned int ae)
+{
+       unsigned int uw_physical_addr, uw_relative_addr, i, words_num, cpylen;
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       uint64_t fill_pat;
+
+       /* load the page starting at appropriate ustore address */
+       /* get fill-pattern from an image -- they are all the same */
+       memcpy(&fill_pat, obj_handle->ae_uimage[0].img_ptr->fill_pattern,
+              sizeof(uint64_t));
+       uw_physical_addr = encap_page->beg_addr_p;
+       uw_relative_addr = 0;
+       words_num = encap_page->micro_words_num;
+       while (words_num) {
+               if (words_num < UWORD_CPYBUF_SIZE)
+                       cpylen = words_num;
+               else
+                       cpylen = UWORD_CPYBUF_SIZE;
+
+               /* load the buffer */
+               for (i = 0; i < cpylen; i++)
+                       qat_uclo_fill_uwords(obj_handle, encap_page,
+                                            &obj_handle->uword_buf[i],
+                                            uw_physical_addr + i,
+                                            uw_relative_addr + i, fill_pat);
+
+               /* copy the buffer to ustore */
+               qat_hal_wr_uwords(handle, (unsigned char)ae,
+                                 uw_physical_addr, cpylen,
+                                 obj_handle->uword_buf);
+
+               uw_physical_addr += cpylen;
+               uw_relative_addr += cpylen;
+               words_num -= cpylen;
+       }
+}
+
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+                                   struct icp_qat_uof_image *image)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int ctx_mask, s;
+       struct icp_qat_uclo_page *page;
+       unsigned char ae;
+       int ctx;
+
+       if (ICP_QAT_CTX_MODE(image->ae_mode) == ICP_QAT_UCLO_MAX_CTX)
+               ctx_mask = 0xff;
+       else
+               ctx_mask = 0x55;
+       /* load the default page and set assigned CTX PC
+        * to the entrypoint address */
+       for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
+               if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
+                       continue;
+               /* find the slice to which this image is assigned */
+               for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
+                       if (image->ctx_assigned & obj_handle->ae_data[ae].
+                           ae_slices[s].ctx_mask_assigned)
+                               break;
+               }
+               if (s >= obj_handle->ae_data[ae].slice_num)
+                       continue;
+               page = obj_handle->ae_data[ae].ae_slices[s].page;
+               if (!page->encap_page->def_page)
+                       continue;
+               qat_uclo_wr_uimage_raw_page(handle, page->encap_page, ae);
+
+               page = obj_handle->ae_data[ae].ae_slices[s].page;
+               for (ctx = 0; ctx < ICP_QAT_UCLO_MAX_CTX; ctx++)
+                       obj_handle->ae_data[ae].ae_slices[s].cur_page[ctx] =
+                                       (ctx_mask & (1 << ctx)) ? page : NULL;
+               qat_hal_set_live_ctx(handle, (unsigned char)ae,
+                                    image->ctx_assigned);
+               qat_hal_set_pc(handle, (unsigned char)ae, image->ctx_assigned,
+                              image->entry_address);
+       }
+}
+
+int qat_uclo_wr_all_uimage(struct icp_qat_fw_loader_handle *handle)
+{
+       struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
+       unsigned int i;
+
+       if (qat_uclo_init_globals(handle))
+               return -EINVAL;
+       for (i = 0; i < obj_handle->uimage_num; i++) {
+               if (!obj_handle->ae_uimage[i].img_ptr)
+                       return -EINVAL;
+               if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
+                       return -EINVAL;
+               qat_uclo_wr_uimage_page(handle,
+                                       obj_handle->ae_uimage[i].img_ptr);
+       }
+       return 0;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/Makefile b/drivers/crypto/qat/qat_dh895xcc/Makefile
new file mode 100644 (file)
index 0000000..25171c5
--- /dev/null
@@ -0,0 +1,8 @@
+ccflags-y := -I$(src)/../qat_common
+obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc.o
+qat_dh895xcc-objs := adf_drv.o \
+               adf_isr.o \
+               adf_dh895xcc_hw_data.o \
+               adf_hw_arbiter.o \
+               qat_admin.o \
+               adf_admin.o
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
new file mode 100644 (file)
index 0000000..978d6c5
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <adf_accel_devices.h>
+#include "adf_drv.h"
+#include "adf_dh895xcc_hw_data.h"
+
+#define ADF_ADMINMSG_LEN 32
+
+struct adf_admin_comms {
+       dma_addr_t phy_addr;
+       void *virt_addr;
+       void __iomem *mailbox_addr;
+       struct mutex lock;      /* protects adf_admin_comms struct */
+};
+
+int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
+                          uint32_t ae, void *in, void *out)
+{
+       struct adf_admin_comms *admin = accel_dev->admin;
+       int offset = ae * ADF_ADMINMSG_LEN * 2;
+       void __iomem *mailbox = admin->mailbox_addr;
+       int mb_offset = ae * ADF_DH895XCC_MAILBOX_STRIDE;
+       int times, received;
+
+       mutex_lock(&admin->lock);
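+       /* write the request into the DMA region, raise the mailbox flag,
+        * then poll up to ~1s (50 x 20ms) for the firmware to clear it;
+        * the response lands right after the request buffer */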
+
+       if (ADF_CSR_RD(mailbox, mb_offset) == 1) {
+               mutex_unlock(&admin->lock);
+               return -EAGAIN;
+       }
+
+       memcpy(admin->virt_addr + offset, in, ADF_ADMINMSG_LEN);
+       ADF_CSR_WR(mailbox, mb_offset, 1);
+       received = 0;
+       for (times = 0; times < 50; times++) {
+               msleep(20);
+               if (ADF_CSR_RD(mailbox, mb_offset) == 0) {
+                       received = 1;
+                       break;
+               }
+       }
+       if (received)
+               memcpy(out, admin->virt_addr + offset +
+                      ADF_ADMINMSG_LEN, ADF_ADMINMSG_LEN);
+       else
+               pr_err("QAT: Failed to send admin msg to accelerator\n");
+
+       mutex_unlock(&admin->lock);
+       return received ? 0 : -EFAULT;
+}
+
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin;
+       struct adf_bar *pmisc = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+       void __iomem *csr = pmisc->virt_addr;
+       void __iomem *mailbox = csr + ADF_DH895XCC_MAILBOX_BASE_OFFSET;
+       uint64_t reg_val;
+
+       admin = kzalloc_node(sizeof(*admin), GFP_KERNEL,
+                            accel_dev->numa_node);
+       if (!admin)
+               return -ENOMEM;
+       admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                              &admin->phy_addr, GFP_KERNEL);
+       if (!admin->virt_addr) {
+               dev_err(&GET_DEV(accel_dev), "Failed to allocate dma buff\n");
+               kfree(admin);
+               return -ENOMEM;
+       }
+       reg_val = (uint64_t)admin->phy_addr;
+       ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGUR_OFFSET, reg_val >> 32);
+       ADF_CSR_WR(csr, ADF_DH895XCC_ADMINMSGLR_OFFSET, reg_val);
+       mutex_init(&admin->lock);
+       admin->mailbox_addr = mailbox;
+       accel_dev->admin = admin;
+       return 0;
+}
+
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev)
+{
+       struct adf_admin_comms *admin = accel_dev->admin;
+
+       if (!admin)
+               return;
+
+       if (admin->virt_addr)
+               dma_free_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
+                                 admin->virt_addr, admin->phy_addr);
+
+       mutex_destroy(&admin->lock);
+       kfree(admin);
+       accel_dev->admin = NULL;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
new file mode 100644 (file)
index 0000000..ef05825
--- /dev/null
@@ -0,0 +1,214 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_drv.h"
+
+/* Worker-thread to service-arbiter mappings, per device SKU */
+static const uint32_t thrd_to_arb_map_sku4[] = {
+       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+       0x00000000, 0x00000000, 0x00000000, 0x00000000
+};
+
+static const uint32_t thrd_to_arb_map_sku6[] = {
+       0x12222AAA, 0x11666666, 0x12222AAA, 0x11666666,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222,
+       0x12222AAA, 0x11222222, 0x12222AAA, 0x11222222
+};
+
+static struct adf_hw_device_class dh895xcc_class = {
+       .name = ADF_DH895XCC_DEVICE_NAME,
+       .type = DEV_DH895XCC,
+       .instances = 0
+};
+
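+/* fuse bits are set for disabled units, hence the inversion */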
+static uint32_t get_accel_mask(uint32_t fuse)
+{
+       return (~fuse) >> ADF_DH895XCC_ACCELERATORS_REG_OFFSET &
+                         ADF_DH895XCC_ACCELERATORS_MASK;
+}
+
+static uint32_t get_ae_mask(uint32_t fuse)
+{
+       return (~fuse) & ADF_DH895XCC_ACCELENGINES_MASK;
+}
+
+static uint32_t get_num_accels(struct adf_hw_device_data *self)
+{
+       uint32_t i, ctr = 0;
+
+       if (!self || !self->accel_mask)
+               return 0;
+
+       for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
+               if (self->accel_mask & (1 << i))
+                       ctr++;
+       }
+       return ctr;
+}
+
+static uint32_t get_num_aes(struct adf_hw_device_data *self)
+{
+       uint32_t i, ctr = 0;
+
+       if (!self || !self->ae_mask)
+               return 0;
+
+       for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
+               if (self->ae_mask & (1 << i))
+                       ctr++;
+       }
+       return ctr;
+}
+
+static uint32_t get_misc_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_PMISC_BAR;
+}
+
+static uint32_t get_etr_bar_id(struct adf_hw_device_data *self)
+{
+       return ADF_DH895XCC_ETR_BAR;
+}
+
+static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
+{
+       int sku = (self->fuses & ADF_DH895XCC_FUSECTL_SKU_MASK)
+           >> ADF_DH895XCC_FUSECTL_SKU_SHIFT;
+
+       switch (sku) {
+       case ADF_DH895XCC_FUSECTL_SKU_1:
+               return DEV_SKU_1;
+       case ADF_DH895XCC_FUSECTL_SKU_2:
+               return DEV_SKU_2;
+       case ADF_DH895XCC_FUSECTL_SKU_3:
+               return DEV_SKU_3;
+       case ADF_DH895XCC_FUSECTL_SKU_4:
+               return DEV_SKU_4;
+       default:
+               return DEV_SKU_UNKNOWN;
+       }
+}
+
+void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+                            uint32_t const **arb_map_config)
+{
+       switch (accel_dev->accel_pci_dev.sku) {
+       case DEV_SKU_1:
+               *arb_map_config = thrd_to_arb_map_sku4;
+               break;
+
+       case DEV_SKU_2:
+       case DEV_SKU_4:
+               *arb_map_config = thrd_to_arb_map_sku6;
+               break;
+       default:
+               pr_err("QAT: The configuration doesn't match any SKU\n");
+               *arb_map_config = NULL;
+       }
+}
+
+static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
+       void __iomem *csr = misc_bar->virt_addr;
+       unsigned int val, i;
+
+       /* Enable Accel Engine error detection & correction */
+       for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+               val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
+               val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
+               ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
+               val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
+               val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
+               ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
+       }
+
+       /* Enable shared memory error detection & correction */
+       for (i = 0; i < hw_device->get_num_accels(hw_device); i++) {
+               val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
+               val |= ADF_DH895XCC_ERRSSMSH_EN;
+               ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
+               val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
+               val |= ADF_DH895XCC_ERRSSMSH_EN;
+               ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
+       }
+}
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class = &dh895xcc_class;
+       hw_data->instance_id = dh895xcc_class.instances++;
+       hw_data->num_banks = ADF_DH895XCC_ETR_MAX_BANKS;
+       hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
+       hw_data->pci_dev_id = ADF_DH895XCC_PCI_DEVICE_ID;
+       hw_data->num_logical_accel = 1;
+       hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
+       hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
+       hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+       hw_data->alloc_irq = adf_isr_resource_alloc;
+       hw_data->free_irq = adf_isr_resource_free;
+       hw_data->enable_error_correction = adf_enable_error_correction;
+       hw_data->hw_arb_ring_enable = adf_update_ring_arb_enable;
+       hw_data->hw_arb_ring_disable = adf_update_ring_arb_enable;
+       hw_data->get_accel_mask = get_accel_mask;
+       hw_data->get_ae_mask = get_ae_mask;
+       hw_data->get_num_accels = get_num_accels;
+       hw_data->get_num_aes = get_num_aes;
+       hw_data->get_etr_bar_id = get_etr_bar_id;
+       hw_data->get_misc_bar_id = get_misc_bar_id;
+       hw_data->get_sku = get_sku;
+       hw_data->fw_name = ADF_DH895XCC_FW;
+}
+
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
+{
+       hw_data->dev_class->instances--;
+}
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
new file mode 100644 (file)
index 0000000..b707f29
--- /dev/null
@@ -0,0 +1,86 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_HW_DATA_H_
+#define ADF_DH895x_HW_DATA_H_
+
+/* PCIe configuration space */
+#define ADF_DH895XCC_RX_RINGS_OFFSET 8
+#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
+#define ADF_DH895XCC_FUSECTL_OFFSET 0x40
+#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
+#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
+#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
+#define ADF_DH895XCC_FUSECTL_SKU_2 0x1
+#define ADF_DH895XCC_FUSECTL_SKU_3 0x2
+#define ADF_DH895XCC_FUSECTL_SKU_4 0x3
+#define ADF_DH895XCC_MAX_ACCELERATORS 6
+#define ADF_DH895XCC_MAX_ACCELENGINES 12
+#define ADF_DH895XCC_ACCELERATORS_REG_OFFSET 13
+#define ADF_DH895XCC_ACCELERATORS_MASK 0x3F
+#define ADF_DH895XCC_ACCELENGINES_MASK 0xFFF
+#define ADF_DH895XCC_LEGFUSE_OFFSET 0x4C
+#define ADF_DH895XCC_ETR_MAX_BANKS 32
+#define ADF_DH895XCC_SMIAPF0_MASK_OFFSET (0x3A000 + 0x28)
+#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
+#define ADF_DH895XCC_SMIA0_MASK 0xFFFF
+#define ADF_DH895XCC_SMIA1_MASK 0x1
+/* Error detection and correction */
+#define ADF_DH895XCC_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818)
+#define ADF_DH895XCC_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960)
+#define ADF_DH895XCC_ENABLE_AE_ECC_ERR (1 << 28)
+#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (1 << 24 | 1 << 12)
+#define ADF_DH895XCC_UERRSSMSH(i) ((i) * 0x4000 + 0x18)
+#define ADF_DH895XCC_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
+#define ADF_DH895XCC_ERRSSMSH_EN (1 << 3)
+
+/* Admin Messages Registers */
+#define ADF_DH895XCC_ADMINMSGUR_OFFSET (0x3A000 + 0x574)
+#define ADF_DH895XCC_ADMINMSGLR_OFFSET (0x3A000 + 0x578)
+#define ADF_DH895XCC_MAILBOX_BASE_OFFSET 0x20970
+#define ADF_DH895XCC_MAILBOX_STRIDE 0x1000
+#define ADF_DH895XCC_FW "qat_895xcc.bin"
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
new file mode 100644 (file)
index 0000000..0d0435a
--- /dev/null
@@ -0,0 +1,449 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_transport_access_macros.h>
+#include "adf_dh895xcc_hw_data.h"
+#include "adf_drv.h"
+
+static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;
+
+#define ADF_SYSTEM_DEVICE(device_id) \
+       {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+static const struct pci_device_id adf_pci_tbl[] = {
+       ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
+       {0,}
+};
+MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
+
+static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
+static void adf_remove(struct pci_dev *dev);
+
+static struct pci_driver adf_driver = {
+       .id_table = adf_pci_tbl,
+       .name = adf_driver_name,
+       .probe = adf_probe,
+       .remove = adf_remove
+};
+
+static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
+       int i;
+
+       adf_exit_admin_comms(accel_dev);
+       adf_exit_arb(accel_dev);
+       adf_cleanup_etr_data(accel_dev);
+
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
+               if (bar->virt_addr)
+                       pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
+       }
+
+       if (accel_dev->hw_device) {
+               switch (accel_dev->hw_device->pci_dev_id) {
+               case ADF_DH895XCC_PCI_DEVICE_ID:
+                       adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
+                       break;
+               default:
+                       break;
+               }
+               kfree(accel_dev->hw_device);
+       }
+       adf_cfg_dev_remove(accel_dev);
+       debugfs_remove(accel_dev->debugfs_dir);
+       adf_devmgr_rm_dev(accel_dev);
+       pci_release_regions(accel_pci_dev->pci_dev);
+       pci_disable_device(accel_pci_dev->pci_dev);
+       kfree(accel_dev);
+}
+
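+/* crude heuristic: derive the NUMA node from the PCI bus number,
+ * assuming the bus range is split evenly across physical packages */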
+static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
+{
+       unsigned int bus_per_cpu = 0;
+       struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
+
+       if (!c->phys_proc_id)
+               return 0;
+
+       bus_per_cpu = 256 / (c->phys_proc_id + 1);
+
+       if (bus_per_cpu != 0)
+               return pdev->bus->number / bus_per_cpu;
+       return 0;
+}
+
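+/* build a default configuration: one crypto instance per online CPU,
+ * capped by the number of ring banks, with fixed ring numbers and
+ * default asym/sym ring sizes */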
+static int qat_dev_start(struct adf_accel_dev *accel_dev)
+{
+       int cpus = num_online_cpus();
+       int banks = GET_MAX_BANKS(accel_dev);
+       int instances = min(cpus, banks);
+       char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
+       int i;
+       unsigned long val;
+
+       if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
+               goto err;
+       if (adf_cfg_section_add(accel_dev, "Accelerator0"))
+               goto err;
+       for (i = 0; i < instances; i++) {
+               val = i;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
+                        i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
+               val = 128;
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 512;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 0;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 2;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 4;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 8;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 10;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = 12;
+               snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
+               if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+
+               val = ADF_COALESCING_DEF_TIME;
+               snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
+               if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
+                                               key, (void *)&val, ADF_DEC))
+                       goto err;
+       }
+
+       val = i;
+       if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
+                                       ADF_NUM_CY, (void *)&val, ADF_DEC))
+               goto err;
+
+       set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
+       return adf_dev_start(accel_dev);
+err:
+       dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
+       return -EINVAL;
+}
+
+static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+       struct adf_accel_dev *accel_dev;
+       struct adf_accel_pci *accel_pci_dev;
+       struct adf_hw_device_data *hw_data;
+       void __iomem *pmisc_bar_addr = NULL;
+       char name[ADF_DEVICE_NAME_LENGTH];
+       unsigned int i, bar_nr;
+       uint8_t node;
+       int ret;
+
+       switch (ent->device) {
+       case ADF_DH895XCC_PCI_DEVICE_ID:
+               break;
+       default:
+               dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
+               return -ENODEV;
+       }
+
+       node = adf_get_dev_node_id(pdev);
+       accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node);
+       if (!accel_dev)
+               return -ENOMEM;
+
+       accel_dev->numa_node = node;
+       INIT_LIST_HEAD(&accel_dev->crypto_list);
+
+       /* Add accel device to accel table.
+        * This should be called before adf_cleanup_accel is called */
+       if (adf_devmgr_add_dev(accel_dev)) {
+               dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
+               kfree(accel_dev);
+               return -EFAULT;
+       }
+
+       accel_dev->owner = THIS_MODULE;
+       /* Allocate and configure device configuration structure */
+       hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node);
+       if (!hw_data) {
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       accel_dev->hw_device = hw_data;
+       switch (ent->device) {
+       case ADF_DH895XCC_PCI_DEVICE_ID:
+               adf_init_hw_data_dh895xcc(accel_dev->hw_device);
+               break;
+       default:
+               ret = -ENODEV;
+               goto out_err;
+       }
+       accel_pci_dev = &accel_dev->accel_pci_dev;
+       pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
+       pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
+                             &hw_data->fuses);
+
+       /* Get Accelerators and Accelerators Engines masks */
+       hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
+       hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
+       accel_pci_dev->sku = hw_data->get_sku(hw_data);
+       accel_pci_dev->pci_dev = pdev;
+       /* If the device has no acceleration engines then ignore it. */
+       if (!hw_data->accel_mask || !hw_data->ae_mask ||
+           ((~hw_data->ae_mask) & 0x01)) {
+               dev_err(&pdev->dev, "No acceleration units found\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Create dev top level debugfs entry */
+       snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
+                hw_data->dev_class->name, hw_data->instance_id);
+       accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
+       if (!accel_dev->debugfs_dir) {
+               dev_err(&pdev->dev, "Could not create debugfs dir\n");
+               ret = -EINVAL;
+               goto out_err;
+       }
+
+       /* Create device configuration table */
+       ret = adf_cfg_dev_add(accel_dev);
+       if (ret)
+               goto out_err;
+
+       /* enable PCI device */
+       if (pci_enable_device(pdev)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* set dma identifier */
+       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
+                       dev_err(&pdev->dev, "No usable DMA configuration\n");
+                       ret = -EFAULT;
+                       goto out_err;
+               } else {
+                       pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               }
+
+       } else {
+               pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+       }
+
+       if (pci_request_regions(pdev, adf_driver_name)) {
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       /* Read accelerator capabilities mask */
+       pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
+                             &hw_data->accel_capabilities_mask);
+
+       /* Find and map all the device's BARS */
+       for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
+               struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
+
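+               /* BARs are 64 bit, so each one takes two config slots */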
+               bar_nr = i * 2;
+               bar->base_addr = pci_resource_start(pdev, bar_nr);
+               if (!bar->base_addr)
+                       break;
+               bar->size = pci_resource_len(pdev, bar_nr);
+               bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
+               if (!bar->virt_addr) {
+                       dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
+                       ret = -EFAULT;
+                       goto out_err;
+               }
+               if (i == ADF_DH895XCC_PMISC_BAR)
+                       pmisc_bar_addr = bar->virt_addr;
+       }
+       pci_set_master(pdev);
+
+       if (adf_enable_aer(accel_dev, &adf_driver)) {
+               dev_err(&pdev->dev, "Failed to enable aer\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       if (adf_init_etr_data(accel_dev)) {
+               dev_err(&pdev->dev, "Failed to initialize etr\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       if (adf_init_admin_comms(accel_dev)) {
+               dev_err(&pdev->dev, "Failed to initialize admin comms\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+
+       if (adf_init_arb(accel_dev)) {
+               dev_err(&pdev->dev, "Failed to initialize hw arbiter\n");
+               ret = -EFAULT;
+               goto out_err;
+       }
+       if (pci_save_state(pdev)) {
+               dev_err(&pdev->dev, "Failed to save pci state\n");
+               ret = -ENOMEM;
+               goto out_err;
+       }
+
+       /* Enable bundle and misc interrupts */
+       ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+                  ADF_DH895XCC_SMIA0_MASK);
+       ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+                  ADF_DH895XCC_SMIA1_MASK);
+
+       ret = qat_dev_start(accel_dev);
+       if (ret) {
+               adf_dev_stop(accel_dev);
+               goto out_err;
+       }
+
+       return 0;
+out_err:
+       adf_cleanup_accel(accel_dev);
+       return ret;
+}
+
+static void adf_remove(struct pci_dev *pdev)
+{
+       struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);
+
+       if (!accel_dev) {
+               pr_err("QAT: Driver removal failed\n");
+               return;
+       }
+       if (adf_dev_stop(accel_dev))
+               dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
+       adf_disable_aer(accel_dev);
+       adf_cleanup_accel(accel_dev);
+}
+
+static int __init adfdrv_init(void)
+{
+       request_module("intel_qat");
+       if (qat_admin_register())
+               return -EFAULT;
+
+       if (pci_register_driver(&adf_driver)) {
+               pr_err("QAT: Driver initialization failed\n");
+               qat_admin_unregister();
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void __exit adfdrv_release(void)
+{
+       pci_unregister_driver(&adf_driver);
+       qat_admin_unregister();
+}
+
+module_init(adfdrv_init);
+module_exit(adfdrv_release);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Intel");
+MODULE_FIRMWARE("qat_895xcc.bin");
+MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.h b/drivers/crypto/qat/qat_dh895xcc/adf_drv.h
new file mode 100644 (file)
index 0000000..a2fbb6c
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef ADF_DH895x_DRV_H_
+#define ADF_DH895x_DRV_H_
+#include <adf_accel_devices.h>
+#include <adf_transport.h>
+
+void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data);
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev);
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev);
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring);
+void adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev,
+                            uint32_t const **arb_map_config);
+int adf_init_admin_comms(struct adf_accel_dev *accel_dev);
+void adf_exit_admin_comms(struct adf_accel_dev *accel_dev);
+int adf_put_admin_msg_sync(struct adf_accel_dev *accel_dev,
+                          uint32_t ae, void *in, void *out);
+int qat_admin_register(void);
+int qat_admin_unregister(void);
+int adf_init_arb(struct adf_accel_dev *accel_dev);
+void adf_exit_arb(struct adf_accel_dev *accel_dev);
+#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c b/drivers/crypto/qat/qat_dh895xcc/adf_hw_arbiter.c
new file mode 100644 (file)
index 0000000..1864bdb
--- /dev/null
@@ -0,0 +1,159 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <adf_accel_devices.h>
+#include <adf_transport_internal.h>
+#include "adf_drv.h"
+
+#define ADF_ARB_NUM 4
+#define ADF_ARB_REQ_RING_NUM 8
+#define ADF_ARB_REG_SIZE 0x4
+#define ADF_ARB_WTR_SIZE 0x20
+#define ADF_ARB_OFFSET 0x30000
+#define ADF_ARB_REG_SLOT 0x1000
+#define ADF_ARB_WTR_OFFSET 0x010
+#define ADF_ARB_RO_EN_OFFSET 0x090
+#define ADF_ARB_WQCFG_OFFSET 0x100
+#define ADF_ARB_WRK_2_SER_MAP_OFFSET 0x180
+#define ADF_ARB_WRK_2_SER_MAP 10
+#define ADF_ARB_RINGSRVARBEN_OFFSET 0x19C
+
+#define WRITE_CSR_ARB_RINGSRVARBEN(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_RINGSRVARBEN_OFFSET + \
+       (ADF_ARB_REG_SLOT * index), value)
+
+#define WRITE_CSR_ARB_RESPORDERING(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_RO_EN_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WEIGHT(csr_addr, arb, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WTR_OFFSET) + (ADF_ARB_WTR_SIZE * arb) + \
+       (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_SARCONFIG(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, ADF_ARB_OFFSET + \
+       (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WRK_2_SER_MAP(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WRK_2_SER_MAP_OFFSET) + \
+       (ADF_ARB_REG_SIZE * index), value)
+
+#define WRITE_CSR_ARB_WQCFG(csr_addr, index, value) \
+       ADF_CSR_WR(csr_addr, (ADF_ARB_OFFSET + \
+       ADF_ARB_WQCFG_OFFSET) + (ADF_ARB_REG_SIZE * index), value)
+
+int adf_init_arb(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *csr = accel_dev->transport->banks[0].csr_addr;
+       uint32_t arb_cfg = 0x1U << 31 | 0x4 << 4 | 0x1;
+       uint32_t arb, i;
+       const uint32_t *thd_2_arb_cfg;
+
+       /*
+        * The service arbiters are configured for 32-byte responses
+        * with the ring flow control check enabled.
+        */
+       for (arb = 0; arb < ADF_ARB_NUM; arb++)
+               WRITE_CSR_ARB_SARCONFIG(csr, arb, arb_cfg);
+
+       /* Set up service weighting */
+       for (arb = 0; arb < ADF_ARB_NUM; arb++)
+               for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+                       WRITE_CSR_ARB_WEIGHT(csr, arb, i, 0xFFFFFFFF);
+
+       /* Set up ring response ordering */
+       for (i = 0; i < ADF_ARB_REQ_RING_NUM; i++)
+               WRITE_CSR_ARB_RESPORDERING(csr, i, 0xFFFFFFFF);
+
+       /* Set up worker queue registers */
+       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+               WRITE_CSR_ARB_WQCFG(csr, i, i);
+
+       /* Map worker threads to service arbiters */
+       adf_get_arbiter_mapping(accel_dev, &thd_2_arb_cfg);
+
+       if (!thd_2_arb_cfg)
+               return -EFAULT;
+
+       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+               WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, *(thd_2_arb_cfg + i));
+
+       return 0;
+}
+
+void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)
+{
+       WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
+                                  ring->bank->bank_number,
+                                  ring->bank->ring_mask & 0xFF);
+}
+
+void adf_exit_arb(struct adf_accel_dev *accel_dev)
+{
+       void __iomem *csr;
+       unsigned int i;
+
+       if (!accel_dev->transport)
+               return;
+
+       csr = accel_dev->transport->banks[0].csr_addr;
+
+       /* Reset arbiter configuration */
+       for (i = 0; i < ADF_ARB_NUM; i++)
+               WRITE_CSR_ARB_SARCONFIG(csr, i, 0);
+
+       /* Shut down the work queue */
+       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+               WRITE_CSR_ARB_WQCFG(csr, i, 0);
+
+       /* Unmap worker threads from service arbiters */
+       for (i = 0; i < ADF_ARB_WRK_2_SER_MAP; i++)
+               WRITE_CSR_ARB_WRK_2_SER_MAP(csr, i, 0);
+
+       /* Disable arbitration on all rings */
+       for (i = 0; i < GET_MAX_BANKS(accel_dev); i++)
+               WRITE_CSR_ARB_RINGSRVARBEN(csr, i, 0);
+}
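
Every WRITE_CSR_ARB_* macro above boils down to a single ADF_CSR_WR at a fixed base plus per-index strides. A standalone userspace sketch of the same address arithmetic, reusing the constants from the #defines (the helper itself is illustrative, not driver code):

    #include <stdio.h>

    #define ADF_ARB_OFFSET     0x30000
    #define ADF_ARB_WTR_OFFSET 0x010
    #define ADF_ARB_WTR_SIZE   0x20
    #define ADF_ARB_REG_SIZE   0x4

    int main(void)
    {
            unsigned int arb = 2, index = 3;
            /* Same arithmetic as WRITE_CSR_ARB_WEIGHT(csr, 2, 3, value) */
            unsigned int off = ADF_ARB_OFFSET + ADF_ARB_WTR_OFFSET +
                               ADF_ARB_WTR_SIZE * arb +
                               ADF_ARB_REG_SIZE * index;

            printf("0x%x\n", off);   /* prints 0x3005c */
            return 0;
    }

Each arbiter owns a 0x20-byte block of weight registers, one 4-byte register per request ring, so arbiter 2 / ring 3 lands at 0x30000 + 0x10 + 0x40 + 0xc = 0x3005c.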
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
new file mode 100644 (file)
index 0000000..d4172de
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include <adf_cfg.h>
+#include <adf_cfg_strings.h>
+#include <adf_cfg_common.h>
+#include <adf_transport_access_macros.h>
+#include <adf_transport_internal.h>
+#include "adf_drv.h"
+
+static int adf_enable_msix(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t msix_num_entries = hw_data->num_banks + 1;
+       int i;
+
+       for (i = 0; i < msix_num_entries; i++)
+               pci_dev_info->msix_entries.entries[i].entry = i;
+
+       if (pci_enable_msix(pci_dev_info->pci_dev,
+                           pci_dev_info->msix_entries.entries,
+                           msix_num_entries)) {
+               pr_err("QAT: Failed to enable MSIX IRQ\n");
+               return -EFAULT;
+       }
+       return 0;
+}
+
+static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
+{
+       pci_disable_msix(pci_dev_info->pci_dev);
+}
+
+static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
+{
+       struct adf_etr_bank_data *bank = bank_ptr;
+
+       WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number, 0);
+       tasklet_hi_schedule(&bank->resp_hanlder);
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
+{
+       struct adf_accel_dev *accel_dev = dev_ptr;
+
+       pr_info("QAT: qat_dev%d spurious AE interrupt\n", accel_dev->accel_id);
+       return IRQ_HANDLED;
+}
+
+static int adf_request_irqs(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       int ret, i;
+       char *name;
+
+       /* Request an MSI-X IRQ for each bank */
+       for (i = 0; i < hw_data->num_banks; i++) {
+               struct adf_etr_bank_data *bank = &etr_data->banks[i];
+               unsigned int cpu, cpus = num_online_cpus();
+
+               name = *(pci_dev_info->msix_entries.names + i);
+               snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                        "qat%d-bundle%d", accel_dev->accel_id, i);
+               ret = request_irq(msixe[i].vector,
+                                 adf_msix_isr_bundle, 0, name, bank);
+               if (ret) {
+                       pr_err("QAT: failed to enable irq %d for %s\n",
+                              msixe[i].vector, name);
+                       goto err_free_bundles;
+               }
+
+               cpu = ((accel_dev->accel_id * hw_data->num_banks) + i) % cpus;
+               irq_set_affinity_hint(msixe[i].vector, get_cpu_mask(cpu));
+       }
+
+       /* Request the MSI-X IRQ for the AE cluster */
+       name = *(pci_dev_info->msix_entries.names + i);
+       snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
+                "qat%d-ae-cluster", accel_dev->accel_id);
+       ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+       if (ret) {
+               pr_err("QAT: failed to enable irq %d for %s\n",
+                      msixe[i].vector, name);
+               goto err_free_bundles;
+       }
+       return 0;
+
+err_free_bundles:
+       /* Roll back the bundle IRQs that were already requested */
+       while (i--) {
+               irq_set_affinity_hint(msixe[i].vector, NULL);
+               free_irq(msixe[i].vector, &etr_data->banks[i]);
+       }
+       return ret;
+}
+
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+       struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+       struct adf_etr_data *etr_data = accel_dev->transport;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++) {
+               irq_set_affinity_hint(msixe[i].vector, NULL);
+               free_irq(msixe[i].vector, &etr_data->banks[i]);
+       }
+       irq_set_affinity_hint(msixe[i].vector, NULL);
+       free_irq(msixe[i].vector, accel_dev);
+}
+
+static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+       int i;
+       char **names;
+       struct msix_entry *entries;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t msix_num_entries = hw_data->num_banks + 1;
+
+       entries = kzalloc_node(msix_num_entries * sizeof(*entries),
+                              GFP_KERNEL, accel_dev->numa_node);
+       if (!entries)
+               return -ENOMEM;
+
+       names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
+       if (!names) {
+               kfree(entries);
+               return -ENOMEM;
+       }
+       for (i = 0; i < msix_num_entries; i++) {
+               *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
+               if (!(*(names + i)))
+                       goto err;
+       }
+       accel_dev->accel_pci_dev.msix_entries.entries = entries;
+       accel_dev->accel_pci_dev.msix_entries.names = names;
+       return 0;
+err:
+       /* kfree() tolerates NULL, so unallocated slots are safe to free */
+       for (i = 0; i < msix_num_entries; i++)
+               kfree(*(names + i));
+       kfree(entries);
+       kfree(names);
+       return -ENOMEM;
+}
+
+static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+{
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       uint32_t msix_num_entries = hw_data->num_banks + 1;
+       char **names = accel_dev->accel_pci_dev.msix_entries.names;
+       int i;
+
+       kfree(accel_dev->accel_pci_dev.msix_entries.entries);
+       for (i = 0; i < msix_num_entries; i++)
+               kfree(*(names + i));
+       kfree(names);
+}
+
+static int adf_setup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++)
+               tasklet_init(&priv_data->banks[i].resp_hanlder,
+                            adf_response_handler,
+                            (unsigned long)&priv_data->banks[i]);
+       return 0;
+}
+
+static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
+{
+       struct adf_etr_data *priv_data = accel_dev->transport;
+       struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+       int i;
+
+       for (i = 0; i < hw_data->num_banks; i++) {
+               tasklet_disable(&priv_data->banks[i].resp_hanlder);
+               tasklet_kill(&priv_data->banks[i].resp_hanlder);
+       }
+}
+
+void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
+{
+       adf_free_irqs(accel_dev);
+       adf_cleanup_bh(accel_dev);
+       adf_disable_msix(&accel_dev->accel_pci_dev);
+       adf_isr_free_msix_entry_table(accel_dev);
+}
+
+int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
+{
+       int ret;
+
+       ret = adf_isr_alloc_msix_entry_table(accel_dev);
+       if (ret)
+               return ret;
+       if (adf_enable_msix(accel_dev))
+               goto err_free_table;
+
+       if (adf_setup_bh(accel_dev))
+               goto err_disable_msix;
+
+       if (adf_request_irqs(accel_dev))
+               goto err_cleanup_bh;
+
+       return 0;
+
+err_cleanup_bh:
+       /* Unwind in reverse order; adf_request_irqs() cleans up after itself */
+       adf_cleanup_bh(accel_dev);
+err_disable_msix:
+       adf_disable_msix(&accel_dev->accel_pci_dev);
+err_free_table:
+       adf_isr_free_msix_entry_table(accel_dev);
+       return -EFAULT;
+}
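
The MSI-X table laid out by adf_isr_alloc_msix_entry_table() holds one vector per ring bank plus a trailing vector for the AE cluster, and adf_request_irqs() spreads the bank handlers across online CPUs with a plain modulo. A userspace sketch of that spreading (device and CPU counts are illustrative):

    #include <stdio.h>

    /* Mirrors the affinity computation in adf_request_irqs(). */
    static unsigned int bundle_cpu(unsigned int accel_id,
                                   unsigned int num_banks,
                                   unsigned int bank, unsigned int cpus)
    {
            return (accel_id * num_banks + bank) % cpus;
    }

    int main(void)
    {
            unsigned int id, bank;

            /* Two accelerators with 4 banks each on a 6-CPU machine. */
            for (id = 0; id < 2; id++)
                    for (bank = 0; bank < 4; bank++)
                            printf("qat%u-bundle%u -> cpu%u\n",
                                   id, bank, bundle_cpu(id, 4, bank, 6));
            return 0;
    }

Multiplying by num_banks before the modulo staggers consecutive devices, so two accelerators do not pile all of their bundle interrupts onto the same CPUs unless the CPU count divides evenly.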
diff --git a/drivers/crypto/qat/qat_dh895xcc/qat_admin.c b/drivers/crypto/qat/qat_dh895xcc/qat_admin.c
new file mode 100644 (file)
index 0000000..55b7a8e
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+  This file is provided under a dual BSD/GPLv2 license.  When using or
+  redistributing this file, you may do so under either license.
+
+  GPL LICENSE SUMMARY
+  Copyright(c) 2014 Intel Corporation.
+  This program is free software; you can redistribute it and/or modify
+  it under the terms of version 2 of the GNU General Public License as
+  published by the Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  Contact Information:
+  qat-linux@intel.com
+
+  BSD LICENSE
+  Copyright(c) 2014 Intel Corporation.
+  Redistribution and use in source and binary forms, with or without
+  modification, are permitted provided that the following conditions
+  are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <icp_qat_fw_init_admin.h>
+#include <adf_accel_devices.h>
+#include <adf_common_drv.h>
+#include "adf_drv.h"
+
+static struct service_hndl qat_admin;
+
+static int qat_send_admin_cmd(struct adf_accel_dev *accel_dev, int cmd)
+{
+       struct adf_hw_device_data *hw_device = accel_dev->hw_device;
+       struct icp_qat_fw_init_admin_req req;
+       struct icp_qat_fw_init_admin_resp resp;
+       int i;
+
+       memset(&req, 0, sizeof(struct icp_qat_fw_init_admin_req));
+       req.init_admin_cmd_id = cmd;
+       for (i = 0; i < hw_device->get_num_aes(hw_device); i++) {
+               memset(&resp, 0, sizeof(struct icp_qat_fw_init_admin_resp));
+               if (adf_put_admin_msg_sync(accel_dev, i, &req, &resp) ||
+                   resp.init_resp_hdr.status)
+                       return -EFAULT;
+       }
+       return 0;
+}
+
+static int qat_admin_start(struct adf_accel_dev *accel_dev)
+{
+       return qat_send_admin_cmd(accel_dev, ICP_QAT_FW_INIT_ME);
+}
+
+static int qat_admin_event_handler(struct adf_accel_dev *accel_dev,
+                                  enum adf_event event)
+{
+       int ret;
+
+       switch (event) {
+       case ADF_EVENT_START:
+               ret = qat_admin_start(accel_dev);
+               break;
+       case ADF_EVENT_STOP:
+       case ADF_EVENT_INIT:
+       case ADF_EVENT_SHUTDOWN:
+       default:
+               ret = 0;
+       }
+       return ret;
+}
+
+int qat_admin_register(void)
+{
+       memset(&qat_admin, 0, sizeof(struct service_hndl));
+       qat_admin.event_hld = qat_admin_event_handler;
+       qat_admin.name = "qat_admin";
+       qat_admin.admin = 1;
+       return adf_service_register(&qat_admin);
+}
+
+int qat_admin_unregister(void)
+{
+       return adf_service_unregister(&qat_admin);
+}
diff --git a/drivers/crypto/qce/Makefile b/drivers/crypto/qce/Makefile
new file mode 100644 (file)
index 0000000..348dc31
--- /dev/null
@@ -0,0 +1,6 @@
+obj-$(CONFIG_CRYPTO_DEV_QCE) += qcrypto.o
+qcrypto-objs := core.o \
+               common.o \
+               dma.o \
+               sha.o \
+               ablkcipher.o
diff --git a/drivers/crypto/qce/ablkcipher.c b/drivers/crypto/qce/ablkcipher.c
new file mode 100644 (file)
index 0000000..ad592de
--- /dev/null
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/algapi.h>
+#include <crypto/des.h>
+
+#include "cipher.h"
+
+static LIST_HEAD(ablkcipher_algs);
+
+static void qce_ablkcipher_done(void *data)
+{
+       struct crypto_async_request *async_req = data;
+       struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+       struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+       struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       enum dma_data_direction dir_src, dir_dst;
+       u32 status;
+       int error;
+       bool diff_dst;
+
+       diff_dst = req->src != req->dst;
+       dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+       dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+       error = qce_dma_terminate_all(&qce->dma);
+       if (error)
+               dev_dbg(qce->dev, "ablkcipher dma termination error (%d)\n",
+                       error);
+
+       if (diff_dst)
+               qce_unmapsg(qce->dev, rctx->src_sg, rctx->src_nents, dir_src,
+                           rctx->dst_chained);
+       qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+                   rctx->dst_chained);
+
+       sg_free_table(&rctx->dst_tbl);
+
+       error = qce_check_status(qce, &status);
+       if (error < 0)
+               dev_dbg(qce->dev, "ablkcipher operation error (%x)\n", status);
+
+       qce->async_req_done(tmpl->qce, error);
+}
+
+static int
+qce_ablkcipher_async_req_handle(struct crypto_async_request *async_req)
+{
+       struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+       struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+       struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+       struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       enum dma_data_direction dir_src, dir_dst;
+       struct scatterlist *sg;
+       bool diff_dst;
+       gfp_t gfp;
+       int ret;
+
+       rctx->iv = req->info;
+       rctx->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+       rctx->cryptlen = req->nbytes;
+
+       diff_dst = req->src != req->dst;
+       dir_src = diff_dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;
+       dir_dst = diff_dst ? DMA_FROM_DEVICE : DMA_BIDIRECTIONAL;
+
+       rctx->src_nents = qce_countsg(req->src, req->nbytes,
+                                     &rctx->src_chained);
+       if (diff_dst) {
+               rctx->dst_nents = qce_countsg(req->dst, req->nbytes,
+                                             &rctx->dst_chained);
+       } else {
+               rctx->dst_nents = rctx->src_nents;
+               rctx->dst_chained = rctx->src_chained;
+       }
+
+       rctx->dst_nents += 1;
+
+       gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
+                                               GFP_KERNEL : GFP_ATOMIC;
+
+       ret = sg_alloc_table(&rctx->dst_tbl, rctx->dst_nents, gfp);
+       if (ret)
+               return ret;
+
+       sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+       sg = qce_sgtable_add(&rctx->dst_tbl, req->dst);
+       if (IS_ERR(sg)) {
+               ret = PTR_ERR(sg);
+               goto error_free;
+       }
+
+       sg = qce_sgtable_add(&rctx->dst_tbl, &rctx->result_sg);
+       if (IS_ERR(sg)) {
+               ret = PTR_ERR(sg);
+               goto error_free;
+       }
+
+       sg_mark_end(sg);
+       rctx->dst_sg = rctx->dst_tbl.sgl;
+
+       ret = qce_mapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+                       rctx->dst_chained);
+       if (ret < 0)
+               goto error_free;
+
+       if (diff_dst) {
+               ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src,
+                               rctx->src_chained);
+               if (ret < 0)
+                       goto error_unmap_dst;
+               rctx->src_sg = req->src;
+       } else {
+               rctx->src_sg = rctx->dst_sg;
+       }
+
+       ret = qce_dma_prep_sgs(&qce->dma, rctx->src_sg, rctx->src_nents,
+                              rctx->dst_sg, rctx->dst_nents,
+                              qce_ablkcipher_done, async_req);
+       if (ret)
+               goto error_unmap_src;
+
+       qce_dma_issue_pending(&qce->dma);
+
+       ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0);
+       if (ret)
+               goto error_terminate;
+
+       return 0;
+
+error_terminate:
+       qce_dma_terminate_all(&qce->dma);
+error_unmap_src:
+       if (diff_dst)
+               qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src,
+                           rctx->src_chained);
+error_unmap_dst:
+       qce_unmapsg(qce->dev, rctx->dst_sg, rctx->dst_nents, dir_dst,
+                   rctx->dst_chained);
+error_free:
+       sg_free_table(&rctx->dst_tbl);
+       return ret;
+}
+
+static int qce_ablkcipher_setkey(struct crypto_ablkcipher *ablk, const u8 *key,
+                                unsigned int keylen)
+{
+       struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablk);
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       unsigned long flags = to_cipher_tmpl(tfm)->alg_flags;
+       int ret;
+
+       if (!key || !keylen)
+               return -EINVAL;
+
+       if (IS_AES(flags)) {
+               switch (keylen) {
+               case AES_KEYSIZE_128:
+               case AES_KEYSIZE_256:
+                       break;
+               default:
+                       goto fallback;
+               }
+       } else if (IS_DES(flags)) {
+               u32 tmp[DES_EXPKEY_WORDS];
+
+               ret = des_ekey(tmp, key);
+               if (!ret && crypto_ablkcipher_get_flags(ablk) &
+                   CRYPTO_TFM_REQ_WEAK_KEY)
+                       goto weakkey;
+       }
+
+       ctx->enc_keylen = keylen;
+       memcpy(ctx->enc_key, key, keylen);
+       return 0;
+fallback:
+       ret = crypto_ablkcipher_setkey(ctx->fallback, key, keylen);
+       if (!ret)
+               ctx->enc_keylen = keylen;
+       return ret;
+weakkey:
+       crypto_ablkcipher_set_flags(ablk, CRYPTO_TFM_RES_WEAK_KEY);
+       return -EINVAL;
+}
+
+static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt)
+{
+       struct crypto_tfm *tfm =
+                       crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req));
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+       struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+       struct qce_alg_template *tmpl = to_cipher_tmpl(tfm);
+       int ret;
+
+       rctx->flags = tmpl->alg_flags;
+       rctx->flags |= encrypt ? QCE_ENCRYPT : QCE_DECRYPT;
+
+       if (IS_AES(rctx->flags) && ctx->enc_keylen != AES_KEYSIZE_128 &&
+           ctx->enc_keylen != AES_KEYSIZE_256) {
+               ablkcipher_request_set_tfm(req, ctx->fallback);
+               ret = encrypt ? crypto_ablkcipher_encrypt(req) :
+                               crypto_ablkcipher_decrypt(req);
+               ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));
+               return ret;
+       }
+
+       return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+       return qce_ablkcipher_crypt(req, 1);
+}
+
+static int qce_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+       return qce_ablkcipher_crypt(req, 0);
+}
+
+static int qce_ablkcipher_init(struct crypto_tfm *tfm)
+{
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       memset(ctx, 0, sizeof(*ctx));
+       tfm->crt_ablkcipher.reqsize = sizeof(struct qce_cipher_reqctx);
+
+       ctx->fallback = crypto_alloc_ablkcipher(crypto_tfm_alg_name(tfm),
+                                               CRYPTO_ALG_TYPE_ABLKCIPHER,
+                                               CRYPTO_ALG_ASYNC |
+                                               CRYPTO_ALG_NEED_FALLBACK);
+       if (IS_ERR(ctx->fallback))
+               return PTR_ERR(ctx->fallback);
+
+       return 0;
+}
+
+static void qce_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_free_ablkcipher(ctx->fallback);
+}
+
+struct qce_ablkcipher_def {
+       unsigned long flags;
+       const char *name;
+       const char *drv_name;
+       unsigned int blocksize;
+       unsigned int ivsize;
+       unsigned int min_keysize;
+       unsigned int max_keysize;
+};
+
+static const struct qce_ablkcipher_def ablkcipher_def[] = {
+       {
+               .flags          = QCE_ALG_AES | QCE_MODE_ECB,
+               .name           = "ecb(aes)",
+               .drv_name       = "ecb-aes-qce",
+               .blocksize      = AES_BLOCK_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_AES | QCE_MODE_CBC,
+               .name           = "cbc(aes)",
+               .drv_name       = "cbc-aes-qce",
+               .blocksize      = AES_BLOCK_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_AES | QCE_MODE_CTR,
+               .name           = "ctr(aes)",
+               .drv_name       = "ctr-aes-qce",
+               .blocksize      = AES_BLOCK_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_AES | QCE_MODE_XTS,
+               .name           = "xts(aes)",
+               .drv_name       = "xts-aes-qce",
+               .blocksize      = AES_BLOCK_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_DES | QCE_MODE_ECB,
+               .name           = "ecb(des)",
+               .drv_name       = "ecb-des-qce",
+               .blocksize      = DES_BLOCK_SIZE,
+               .ivsize         = 0,
+               .min_keysize    = DES_KEY_SIZE,
+               .max_keysize    = DES_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_DES | QCE_MODE_CBC,
+               .name           = "cbc(des)",
+               .drv_name       = "cbc-des-qce",
+               .blocksize      = DES_BLOCK_SIZE,
+               .ivsize         = DES_BLOCK_SIZE,
+               .min_keysize    = DES_KEY_SIZE,
+               .max_keysize    = DES_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_3DES | QCE_MODE_ECB,
+               .name           = "ecb(des3_ede)",
+               .drv_name       = "ecb-3des-qce",
+               .blocksize      = DES3_EDE_BLOCK_SIZE,
+               .ivsize         = 0,
+               .min_keysize    = DES3_EDE_KEY_SIZE,
+               .max_keysize    = DES3_EDE_KEY_SIZE,
+       },
+       {
+               .flags          = QCE_ALG_3DES | QCE_MODE_CBC,
+               .name           = "cbc(des3_ede)",
+               .drv_name       = "cbc-3des-qce",
+               .blocksize      = DES3_EDE_BLOCK_SIZE,
+               .ivsize         = DES3_EDE_BLOCK_SIZE,
+               .min_keysize    = DES3_EDE_KEY_SIZE,
+               .max_keysize    = DES3_EDE_KEY_SIZE,
+       },
+};
+
+static int qce_ablkcipher_register_one(const struct qce_ablkcipher_def *def,
+                                      struct qce_device *qce)
+{
+       struct qce_alg_template *tmpl;
+       struct crypto_alg *alg;
+       int ret;
+
+       tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+       if (!tmpl)
+               return -ENOMEM;
+
+       alg = &tmpl->alg.crypto;
+
+       snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+       snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+                def->drv_name);
+
+       alg->cra_blocksize = def->blocksize;
+       alg->cra_ablkcipher.ivsize = def->ivsize;
+       alg->cra_ablkcipher.min_keysize = def->min_keysize;
+       alg->cra_ablkcipher.max_keysize = def->max_keysize;
+       alg->cra_ablkcipher.setkey = qce_ablkcipher_setkey;
+       alg->cra_ablkcipher.encrypt = qce_ablkcipher_encrypt;
+       alg->cra_ablkcipher.decrypt = qce_ablkcipher_decrypt;
+
+       alg->cra_priority = 300;
+       alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC |
+                        CRYPTO_ALG_NEED_FALLBACK;
+       alg->cra_ctxsize = sizeof(struct qce_cipher_ctx);
+       alg->cra_alignmask = 0;
+       alg->cra_type = &crypto_ablkcipher_type;
+       alg->cra_module = THIS_MODULE;
+       alg->cra_init = qce_ablkcipher_init;
+       alg->cra_exit = qce_ablkcipher_exit;
+       INIT_LIST_HEAD(&alg->cra_list);
+
+       INIT_LIST_HEAD(&tmpl->entry);
+       tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_ABLKCIPHER;
+       tmpl->alg_flags = def->flags;
+       tmpl->qce = qce;
+
+       ret = crypto_register_alg(alg);
+       if (ret) {
+               kfree(tmpl);
+               dev_err(qce->dev, "%s registration failed\n", alg->cra_name);
+               return ret;
+       }
+
+       list_add_tail(&tmpl->entry, &ablkcipher_algs);
+       dev_dbg(qce->dev, "%s is registered\n", alg->cra_name);
+       return 0;
+}
+
+static void qce_ablkcipher_unregister(struct qce_device *qce)
+{
+       struct qce_alg_template *tmpl, *n;
+
+       list_for_each_entry_safe(tmpl, n, &ablkcipher_algs, entry) {
+               crypto_unregister_alg(&tmpl->alg.crypto);
+               list_del(&tmpl->entry);
+               kfree(tmpl);
+       }
+}
+
+static int qce_ablkcipher_register(struct qce_device *qce)
+{
+       int ret, i;
+
+       for (i = 0; i < ARRAY_SIZE(ablkcipher_def); i++) {
+               ret = qce_ablkcipher_register_one(&ablkcipher_def[i], qce);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+err:
+       qce_ablkcipher_unregister(qce);
+       return ret;
+}
+
+const struct qce_algo_ops ablkcipher_ops = {
+       .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+       .register_algs = qce_ablkcipher_register,
+       .unregister_algs = qce_ablkcipher_unregister,
+       .async_req_handle = qce_ablkcipher_async_req_handle,
+};
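
Both qce_ablkcipher_setkey() and qce_ablkcipher_crypt() gate on the AES key length: 128- and 256-bit keys run on the engine, while anything else (notably AES-192) is handed to the software fallback allocated in qce_ablkcipher_init(). A minimal sketch of that dispatch (the helper is illustrative, not driver code):

    #include <stdio.h>

    #define AES_KEYSIZE_128 16
    #define AES_KEYSIZE_256 32

    /* Mirrors the keylen checks in qce_ablkcipher_setkey()/_crypt(). */
    static const char *qce_aes_path(unsigned int keylen)
    {
            if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_256)
                    return "qce hardware";
            return "software fallback";
    }

    int main(void)
    {
            unsigned int sizes[] = { 16, 24, 32 };
            unsigned int i;

            for (i = 0; i < 3; i++)
                    printf("%u-byte key -> %s\n",
                           sizes[i], qce_aes_path(sizes[i]));
            return 0;
    }

The CRYPTO_ALG_NEED_FALLBACK mask passed in qce_ablkcipher_init() ensures the allocated fallback is a full software implementation rather than another driver that itself needs a fallback, so users see a uniform 16..32 byte keysize range.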
diff --git a/drivers/crypto/qce/cipher.h b/drivers/crypto/qce/cipher.h
new file mode 100644 (file)
index 0000000..d5757cf
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CIPHER_H_
+#define _CIPHER_H_
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_MAX_KEY_SIZE       64
+
+struct qce_cipher_ctx {
+       u8 enc_key[QCE_MAX_KEY_SIZE];
+       unsigned int enc_keylen;
+       struct crypto_ablkcipher *fallback;
+};
+
+/**
+ * struct qce_cipher_reqctx - holds private cipher objects per request
+ * @flags: operation flags
+ * @iv: pointer to the IV
+ * @ivsize: IV size
+ * @src_nents: source entries
+ * @dst_nents: destination entries
+ * @src_chained: is source chained
+ * @dst_chained: is destination chained
+ * @result_sg: scatterlist used for result buffer
+ * @dst_tbl: destination sg table
+ * @dst_sg: destination sg pointer table beginning
+ * @src_tbl: source sg table
+ * @src_sg: source sg pointer table beginning
+ * @cryptlen: crypto length
+ */
+struct qce_cipher_reqctx {
+       unsigned long flags;
+       u8 *iv;
+       unsigned int ivsize;
+       int src_nents;
+       int dst_nents;
+       bool src_chained;
+       bool dst_chained;
+       struct scatterlist result_sg;
+       struct sg_table dst_tbl;
+       struct scatterlist *dst_sg;
+       struct sg_table src_tbl;
+       struct scatterlist *src_sg;
+       unsigned int cryptlen;
+};
+
+static inline struct qce_alg_template *to_cipher_tmpl(struct crypto_tfm *tfm)
+{
+       struct crypto_alg *alg = tfm->__crt_alg;
+
+       return container_of(alg, struct qce_alg_template, alg.crypto);
+}
+
+extern const struct qce_algo_ops ablkcipher_ops;
+
+#endif /* _CIPHER_H_ */
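
to_cipher_tmpl() recovers the enclosing qce_alg_template from the crypto_alg embedded in its alg union; this is the usual container_of pattern. A self-contained illustration with hypothetical types (only the pointer arithmetic matters):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct tmpl {
            int flags;
            struct { int dummy; } alg;   /* stands in for the union */
    };

    int main(void)
    {
            struct tmpl t = { .flags = 42 };
            /* Subtracting the member offset walks back to the template. */
            struct tmpl *back = container_of(&t.alg, struct tmpl, alg);

            printf("%d\n", back->flags);   /* prints 42 */
            return 0;
    }

This works because crypto_register_alg() only ever hands the core a struct crypto_alg pointer; the driver gets its private template back by subtracting the member's offset.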
diff --git a/drivers/crypto/qce/common.c b/drivers/crypto/qce/common.c
new file mode 100644 (file)
index 0000000..1fb5fde
--- /dev/null
@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "cipher.h"
+#include "common.h"
+#include "core.h"
+#include "regs-v5.h"
+#include "sha.h"
+
+#define QCE_SECTOR_SIZE                512
+
+static inline u32 qce_read(struct qce_device *qce, u32 offset)
+{
+       return readl(qce->base + offset);
+}
+
+static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
+{
+       writel(val, qce->base + offset);
+}
+
+static inline void qce_write_array(struct qce_device *qce, u32 offset,
+                                  const u32 *val, unsigned int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++)
+               qce_write(qce, offset + i * sizeof(u32), val[i]);
+}
+
+static inline void
+qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
+{
+       int i;
+
+       for (i = 0; i < len; i++)
+               qce_write(qce, offset + i * sizeof(u32), 0);
+}
+
+static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
+{
+       u32 cfg = 0;
+
+       if (IS_AES(flags)) {
+               if (aes_key_size == AES_KEYSIZE_128)
+                       cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
+               else if (aes_key_size == AES_KEYSIZE_256)
+                       cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
+       }
+
+       if (IS_AES(flags))
+               cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
+       else if (IS_DES(flags) || IS_3DES(flags))
+               cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;
+
+       if (IS_DES(flags))
+               cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;
+
+       if (IS_3DES(flags))
+               cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;
+
+       switch (flags & QCE_MODE_MASK) {
+       case QCE_MODE_ECB:
+               cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
+               break;
+       case QCE_MODE_CBC:
+               cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
+               break;
+       case QCE_MODE_CTR:
+               cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
+               break;
+       case QCE_MODE_XTS:
+               cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
+               break;
+       case QCE_MODE_CCM:
+               cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
+               cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
+               break;
+       default:
+               return ~0;
+       }
+
+       return cfg;
+}
+
+static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
+{
+       u32 cfg = 0;
+
+       if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
+               cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
+       else
+               cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;
+
+       if (IS_CCM(flags) || IS_CMAC(flags)) {
+               if (key_size == AES_KEYSIZE_128)
+                       cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
+               else if (key_size == AES_KEYSIZE_256)
+                       cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
+       }
+
+       if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
+               cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
+       else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
+               cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
+       else if (IS_CMAC(flags))
+               cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;
+
+       if (IS_SHA1(flags) || IS_SHA256(flags))
+               cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
+       else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
+                IS_CBC(flags) || IS_CTR(flags))
+               cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
+       else if (IS_AES(flags) && IS_CCM(flags))
+               cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
+       else if (IS_AES(flags) && IS_CMAC(flags))
+               cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;
+
+       if (IS_SHA(flags) || IS_SHA_HMAC(flags))
+               cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;
+
+       if (IS_CCM(flags))
+               cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;
+
+       if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
+           IS_CMAC(flags))
+               cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);
+
+       return cfg;
+}
+
+static u32 qce_config_reg(struct qce_device *qce, int little)
+{
+       u32 beats = (qce->burst_size >> 3) - 1;
+       u32 pipe_pair = qce->pipe_pair_id;
+       u32 config;
+
+       config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
+       config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
+                 BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
+       config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
+       /* clear the (active-low) high-speed enable bit */
+       config &= ~BIT(HIGH_SPD_EN_N_SHIFT);
+
+       if (little)
+               config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);
+
+       return config;
+}
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
+{
+       __be32 *d = dst;
+       const u8 *s = src;
+       unsigned int n;
+
+       n = len / sizeof(u32);
+       for (; n > 0; n--) {
+               *d = cpu_to_be32p((const __u32 *) s);
+               s += sizeof(__u32);
+               d++;
+       }
+}
+
+static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
+{
+       u8 swap[QCE_AES_IV_LENGTH];
+       u32 i, j;
+
+       if (ivsize > QCE_AES_IV_LENGTH)
+               return;
+
+       memset(swap, 0, QCE_AES_IV_LENGTH);
+
+       for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
+            i < QCE_AES_IV_LENGTH; i++, j--)
+               swap[i] = src[j];
+
+       qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
+}
+
+static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
+                      unsigned int enckeylen, unsigned int cryptlen)
+{
+       u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
+       unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
+       unsigned int xtsdusize;
+
+       qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
+                              enckeylen / 2);
+       qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);
+
+       /* XTS data unit size is 512 bytes */
+       xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
+       qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
+}
+
+static void qce_setup_config(struct qce_device *qce)
+{
+       u32 config;
+
+       /* build the default big-endian configuration */
+       config = qce_config_reg(qce, 0);
+
+       /* clear status */
+       qce_write(qce, REG_STATUS, 0);
+       qce_write(qce, REG_CONFIG, config);
+}
+
+static inline void qce_crypto_go(struct qce_device *qce)
+{
+       qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
+}
+
+static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
+                               u32 totallen, u32 offset)
+{
+       struct ahash_request *req = ahash_request_cast(async_req);
+       struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       unsigned int digestsize = crypto_ahash_digestsize(ahash);
+       unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
+       __be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
+       __be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
+       u32 auth_cfg = 0, config;
+       unsigned int iv_words;
+
+       /*
+        * If this is not the last block, the size must be a multiple of
+        * the block size.
+        */
+       if (!rctx->last_blk && req->nbytes % blocksize)
+               return -EINVAL;
+
+       qce_setup_config(qce);
+
+       if (IS_CMAC(rctx->flags)) {
+               qce_write(qce, REG_AUTH_SEG_CFG, 0);
+               qce_write(qce, REG_ENCR_SEG_CFG, 0);
+               qce_write(qce, REG_ENCR_SEG_SIZE, 0);
+               qce_clear_array(qce, REG_AUTH_IV0, 16);
+               qce_clear_array(qce, REG_AUTH_KEY0, 16);
+               qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+
+               auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
+       }
+
+       if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
+               u32 authkey_words = rctx->authklen / sizeof(u32);
+
+               qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
+               qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
+                               authkey_words);
+       }
+
+       if (IS_CMAC(rctx->flags))
+               goto go_proc;
+
+       if (rctx->first_blk)
+               memcpy(auth, rctx->digest, digestsize);
+       else
+               qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);
+
+       iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
+       qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);
+
+       if (rctx->first_blk)
+               qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
+       else
+               qce_write_array(qce, REG_AUTH_BYTECNT0,
+                               (u32 *)rctx->byte_count, 2);
+
+       auth_cfg = qce_auth_cfg(rctx->flags, 0);
+
+       if (rctx->last_blk)
+               auth_cfg |= BIT(AUTH_LAST_SHIFT);
+       else
+               auth_cfg &= ~BIT(AUTH_LAST_SHIFT);
+
+       if (rctx->first_blk)
+               auth_cfg |= BIT(AUTH_FIRST_SHIFT);
+       else
+               auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);
+
+go_proc:
+       qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+       qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
+       qce_write(qce, REG_AUTH_SEG_START, 0);
+       qce_write(qce, REG_ENCR_SEG_CFG, 0);
+       qce_write(qce, REG_SEG_SIZE, req->nbytes);
+
+       /* switch the device to little-endian mode */
+       config = qce_config_reg(qce, 1);
+       qce_write(qce, REG_CONFIG, config);
+
+       qce_crypto_go(qce);
+
+       return 0;
+}
+
+static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
+                                    u32 totallen, u32 offset)
+{
+       struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
+       struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
+       struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+       struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       __be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
+       __be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
+       unsigned int enckey_words, enciv_words;
+       unsigned int keylen;
+       u32 encr_cfg = 0, auth_cfg = 0, config;
+       unsigned int ivsize = rctx->ivsize;
+       unsigned long flags = rctx->flags;
+
+       qce_setup_config(qce);
+
+       if (IS_XTS(flags))
+               keylen = ctx->enc_keylen / 2;
+       else
+               keylen = ctx->enc_keylen;
+
+       qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
+       enckey_words = keylen / sizeof(u32);
+
+       qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
+
+       encr_cfg = qce_encr_cfg(flags, keylen);
+
+       if (IS_DES(flags)) {
+               enciv_words = 2;
+               enckey_words = 2;
+       } else if (IS_3DES(flags)) {
+               enciv_words = 2;
+               enckey_words = 6;
+       } else if (IS_AES(flags)) {
+               if (IS_XTS(flags))
+                       qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
+                                  rctx->cryptlen);
+               enciv_words = 4;
+       } else {
+               return -EINVAL;
+       }
+
+       qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);
+
+       if (!IS_ECB(flags)) {
+               if (IS_XTS(flags))
+                       qce_xts_swapiv(enciv, rctx->iv, ivsize);
+               else
+                       qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);
+
+               qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
+       }
+
+       if (IS_ENCRYPT(flags))
+               encr_cfg |= BIT(ENCODE_SHIFT);
+
+       qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
+       qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
+       qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);
+
+       if (IS_CTR(flags)) {
+               qce_write(qce, REG_CNTR_MASK, ~0);
+               qce_write(qce, REG_CNTR_MASK0, ~0);
+               qce_write(qce, REG_CNTR_MASK1, ~0);
+               qce_write(qce, REG_CNTR_MASK2, ~0);
+       }
+
+       qce_write(qce, REG_SEG_SIZE, totallen);
+
+       /* switch the device to little-endian mode */
+       config = qce_config_reg(qce, 1);
+       qce_write(qce, REG_CONFIG, config);
+
+       qce_crypto_go(qce);
+
+       return 0;
+}
+
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+             u32 offset)
+{
+       switch (type) {
+       case CRYPTO_ALG_TYPE_ABLKCIPHER:
+               return qce_setup_regs_ablkcipher(async_req, totallen, offset);
+       case CRYPTO_ALG_TYPE_AHASH:
+               return qce_setup_regs_ahash(async_req, totallen, offset);
+       default:
+               return -EINVAL;
+       }
+}
+
+#define STATUS_ERRORS  \
+               (BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
+
+int qce_check_status(struct qce_device *qce, u32 *status)
+{
+       int ret = 0;
+
+       *status = qce_read(qce, REG_STATUS);
+
+       /*
+        * Don't use the result dump status; the operation may not be
+        * complete yet. Instead, use the status we just read from the
+        * device. If result_status from the result dump is ever needed,
+        * it has to be byte swapped, since we set the device to little
+        * endian.
+        */
+       if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
+               ret = -ENXIO;
+
+       return ret;
+}
+
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
+{
+       u32 val;
+
+       val = qce_read(qce, REG_VERSION);
+       *major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
+       *minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
+       *step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
+}
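
For XTS, qce_xts_swapiv() loads the IV byte-reversed and right-aligned into a zero-padded 16-byte buffer before the big-endian word conversion. A userspace restatement of just the byte shuffle (endian conversion omitted):

    #include <stdio.h>
    #include <string.h>

    #define QCE_AES_IV_LENGTH 16

    /* Same index arithmetic as qce_xts_swapiv() in common.c above. */
    static void xts_swapiv_bytes(unsigned char *swap,
                                 const unsigned char *src,
                                 unsigned int ivsize)
    {
            unsigned int i, j;

            memset(swap, 0, QCE_AES_IV_LENGTH);
            for (i = QCE_AES_IV_LENGTH - ivsize, j = ivsize - 1;
                 i < QCE_AES_IV_LENGTH; i++, j--)
                    swap[i] = src[j];
    }

    int main(void)
    {
            unsigned char iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
            unsigned char out[QCE_AES_IV_LENGTH];
            unsigned int i;

            xts_swapiv_bytes(out, iv, sizeof(iv));
            for (i = 0; i < QCE_AES_IV_LENGTH; i++)
                    printf("%u ", out[i]);   /* 0 ... 0 8 7 6 5 4 3 2 1 */
            printf("\n");
            return 0;
    }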
diff --git a/drivers/crypto/qce/common.h b/drivers/crypto/qce/common.h
new file mode 100644 (file)
index 0000000..a4addd4
--- /dev/null
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _COMMON_H_
+#define _COMMON_H_
+
+#include <linux/crypto.h>
+#include <linux/types.h>
+#include <crypto/aes.h>
+#include <crypto/hash.h>
+
+/* key size in bytes */
+#define QCE_SHA_HMAC_KEY_SIZE          64
+#define QCE_MAX_CIPHER_KEY_SIZE                AES_KEYSIZE_256
+
+/* IV length in bytes */
+#define QCE_AES_IV_LENGTH              AES_BLOCK_SIZE
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCE_MAX_IV_SIZE                        AES_BLOCK_SIZE
+
+/* maximum nonce bytes */
+#define QCE_MAX_NONCE                  16
+#define QCE_MAX_NONCE_WORDS            (QCE_MAX_NONCE / sizeof(u32))
+
+/* burst size alignment requirement */
+#define QCE_MAX_ALIGN_SIZE             64
+
+/* cipher algorithms */
+#define QCE_ALG_DES                    BIT(0)
+#define QCE_ALG_3DES                   BIT(1)
+#define QCE_ALG_AES                    BIT(2)
+
+/* hash and hmac algorithms */
+#define QCE_HASH_SHA1                  BIT(3)
+#define QCE_HASH_SHA256                        BIT(4)
+#define QCE_HASH_SHA1_HMAC             BIT(5)
+#define QCE_HASH_SHA256_HMAC           BIT(6)
+#define QCE_HASH_AES_CMAC              BIT(7)
+
+/* cipher modes */
+#define QCE_MODE_CBC                   BIT(8)
+#define QCE_MODE_ECB                   BIT(9)
+#define QCE_MODE_CTR                   BIT(10)
+#define QCE_MODE_XTS                   BIT(11)
+#define QCE_MODE_CCM                   BIT(12)
+#define QCE_MODE_MASK                  GENMASK(12, 8)
+
+/* cipher encryption/decryption operations */
+#define QCE_ENCRYPT                    BIT(13)
+#define QCE_DECRYPT                    BIT(14)
+
+#define IS_DES(flags)                  (flags & QCE_ALG_DES)
+#define IS_3DES(flags)                 (flags & QCE_ALG_3DES)
+#define IS_AES(flags)                  (flags & QCE_ALG_AES)
+
+#define IS_SHA1(flags)                 (flags & QCE_HASH_SHA1)
+#define IS_SHA256(flags)               (flags & QCE_HASH_SHA256)
+#define IS_SHA1_HMAC(flags)            (flags & QCE_HASH_SHA1_HMAC)
+#define IS_SHA256_HMAC(flags)          (flags & QCE_HASH_SHA256_HMAC)
+#define IS_CMAC(flags)                 (flags & QCE_HASH_AES_CMAC)
+#define IS_SHA(flags)                  (IS_SHA1(flags) || IS_SHA256(flags))
+#define IS_SHA_HMAC(flags)             \
+               (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags))
+
+#define IS_CBC(mode)                   (mode & QCE_MODE_CBC)
+#define IS_ECB(mode)                   (mode & QCE_MODE_ECB)
+#define IS_CTR(mode)                   (mode & QCE_MODE_CTR)
+#define IS_XTS(mode)                   (mode & QCE_MODE_XTS)
+#define IS_CCM(mode)                   (mode & QCE_MODE_CCM)
+
+#define IS_ENCRYPT(dir)                        (dir & QCE_ENCRYPT)
+#define IS_DECRYPT(dir)                        (dir & QCE_DECRYPT)
+
+struct qce_alg_template {
+       struct list_head entry;
+       u32 crypto_alg_type;
+       unsigned long alg_flags;
+       const u32 *std_iv;
+       union {
+               struct crypto_alg crypto;
+               struct ahash_alg ahash;
+       } alg;
+       struct qce_device *qce;
+};
+
+void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len);
+int qce_check_status(struct qce_device *qce, u32 *status);
+void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step);
+int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
+             u32 offset);
+
+#endif /* _COMMON_H_ */
diff --git a/drivers/crypto/qce/core.c b/drivers/crypto/qce/core.c
new file mode 100644 (file)
index 0000000..33ae354
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <crypto/algapi.h>
+#include <crypto/internal/hash.h>
+#include <crypto/sha.h>
+
+#include "core.h"
+#include "cipher.h"
+#include "sha.h"
+
+#define QCE_MAJOR_VERSION5     0x05
+#define QCE_QUEUE_LENGTH       1
+
+static const struct qce_algo_ops *qce_ops[] = {
+       &ablkcipher_ops,
+       &ahash_ops,
+};
+
+static void qce_unregister_algs(struct qce_device *qce)
+{
+       const struct qce_algo_ops *ops;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+               ops = qce_ops[i];
+               ops->unregister_algs(qce);
+       }
+}
+
+static int qce_register_algs(struct qce_device *qce)
+{
+       const struct qce_algo_ops *ops;
+       int i, ret = -ENODEV;
+
+       for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+               ops = qce_ops[i];
+               ret = ops->register_algs(qce);
+               if (ret)
+                       break;
+       }
+
+       return ret;
+}
+
+static int qce_handle_request(struct crypto_async_request *async_req)
+{
+       int ret = -EINVAL, i;
+       const struct qce_algo_ops *ops;
+       u32 type = crypto_tfm_alg_type(async_req->tfm);
+
+       for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
+               ops = qce_ops[i];
+               if (type != ops->type)
+                       continue;
+               ret = ops->async_req_handle(async_req);
+               break;
+       }
+
+       return ret;
+}
+
+static int qce_handle_queue(struct qce_device *qce,
+                           struct crypto_async_request *req)
+{
+       struct crypto_async_request *async_req, *backlog;
+       unsigned long flags;
+       int ret = 0, err;
+
+       spin_lock_irqsave(&qce->lock, flags);
+
+       if (req)
+               ret = crypto_enqueue_request(&qce->queue, req);
+
+       /* busy, do not dequeue request */
+       if (qce->req) {
+               spin_unlock_irqrestore(&qce->lock, flags);
+               return ret;
+       }
+
+       backlog = crypto_get_backlog(&qce->queue);
+       async_req = crypto_dequeue_request(&qce->queue);
+       if (async_req)
+               qce->req = async_req;
+
+       spin_unlock_irqrestore(&qce->lock, flags);
+
+       if (!async_req)
+               return ret;
+
+       if (backlog) {
+               spin_lock_bh(&qce->lock);
+               backlog->complete(backlog, -EINPROGRESS);
+               spin_unlock_bh(&qce->lock);
+       }
+
+       err = qce_handle_request(async_req);
+       if (err) {
+               qce->result = err;
+               tasklet_schedule(&qce->done_tasklet);
+       }
+
+       return ret;
+}
+
+static void qce_tasklet_req_done(unsigned long data)
+{
+       struct qce_device *qce = (struct qce_device *)data;
+       struct crypto_async_request *req;
+       unsigned long flags;
+
+       spin_lock_irqsave(&qce->lock, flags);
+       req = qce->req;
+       qce->req = NULL;
+       spin_unlock_irqrestore(&qce->lock, flags);
+
+       if (req)
+               req->complete(req, qce->result);
+
+       qce_handle_queue(qce, NULL);
+}
+
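+/*
+ * Request lifecycle, as implemented above: qce_handle_queue() dequeues at
+ * most one request at a time and hands it to the matching
+ * ops->async_req_handle(); when the hardware finishes, the driver
+ * schedules done_tasklet, and qce_tasklet_req_done() completes the request
+ * and calls qce_handle_queue(qce, NULL) to kick off the next one.
+ */
+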
+static int qce_async_request_enqueue(struct qce_device *qce,
+                                    struct crypto_async_request *req)
+{
+       return qce_handle_queue(qce, req);
+}
+
+static void qce_async_request_done(struct qce_device *qce, int ret)
+{
+       qce->result = ret;
+       tasklet_schedule(&qce->done_tasklet);
+}
+
+static int qce_check_version(struct qce_device *qce)
+{
+       u32 major, minor, step;
+
+       qce_get_version(qce, &major, &minor, &step);
+
+       /*
+        * the driver does not support v5 with minor 0 because it has special
+        * alignment requirements.
+        */
+       if (major != QCE_MAJOR_VERSION5 || minor == 0)
+               return -ENODEV;
+
+       qce->burst_size = QCE_BAM_BURST_SIZE;
+       qce->pipe_pair_id = 1;
+
+       dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
+               major, minor, step);
+
+       return 0;
+}
+
+static int qce_crypto_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct qce_device *qce;
+       struct resource *res;
+       int ret;
+
+       qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
+       if (!qce)
+               return -ENOMEM;
+
+       qce->dev = dev;
+       platform_set_drvdata(pdev, qce);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       qce->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(qce->base))
+               return PTR_ERR(qce->base);
+
+       ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+       if (ret < 0)
+               return ret;
+
+       qce->core = devm_clk_get(qce->dev, "core");
+       if (IS_ERR(qce->core))
+               return PTR_ERR(qce->core);
+
+       qce->iface = devm_clk_get(qce->dev, "iface");
+       if (IS_ERR(qce->iface))
+               return PTR_ERR(qce->iface);
+
+       qce->bus = devm_clk_get(qce->dev, "bus");
+       if (IS_ERR(qce->bus))
+               return PTR_ERR(qce->bus);
+
+       ret = clk_prepare_enable(qce->core);
+       if (ret)
+               return ret;
+
+       ret = clk_prepare_enable(qce->iface);
+       if (ret)
+               goto err_clks_core;
+
+       ret = clk_prepare_enable(qce->bus);
+       if (ret)
+               goto err_clks_iface;
+
+       ret = qce_dma_request(qce->dev, &qce->dma);
+       if (ret)
+               goto err_clks;
+
+       ret = qce_check_version(qce);
+       if (ret)
+               goto err_dma;
+
+       spin_lock_init(&qce->lock);
+       tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
+                    (unsigned long)qce);
+       crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);
+
+       qce->async_req_enqueue = qce_async_request_enqueue;
+       qce->async_req_done = qce_async_request_done;
+
+       ret = qce_register_algs(qce);
+       if (ret)
+               goto err_dma;
+
+       return 0;
+
+err_dma:
+       qce_dma_release(&qce->dma);
+err_clks:
+       clk_disable_unprepare(qce->bus);
+err_clks_iface:
+       clk_disable_unprepare(qce->iface);
+err_clks_core:
+       clk_disable_unprepare(qce->core);
+       return ret;
+}
+
+static int qce_crypto_remove(struct platform_device *pdev)
+{
+       struct qce_device *qce = platform_get_drvdata(pdev);
+
+       tasklet_kill(&qce->done_tasklet);
+       qce_unregister_algs(qce);
+       qce_dma_release(&qce->dma);
+       clk_disable_unprepare(qce->bus);
+       clk_disable_unprepare(qce->iface);
+       clk_disable_unprepare(qce->core);
+       return 0;
+}
+
+static const struct of_device_id qce_crypto_of_match[] = {
+       { .compatible = "qcom,crypto-v5.1", },
+       {}
+};
+MODULE_DEVICE_TABLE(of, qce_crypto_of_match);
+
+static struct platform_driver qce_crypto_driver = {
+       .probe = qce_crypto_probe,
+       .remove = qce_crypto_remove,
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = KBUILD_MODNAME,
+               .of_match_table = qce_crypto_of_match,
+       },
+};
+module_platform_driver(qce_crypto_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Qualcomm crypto engine driver");
+MODULE_ALIAS("platform:" KBUILD_MODNAME);
+MODULE_AUTHOR("The Linux Foundation");
diff --git a/drivers/crypto/qce/core.h b/drivers/crypto/qce/core.h
new file mode 100644 (file)
index 0000000..549965d
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORE_H_
+#define _CORE_H_
+
+#include "dma.h"
+
+/**
+ * struct qce_device - crypto engine device structure
+ * @queue: crypto request queue
+ * @lock: spinlock protecting @queue and @req
+ * @done_tasklet: done tasklet object
+ * @req: current active request
+ * @result: result of current transform
+ * @base: virtual IO base
+ * @dev: pointer to device structure
+ * @core: core device clock
+ * @iface: interface clock
+ * @bus: bus clock
+ * @dma: pointer to dma data
+ * @burst_size: the crypto burst size
+ * @pipe_pair_id: which pipe pair the device is using
+ * @async_req_enqueue: invoked by every algorithm to enqueue a request
+ * @async_req_done: invoked by every algorithm to finish its request
+ */
+struct qce_device {
+       struct crypto_queue queue;
+       spinlock_t lock;
+       struct tasklet_struct done_tasklet;
+       struct crypto_async_request *req;
+       int result;
+       void __iomem *base;
+       struct device *dev;
+       struct clk *core, *iface, *bus;
+       struct qce_dma_data dma;
+       int burst_size;
+       unsigned int pipe_pair_id;
+       int (*async_req_enqueue)(struct qce_device *qce,
+                                struct crypto_async_request *req);
+       void (*async_req_done)(struct qce_device *qce, int ret);
+};
+
+/**
+ * struct qce_algo_ops - algorithm operations per crypto type
+ * @type: should be CRYPTO_ALG_TYPE_XXX
+ * @register_algs: invoked by core to register the algorithms
+ * @unregister_algs: invoked by core to unregister the algorithms
+ * @async_req_handle: invoked by core to handle an enqueued request
+ */
+struct qce_algo_ops {
+       u32 type;
+       int (*register_algs)(struct qce_device *qce);
+       void (*unregister_algs)(struct qce_device *qce);
+       int (*async_req_handle)(struct crypto_async_request *async_req);
+};
+
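+/*
+ * Illustrative sketch (the my_* names are hypothetical): a crypto type
+ * plugs into the core by exposing a filled-in qce_algo_ops, as the
+ * ablkcipher_ops and ahash_ops instances referenced from core.c do:
+ *
+ *	const struct qce_algo_ops my_ops = {
+ *		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+ *		.register_algs = my_register_algs,
+ *		.unregister_algs = my_unregister_algs,
+ *		.async_req_handle = my_async_req_handle,
+ *	};
+ */
+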
+#endif /* _CORE_H_ */
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
new file mode 100644 (file)
index 0000000..0fb21e1
--- /dev/null
@@ -0,0 +1,186 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dmaengine.h>
+#include <crypto/scatterwalk.h>
+
+#include "dma.h"
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma)
+{
+       int ret;
+
+       dma->txchan = dma_request_slave_channel_reason(dev, "tx");
+       if (IS_ERR(dma->txchan))
+               return PTR_ERR(dma->txchan);
+
+       dma->rxchan = dma_request_slave_channel_reason(dev, "rx");
+       if (IS_ERR(dma->rxchan)) {
+               ret = PTR_ERR(dma->rxchan);
+               goto error_rx;
+       }
+
+       dma->result_buf = kmalloc(QCE_RESULT_BUF_SZ + QCE_IGNORE_BUF_SZ,
+                                 GFP_KERNEL);
+       if (!dma->result_buf) {
+               ret = -ENOMEM;
+               goto error_nomem;
+       }
+
+       dma->ignore_buf = dma->result_buf + QCE_RESULT_BUF_SZ;
+
+       return 0;
+error_nomem:
+       dma_release_channel(dma->rxchan);
+error_rx:
+       dma_release_channel(dma->txchan);
+       return ret;
+}
+
+void qce_dma_release(struct qce_dma_data *dma)
+{
+       dma_release_channel(dma->txchan);
+       dma_release_channel(dma->rxchan);
+       kfree(dma->result_buf);
+}
+
+int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
+             enum dma_data_direction dir, bool chained)
+{
+       int err;
+
+       if (chained) {
+               while (sg) {
+                       err = dma_map_sg(dev, sg, 1, dir);
+                       if (!err)
+                               return -EFAULT;
+                       sg = scatterwalk_sg_next(sg);
+               }
+       } else {
+               err = dma_map_sg(dev, sg, nents, dir);
+               if (!err)
+                       return -EFAULT;
+       }
+
+       return nents;
+}
+
+void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
+                enum dma_data_direction dir, bool chained)
+{
+       if (chained)
+               while (sg) {
+                       dma_unmap_sg(dev, sg, 1, dir);
+                       sg = scatterwalk_sg_next(sg);
+               }
+       else
+               dma_unmap_sg(dev, sg, nents, dir);
+}
+
+int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
+{
+       struct scatterlist *sg = sglist;
+       int nents = 0;
+
+       if (chained)
+               *chained = false;
+
+       while (nbytes > 0 && sg) {
+               nents++;
+               nbytes -= sg->length;
+               if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
+                       *chained = true;
+               sg = scatterwalk_sg_next(sg);
+       }
+
+       return nents;
+}
+
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *new_sgl)
+{
+       struct scatterlist *sg = sgt->sgl, *sg_last = NULL;
+
+       while (sg) {
+               if (!sg_page(sg))
+                       break;
+               sg = sg_next(sg);
+       }
+
+       if (!sg)
+               return ERR_PTR(-EINVAL);
+
+       while (new_sgl && sg) {
+               sg_set_page(sg, sg_page(new_sgl), new_sgl->length,
+                           new_sgl->offset);
+               sg_last = sg;
+               sg = sg_next(sg);
+               new_sgl = sg_next(new_sgl);
+       }
+
+       return sg_last;
+}
+
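+/*
+ * Note on qce_sgtable_add() above: it fills the first empty (pageless)
+ * slots of an already-allocated table with the entries of new_sgl and
+ * returns the last slot written, presumably so the caller can terminate
+ * the assembled list with sg_mark_end().
+ */
+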
+static int qce_dma_prep_sg(struct dma_chan *chan, struct scatterlist *sg,
+                          int nents, unsigned long flags,
+                          enum dma_transfer_direction dir,
+                          dma_async_tx_callback cb, void *cb_param)
+{
+       struct dma_async_tx_descriptor *desc;
+       dma_cookie_t cookie;
+
+       if (!sg || !nents)
+               return -EINVAL;
+
+       desc = dmaengine_prep_slave_sg(chan, sg, nents, dir, flags);
+       if (!desc)
+               return -EINVAL;
+
+       desc->callback = cb;
+       desc->callback_param = cb_param;
+       cookie = dmaengine_submit(desc);
+
+       return dma_submit_error(cookie);
+}
+
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *rx_sg,
+                    int rx_nents, struct scatterlist *tx_sg, int tx_nents,
+                    dma_async_tx_callback cb, void *cb_param)
+{
+       struct dma_chan *rxchan = dma->rxchan;
+       struct dma_chan *txchan = dma->txchan;
+       unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+       int ret;
+
+       ret = qce_dma_prep_sg(rxchan, rx_sg, rx_nents, flags, DMA_MEM_TO_DEV,
+                            NULL, NULL);
+       if (ret)
+               return ret;
+
+       return qce_dma_prep_sg(txchan, tx_sg, tx_nents, flags, DMA_DEV_TO_MEM,
+                              cb, cb_param);
+}
+
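+/*
+ * Note the direction pairing in qce_dma_prep_sgs() above: the "rx" channel
+ * carries data from memory into the crypto engine (DMA_MEM_TO_DEV) and the
+ * "tx" channel carries results back out (DMA_DEV_TO_MEM), i.e. the channel
+ * names appear to be chosen from the engine's point of view.
+ */
+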
+void qce_dma_issue_pending(struct qce_dma_data *dma)
+{
+       dma_async_issue_pending(dma->rxchan);
+       dma_async_issue_pending(dma->txchan);
+}
+
+int qce_dma_terminate_all(struct qce_dma_data *dma)
+{
+       int ret;
+
+       ret = dmaengine_terminate_all(dma->rxchan);
+       return ret ?: dmaengine_terminate_all(dma->txchan);
+}
diff --git a/drivers/crypto/qce/dma.h b/drivers/crypto/qce/dma.h
new file mode 100644 (file)
index 0000000..805e378
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DMA_H_
+#define _DMA_H_
+
+/* maximum data transfer block size between BAM and CE */
+#define QCE_BAM_BURST_SIZE             64
+
+#define QCE_AUTHIV_REGS_CNT            16
+#define QCE_AUTH_BYTECOUNT_REGS_CNT    4
+#define QCE_CNTRIV_REGS_CNT            4
+
+struct qce_result_dump {
+       u32 auth_iv[QCE_AUTHIV_REGS_CNT];
+       u32 auth_byte_count[QCE_AUTH_BYTECOUNT_REGS_CNT];
+       u32 encr_cntr_iv[QCE_CNTRIV_REGS_CNT];
+       u32 status;
+       u32 status2;
+};
+
+#define QCE_IGNORE_BUF_SZ      (2 * QCE_BAM_BURST_SIZE)
+#define QCE_RESULT_BUF_SZ      \
+               ALIGN(sizeof(struct qce_result_dump), QCE_BAM_BURST_SIZE)
+
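+/*
+ * Size check (informative): struct qce_result_dump is 16 + 4 + 4 + 2 = 26
+ * u32s, i.e. 104 bytes, so QCE_RESULT_BUF_SZ rounds up to 128 and the
+ * single kmalloc() in qce_dma_request() allocates 128 + 128 = 256 bytes
+ * for the result and ignore buffers together.
+ */
+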
+struct qce_dma_data {
+       struct dma_chan *txchan;
+       struct dma_chan *rxchan;
+       struct qce_result_dump *result_buf;
+       void *ignore_buf;
+};
+
+int qce_dma_request(struct device *dev, struct qce_dma_data *dma);
+void qce_dma_release(struct qce_dma_data *dma);
+int qce_dma_prep_sgs(struct qce_dma_data *dma, struct scatterlist *sg_in,
+                    int in_ents, struct scatterlist *sg_out, int out_ents,
+                    dma_async_tx_callback cb, void *cb_param);
+void qce_dma_issue_pending(struct qce_dma_data *dma);
+int qce_dma_terminate_all(struct qce_dma_data *dma);
+int qce_countsg(struct scatterlist *sg_list, int nbytes, bool *chained);
+void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
+                enum dma_data_direction dir, bool chained);
+int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
+             enum dma_data_direction dir, bool chained);
+struct scatterlist *
+qce_sgtable_add(struct sg_table *sgt, struct scatterlist *sg_add);
+
+#endif /* _DMA_H_ */
diff --git a/drivers/crypto/qce/regs-v5.h b/drivers/crypto/qce/regs-v5.h
new file mode 100644 (file)
index 0000000..f0e19e3
--- /dev/null
@@ -0,0 +1,334 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _REGS_V5_H_
+#define _REGS_V5_H_
+
+#include <linux/bitops.h>
+
+#define REG_VERSION                    0x000
+#define REG_STATUS                     0x100
+#define REG_STATUS2                    0x104
+#define REG_ENGINES_AVAIL              0x108
+#define REG_FIFO_SIZES                 0x10c
+#define REG_SEG_SIZE                   0x110
+#define REG_GOPROC                     0x120
+#define REG_ENCR_SEG_CFG               0x200
+#define REG_ENCR_SEG_SIZE              0x204
+#define REG_ENCR_SEG_START             0x208
+#define REG_CNTR0_IV0                  0x20c
+#define REG_CNTR1_IV1                  0x210
+#define REG_CNTR2_IV2                  0x214
+#define REG_CNTR3_IV3                  0x218
+#define REG_CNTR_MASK                  0x21C
+#define REG_ENCR_CCM_INT_CNTR0         0x220
+#define REG_ENCR_CCM_INT_CNTR1         0x224
+#define REG_ENCR_CCM_INT_CNTR2         0x228
+#define REG_ENCR_CCM_INT_CNTR3         0x22c
+#define REG_ENCR_XTS_DU_SIZE           0x230
+#define REG_CNTR_MASK2                 0x234
+#define REG_CNTR_MASK1                 0x238
+#define REG_CNTR_MASK0                 0x23c
+#define REG_AUTH_SEG_CFG               0x300
+#define REG_AUTH_SEG_SIZE              0x304
+#define REG_AUTH_SEG_START             0x308
+#define REG_AUTH_IV0                   0x310
+#define REG_AUTH_IV1                   0x314
+#define REG_AUTH_IV2                   0x318
+#define REG_AUTH_IV3                   0x31c
+#define REG_AUTH_IV4                   0x320
+#define REG_AUTH_IV5                   0x324
+#define REG_AUTH_IV6                   0x328
+#define REG_AUTH_IV7                   0x32c
+#define REG_AUTH_IV8                   0x330
+#define REG_AUTH_IV9                   0x334
+#define REG_AUTH_IV10                  0x338
+#define REG_AUTH_IV11                  0x33c
+#define REG_AUTH_IV12                  0x340
+#define REG_AUTH_IV13                  0x344
+#define REG_AUTH_IV14                  0x348
+#define REG_AUTH_IV15                  0x34c
+#define REG_AUTH_INFO_NONCE0           0x350
+#define REG_AUTH_INFO_NONCE1           0x354
+#define REG_AUTH_INFO_NONCE2           0x358
+#define REG_AUTH_INFO_NONCE3           0x35c
+#define REG_AUTH_BYTECNT0              0x390
+#define REG_AUTH_BYTECNT1              0x394
+#define REG_AUTH_BYTECNT2              0x398
+#define REG_AUTH_BYTECNT3              0x39c
+#define REG_AUTH_EXP_MAC0              0x3a0
+#define REG_AUTH_EXP_MAC1              0x3a4
+#define REG_AUTH_EXP_MAC2              0x3a8
+#define REG_AUTH_EXP_MAC3              0x3ac
+#define REG_AUTH_EXP_MAC4              0x3b0
+#define REG_AUTH_EXP_MAC5              0x3b4
+#define REG_AUTH_EXP_MAC6              0x3b8
+#define REG_AUTH_EXP_MAC7              0x3bc
+#define REG_CONFIG                     0x400
+#define REG_GOPROC_QC_KEY              0x1000
+#define REG_GOPROC_OEM_KEY             0x2000
+#define REG_ENCR_KEY0                  0x3000
+#define REG_ENCR_KEY1                  0x3004
+#define REG_ENCR_KEY2                  0x3008
+#define REG_ENCR_KEY3                  0x300c
+#define REG_ENCR_KEY4                  0x3010
+#define REG_ENCR_KEY5                  0x3014
+#define REG_ENCR_KEY6                  0x3018
+#define REG_ENCR_KEY7                  0x301c
+#define REG_ENCR_XTS_KEY0              0x3020
+#define REG_ENCR_XTS_KEY1              0x3024
+#define REG_ENCR_XTS_KEY2              0x3028
+#define REG_ENCR_XTS_KEY3              0x302c
+#define REG_ENCR_XTS_KEY4              0x3030
+#define REG_ENCR_XTS_KEY5              0x3034
+#define REG_ENCR_XTS_KEY6              0x3038
+#define REG_ENCR_XTS_KEY7              0x303c
+#define REG_AUTH_KEY0                  0x3040
+#define REG_AUTH_KEY1                  0x3044
+#define REG_AUTH_KEY2                  0x3048
+#define REG_AUTH_KEY3                  0x304c
+#define REG_AUTH_KEY4                  0x3050
+#define REG_AUTH_KEY5                  0x3054
+#define REG_AUTH_KEY6                  0x3058
+#define REG_AUTH_KEY7                  0x305c
+#define REG_AUTH_KEY8                  0x3060
+#define REG_AUTH_KEY9                  0x3064
+#define REG_AUTH_KEY10                 0x3068
+#define REG_AUTH_KEY11                 0x306c
+#define REG_AUTH_KEY12                 0x3070
+#define REG_AUTH_KEY13                 0x3074
+#define REG_AUTH_KEY14                 0x3078
+#define REG_AUTH_KEY15                 0x307c
+
+/* Register bits - REG_VERSION */
+#define CORE_STEP_REV_SHIFT            0
+#define CORE_STEP_REV_MASK             GENMASK(15, 0)
+#define CORE_MINOR_REV_SHIFT           16
+#define CORE_MINOR_REV_MASK            GENMASK(23, 16)
+#define CORE_MAJOR_REV_SHIFT           24
+#define CORE_MAJOR_REV_MASK            GENMASK(31, 24)
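+
+/*
+ * Decoding sketch for REG_VERSION (plausibly what qce_get_version(),
+ * declared in common.h, does; its implementation is not part of this
+ * hunk):
+ *
+ *	val   = readl(base + REG_VERSION);
+ *	major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
+ *	minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
+ *	step  = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
+ */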
+
+/* Register bits - REG_STATUS */
+#define MAC_FAILED_SHIFT               31
+#define DOUT_SIZE_AVAIL_SHIFT          26
+#define DOUT_SIZE_AVAIL_MASK           GENMASK(30, 26)
+#define DIN_SIZE_AVAIL_SHIFT           21
+#define DIN_SIZE_AVAIL_MASK            GENMASK(25, 21)
+#define HSD_ERR_SHIFT                  20
+#define ACCESS_VIOL_SHIFT              19
+#define PIPE_ACTIVE_ERR_SHIFT          18
+#define CFG_CHNG_ERR_SHIFT             17
+#define DOUT_ERR_SHIFT                 16
+#define DIN_ERR_SHIFT                  15
+#define AXI_ERR_SHIFT                  14
+#define CRYPTO_STATE_SHIFT             10
+#define CRYPTO_STATE_MASK              GENMASK(13, 10)
+#define ENCR_BUSY_SHIFT                        9
+#define AUTH_BUSY_SHIFT                        8
+#define DOUT_INTR_SHIFT                        7
+#define DIN_INTR_SHIFT                 6
+#define OP_DONE_INTR_SHIFT             5
+#define ERR_INTR_SHIFT                 4
+#define DOUT_RDY_SHIFT                 3
+#define DIN_RDY_SHIFT                  2
+#define OPERATION_DONE_SHIFT           1
+#define SW_ERR_SHIFT                   0
+
+/* Register bits - REG_STATUS2 */
+#define AXI_EXTRA_SHIFT                        1
+#define LOCKED_SHIFT                   2
+
+/* Register bits - REG_CONFIG */
+#define REQ_SIZE_SHIFT                 17
+#define REQ_SIZE_MASK                  GENMASK(20, 17)
+#define REQ_SIZE_ENUM_1_BEAT           0
+#define REQ_SIZE_ENUM_2_BEAT           1
+#define REQ_SIZE_ENUM_3_BEAT           2
+#define REQ_SIZE_ENUM_4_BEAT           3
+#define REQ_SIZE_ENUM_5_BEAT           4
+#define REQ_SIZE_ENUM_6_BEAT           5
+#define REQ_SIZE_ENUM_7_BEAT           6
+#define REQ_SIZE_ENUM_8_BEAT           7
+#define REQ_SIZE_ENUM_9_BEAT           8
+#define REQ_SIZE_ENUM_10_BEAT          9
+#define REQ_SIZE_ENUM_11_BEAT          10
+#define REQ_SIZE_ENUM_12_BEAT          11
+#define REQ_SIZE_ENUM_13_BEAT          12
+#define REQ_SIZE_ENUM_14_BEAT          13
+#define REQ_SIZE_ENUM_15_BEAT          14
+#define REQ_SIZE_ENUM_16_BEAT          15
+
+#define MAX_QUEUED_REQ_SHIFT           14
+#define MAX_QUEUED_REQ_MASK            GENMASK(24, 16)
+#define ENUM_1_QUEUED_REQS             0
+#define ENUM_2_QUEUED_REQS             1
+#define ENUM_3_QUEUED_REQS             2
+
+#define IRQ_ENABLES_SHIFT              10
+#define IRQ_ENABLES_MASK               GENMASK(13, 10)
+
+#define LITTLE_ENDIAN_MODE_SHIFT       9
+#define PIPE_SET_SELECT_SHIFT          5
+#define PIPE_SET_SELECT_MASK           GENMASK(8, 5)
+
+#define HIGH_SPD_EN_N_SHIFT            4
+#define MASK_DOUT_INTR_SHIFT           3
+#define MASK_DIN_INTR_SHIFT            2
+#define MASK_OP_DONE_INTR_SHIFT                1
+#define MASK_ERR_INTR_SHIFT            0
+
+/* Register bits - REG_AUTH_SEG_CFG */
+#define COMP_EXP_MAC_SHIFT             24
+#define COMP_EXP_MAC_DISABLED          0
+#define COMP_EXP_MAC_ENABLED           1
+
+#define F9_DIRECTION_SHIFT             23
+#define F9_DIRECTION_UPLINK            0
+#define F9_DIRECTION_DOWNLINK          1
+
+#define AUTH_NONCE_NUM_WORDS_SHIFT     20
+#define AUTH_NONCE_NUM_WORDS_MASK      GENMASK(22, 20)
+
+#define USE_PIPE_KEY_AUTH_SHIFT                19
+#define USE_HW_KEY_AUTH_SHIFT          18
+#define AUTH_FIRST_SHIFT               17
+#define AUTH_LAST_SHIFT                        16
+
+#define AUTH_POS_SHIFT                 14
+#define AUTH_POS_MASK                  GENMASK(15, 14)
+#define AUTH_POS_BEFORE                        0
+#define AUTH_POS_AFTER                 1
+
+#define AUTH_SIZE_SHIFT                        9
+#define AUTH_SIZE_MASK                 GENMASK(13, 9)
+#define AUTH_SIZE_SHA1                 0
+#define AUTH_SIZE_SHA256               1
+#define AUTH_SIZE_ENUM_1_BYTES         0
+#define AUTH_SIZE_ENUM_2_BYTES         1
+#define AUTH_SIZE_ENUM_3_BYTES         2
+#define AUTH_SIZE_ENUM_4_BYTES         3
+#define AUTH_SIZE_ENUM_5_BYTES         4
+#define AUTH_SIZE_ENUM_6_BYTES         5
+#define AUTH_SIZE_ENUM_7_BYTES         6
+#define AUTH_SIZE_ENUM_8_BYTES         7
+#define AUTH_SIZE_ENUM_9_BYTES         8
+#define AUTH_SIZE_ENUM_10_BYTES                9
+#define AUTH_SIZE_ENUM_11_BYTES                10
+#define AUTH_SIZE_ENUM_12_BYTES                11
+#define AUTH_SIZE_ENUM_13_BYTES                12
+#define AUTH_SIZE_ENUM_14_BYTES                13
+#define AUTH_SIZE_ENUM_15_BYTES                14
+#define AUTH_SIZE_ENUM_16_BYTES                15
+
+#define AUTH_MODE_SHIFT                        6
+#define AUTH_MODE_MASK                 GENMASK(8, 6)
+#define AUTH_MODE_HASH                 0
+#define AUTH_MODE_HMAC                 1
+#define AUTH_MODE_CCM                  0
+#define AUTH_MODE_CMAC                 1
+
+#define AUTH_KEY_SIZE_SHIFT            3
+#define AUTH_KEY_SIZE_MASK             GENMASK(5, 3)
+#define AUTH_KEY_SZ_AES128             0
+#define AUTH_KEY_SZ_AES256             2
+
+#define AUTH_ALG_SHIFT                 0
+#define AUTH_ALG_MASK                  GENMASK(2, 0)
+#define AUTH_ALG_NONE                  0
+#define AUTH_ALG_SHA                   1
+#define AUTH_ALG_AES                   2
+#define AUTH_ALG_KASUMI                        3
+#define AUTH_ALG_SNOW3G                        4
+#define AUTH_ALG_ZUC                   5
+
+/* Register bits - REG_ENCR_XTS_DU_SIZE */
+#define ENCR_XTS_DU_SIZE_SHIFT         0
+#define ENCR_XTS_DU_SIZE_MASK          GENMASK(19, 0)
+
+/* Register bits - REG_ENCR_SEG_CFG */
+#define F8_KEYSTREAM_ENABLE_SHIFT      17
+#define F8_KEYSTREAM_DISABLED          0
+#define F8_KEYSTREAM_ENABLED           1
+
+#define F8_DIRECTION_SHIFT             16
+#define F8_DIRECTION_UPLINK            0
+#define F8_DIRECTION_DOWNLINK          1
+
+#define USE_PIPE_KEY_ENCR_SHIFT                15
+#define USE_PIPE_KEY_ENCR_ENABLED      1
+#define USE_KEY_REGISTERS              0
+
+#define USE_HW_KEY_ENCR_SHIFT          14
+#define USE_KEY_REG                    0
+#define USE_HW_KEY                     1
+
+#define LAST_CCM_SHIFT                 13
+#define LAST_CCM_XFR                   1
+#define INTERM_CCM_XFR                 0
+
+#define CNTR_ALG_SHIFT                 11
+#define CNTR_ALG_MASK                  GENMASK(12, 11)
+#define CNTR_ALG_NIST                  0
+
+#define ENCODE_SHIFT                   10
+
+#define ENCR_MODE_SHIFT                        6
+#define ENCR_MODE_MASK                 GENMASK(9, 6)
+#define ENCR_MODE_ECB                  0
+#define ENCR_MODE_CBC                  1
+#define ENCR_MODE_CTR                  2
+#define ENCR_MODE_XTS                  3
+#define ENCR_MODE_CCM                  4
+
+#define ENCR_KEY_SZ_SHIFT              3
+#define ENCR_KEY_SZ_MASK               GENMASK(5, 3)
+#define ENCR_KEY_SZ_DES                        0
+#define ENCR_KEY_SZ_3DES               1
+#define ENCR_KEY_SZ_AES128             0
+#define ENCR_KEY_SZ_AES256             2
+
+#define ENCR_ALG_SHIFT                 0
+#define ENCR_ALG_MASK                  GENMASK(2, 0)
+#define ENCR_ALG_NONE                  0
+#define ENCR_ALG_DES                   1
+#define ENCR_ALG_AES                   2
+#define ENCR_ALG_KASUMI                        4
+#define ENCR_ALG_SNOW_3G               5
+#define ENCR_ALG_ZUC                   6
+
+/* Register bits - REG_GOPROC */
+#define GO_SHIFT                       0
+#define CLR_CNTXT_SHIFT                        1
+#define RESULTS_DUMP_SHIFT             2
+
+/* Register bits - REG_ENGINES_AVAIL */
+#define ENCR_AES_SEL_SHIFT             0
+#define DES_SEL_SHIFT                  1
+#define ENCR_SNOW3G_SEL_SHIFT          2
+#define ENCR_KASUMI_SEL_SHIFT          3
+#define SHA_SEL_SHIFT                  4
+#define SHA512_SEL_SHIFT               5
+#define AUTH_AES_SEL_SHIFT             6
+#define AUTH_SNOW3G_SEL_SHIFT          7
+#define AUTH_KASUMI_SEL_SHIFT          8
+#define BAM_PIPE_SETS_SHIFT            9
+#define BAM_PIPE_SETS_MASK             GENMASK(12, 9)
+#define AXI_WR_BEATS_SHIFT             13
+#define AXI_WR_BEATS_MASK              GENMASK(18, 13)
+#define AXI_RD_BEATS_SHIFT             19
+#define AXI_RD_BEATS_MASK              GENMASK(24, 19)
+#define ENCR_ZUC_SEL_SHIFT             26
+#define AUTH_ZUC_SEL_SHIFT             27
+#define ZUC_ENABLE_SHIFT               28
+
+#endif /* _REGS_V5_H_ */
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
new file mode 100644 (file)
index 0000000..f338593
--- /dev/null
@@ -0,0 +1,588 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <crypto/internal/hash.h>
+
+#include "common.h"
+#include "core.h"
+#include "sha.h"
+
+/* crypto hw padding constant for first operation */
+#define SHA_PADDING            64
+#define SHA_PADDING_MASK       (SHA_PADDING - 1)
+
+static LIST_HEAD(ahash_algs);
+
+static const u32 std_iv_sha1[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+       SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4, 0, 0, 0
+};
+
+static const u32 std_iv_sha256[SHA256_DIGEST_SIZE / sizeof(u32)] = {
+       SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+       SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+};
+
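+/*
+ * Both IV tables above are sized for SHA-256 state; the SHA-1 values are
+ * zero-padded so qce_ahash_init() can copy either table into rctx->digest
+ * (QCE_SHA_MAX_DIGESTSIZE bytes) without knowing which variant it is.
+ */
+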
+static void qce_ahash_done(void *data)
+{
+       struct crypto_async_request *async_req = data;
+       struct ahash_request *req = ahash_request_cast(async_req);
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       struct qce_result_dump *result = qce->dma.result_buf;
+       unsigned int digestsize = crypto_ahash_digestsize(ahash);
+       int error;
+       u32 status;
+
+       error = qce_dma_terminate_all(&qce->dma);
+       if (error)
+               dev_dbg(qce->dev, "ahash dma termination error (%d)\n", error);
+
+       qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+                   rctx->src_chained);
+       qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+
+       memcpy(rctx->digest, result->auth_iv, digestsize);
+       if (req->result)
+               memcpy(req->result, result->auth_iv, digestsize);
+
+       rctx->byte_count[0] = cpu_to_be32(result->auth_byte_count[0]);
+       rctx->byte_count[1] = cpu_to_be32(result->auth_byte_count[1]);
+
+       error = qce_check_status(qce, &status);
+       if (error < 0)
+               dev_dbg(qce->dev, "ahash operation error (%x)\n", status);
+
+       req->src = rctx->src_orig;
+       req->nbytes = rctx->nbytes_orig;
+       rctx->last_blk = false;
+       rctx->first_blk = false;
+
+       qce->async_req_done(tmpl->qce, error);
+}
+
+static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
+{
+       struct ahash_request *req = ahash_request_cast(async_req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
+       struct qce_device *qce = tmpl->qce;
+       unsigned long flags = rctx->flags;
+       int ret;
+
+       if (IS_SHA_HMAC(flags)) {
+               rctx->authkey = ctx->authkey;
+               rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
+       } else if (IS_CMAC(flags)) {
+               rctx->authkey = ctx->authkey;
+               rctx->authklen = AES_KEYSIZE_128;
+       }
+
+       rctx->src_nents = qce_countsg(req->src, req->nbytes,
+                                     &rctx->src_chained);
+       ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+                       rctx->src_chained);
+       if (ret < 0)
+               return ret;
+
+       sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);
+
+       ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+       if (ret < 0)
+               goto error_unmap_src;
+
+       ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
+                              &rctx->result_sg, 1, qce_ahash_done, async_req);
+       if (ret)
+               goto error_unmap_dst;
+
+       qce_dma_issue_pending(&qce->dma);
+
+       ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
+       if (ret)
+               goto error_terminate;
+
+       return 0;
+
+error_terminate:
+       qce_dma_terminate_all(&qce->dma);
+error_unmap_dst:
+       qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
+error_unmap_src:
+       qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
+                   rctx->src_chained);
+       return ret;
+}
+
+static int qce_ahash_init(struct ahash_request *req)
+{
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+       const u32 *std_iv = tmpl->std_iv;
+
+       memset(rctx, 0, sizeof(*rctx));
+       rctx->first_blk = true;
+       rctx->last_blk = false;
+       rctx->flags = tmpl->alg_flags;
+       memcpy(rctx->digest, std_iv, sizeof(rctx->digest));
+
+       return 0;
+}
+
+static int qce_ahash_export(struct ahash_request *req, void *out)
+{
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       unsigned long flags = rctx->flags;
+       unsigned int digestsize = crypto_ahash_digestsize(ahash);
+       unsigned int blocksize =
+                       crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+
+       if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+               struct sha1_state *out_state = out;
+
+               out_state->count = rctx->count;
+               qce_cpu_to_be32p_array((__be32 *)out_state->state,
+                                      rctx->digest, digestsize);
+               memcpy(out_state->buffer, rctx->buf, blocksize);
+       } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+               struct sha256_state *out_state = out;
+
+               out_state->count = rctx->count;
+               qce_cpu_to_be32p_array((__be32 *)out_state->state,
+                                      rctx->digest, digestsize);
+               memcpy(out_state->buf, rctx->buf, blocksize);
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int qce_import_common(struct ahash_request *req, u64 in_count,
+                            const u32 *state, const u8 *buffer, bool hmac)
+{
+       struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       unsigned int digestsize = crypto_ahash_digestsize(ahash);
+       unsigned int blocksize;
+       u64 count = in_count;
+
+       blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(ahash));
+       rctx->count = in_count;
+       memcpy(rctx->buf, buffer, blocksize);
+
+       if (in_count <= blocksize) {
+               rctx->first_blk = 1;
+       } else {
+               rctx->first_blk = 0;
+               /*
+                * For HMAC, the hardware adds padding when the first-block
+                * flag is set, so byte_count must be incremented by 64
+                * after the first block operation.
+                */
+               if (hmac)
+                       count += SHA_PADDING;
+       }
+
+       rctx->byte_count[0] = (__force __be32)(count & ~SHA_PADDING_MASK);
+       rctx->byte_count[1] = (__force __be32)(count >> 32);
+       qce_cpu_to_be32p_array((__be32 *)rctx->digest, (const u8 *)state,
+                              digestsize);
+       rctx->buflen = (unsigned int)(in_count & (blocksize - 1));
+
+       return 0;
+}
+
+static int qce_ahash_import(struct ahash_request *req, const void *in)
+{
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       unsigned long flags = rctx->flags;
+       bool hmac = IS_SHA_HMAC(flags);
+       int ret = -EINVAL;
+
+       if (IS_SHA1(flags) || IS_SHA1_HMAC(flags)) {
+               const struct sha1_state *state = in;
+
+               ret = qce_import_common(req, state->count, state->state,
+                                       state->buffer, hmac);
+       } else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags)) {
+               const struct sha256_state *state = in;
+
+               ret = qce_import_common(req, state->count, state->state,
+                                       state->buf, hmac);
+       }
+
+       return ret;
+}
+
+static int qce_ahash_update(struct ahash_request *req)
+{
+       struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+       struct qce_device *qce = tmpl->qce;
+       struct scatterlist *sg_last, *sg;
+       unsigned int total, len;
+       unsigned int hash_later;
+       unsigned int nbytes;
+       unsigned int blocksize;
+
+       blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+       rctx->count += req->nbytes;
+
+       /* account for any bytes buffered by previous updates */
+       total = req->nbytes + rctx->buflen;
+
+       if (total <= blocksize) {
+               scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src,
+                                        0, req->nbytes, 0);
+               rctx->buflen += req->nbytes;
+               return 0;
+       }
+
+       /* save the original req structure fields */
+       rctx->src_orig = req->src;
+       rctx->nbytes_orig = req->nbytes;
+
+       /*
+        * If there is data left over from a previous update, copy it to
+        * tmpbuf so it can be chained ahead of the current request bytes.
+        */
+       if (rctx->buflen)
+               memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+
+       /* calculate how many bytes will be hashed later */
+       hash_later = total % blocksize;
+       if (hash_later) {
+               unsigned int src_offset = req->nbytes - hash_later;
+               scatterwalk_map_and_copy(rctx->buf, req->src, src_offset,
+                                        hash_later, 0);
+       }
+
+       /* here nbytes is a multiple of the blocksize */
+       nbytes = total - hash_later;
+
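+       /*
+        * Worked example: with a 64-byte blocksize, 10 bytes buffered and a
+        * 150-byte update, total = 160 and hash_later = 32 (kept back in
+        * rctx->buf), so 128 bytes are hashed now.
+        */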
+       len = rctx->buflen;
+       sg = sg_last = req->src;
+
+       while (len < nbytes && sg) {
+               if (len + sg_dma_len(sg) > nbytes)
+                       break;
+               len += sg_dma_len(sg);
+               sg_last = sg;
+               sg = scatterwalk_sg_next(sg);
+       }
+
+       if (!sg_last)
+               return -EINVAL;
+
+       sg_mark_end(sg_last);
+
+       if (rctx->buflen) {
+               sg_init_table(rctx->sg, 2);
+               sg_set_buf(rctx->sg, rctx->tmpbuf, rctx->buflen);
+               scatterwalk_sg_chain(rctx->sg, 2, req->src);
+               req->src = rctx->sg;
+       }
+
+       req->nbytes = nbytes;
+       rctx->buflen = hash_later;
+
+       return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_final(struct ahash_request *req)
+{
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+       struct qce_device *qce = tmpl->qce;
+
+       if (!rctx->buflen)
+               return 0;
+
+       rctx->last_blk = true;
+
+       rctx->src_orig = req->src;
+       rctx->nbytes_orig = req->nbytes;
+
+       memcpy(rctx->tmpbuf, rctx->buf, rctx->buflen);
+       sg_init_one(rctx->sg, rctx->tmpbuf, rctx->buflen);
+
+       req->src = rctx->sg;
+       req->nbytes = rctx->buflen;
+
+       return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+static int qce_ahash_digest(struct ahash_request *req)
+{
+       struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
+       struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm);
+       struct qce_device *qce = tmpl->qce;
+       int ret;
+
+       ret = qce_ahash_init(req);
+       if (ret)
+               return ret;
+
+       rctx->src_orig = req->src;
+       rctx->nbytes_orig = req->nbytes;
+       rctx->first_blk = true;
+       rctx->last_blk = true;
+
+       return qce->async_req_enqueue(tmpl->qce, &req->base);
+}
+
+struct qce_ahash_result {
+       struct completion completion;
+       int error;
+};
+
+static void qce_digest_complete(struct crypto_async_request *req, int error)
+{
+       struct qce_ahash_result *result = req->data;
+
+       if (error == -EINPROGRESS)
+               return;
+
+       result->error = error;
+       complete(&result->completion);
+}
+
+static int qce_ahash_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+                                unsigned int keylen)
+{
+       unsigned int digestsize = crypto_ahash_digestsize(tfm);
+       struct qce_sha_ctx *ctx = crypto_tfm_ctx(&tfm->base);
+       struct qce_ahash_result result;
+       struct ahash_request *req;
+       struct scatterlist sg;
+       unsigned int blocksize;
+       struct crypto_ahash *ahash_tfm;
+       u8 *buf;
+       int ret;
+       const char *alg_name;
+
+       blocksize = crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+       memset(ctx->authkey, 0, sizeof(ctx->authkey));
+
+       if (keylen <= blocksize) {
+               memcpy(ctx->authkey, key, keylen);
+               return 0;
+       }
+
+       if (digestsize == SHA1_DIGEST_SIZE)
+               alg_name = "sha1-qce";
+       else if (digestsize == SHA256_DIGEST_SIZE)
+               alg_name = "sha256-qce";
+       else
+               return -EINVAL;
+
+       ahash_tfm = crypto_alloc_ahash(alg_name, CRYPTO_ALG_TYPE_AHASH,
+                                      CRYPTO_ALG_TYPE_AHASH_MASK);
+       if (IS_ERR(ahash_tfm))
+               return PTR_ERR(ahash_tfm);
+
+       req = ahash_request_alloc(ahash_tfm, GFP_KERNEL);
+       if (!req) {
+               ret = -ENOMEM;
+               goto err_free_ahash;
+       }
+
+       init_completion(&result.completion);
+       ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  qce_digest_complete, &result);
+       crypto_ahash_clear_flags(ahash_tfm, ~0);
+
+       buf = kzalloc(keylen + QCE_MAX_ALIGN_SIZE, GFP_KERNEL);
+       if (!buf) {
+               ret = -ENOMEM;
+               goto err_free_req;
+       }
+
+       memcpy(buf, key, keylen);
+       sg_init_one(&sg, buf, keylen);
+       ahash_request_set_crypt(req, &sg, ctx->authkey, keylen);
+
+       ret = crypto_ahash_digest(req);
+       if (ret == -EINPROGRESS || ret == -EBUSY) {
+               ret = wait_for_completion_interruptible(&result.completion);
+               if (!ret)
+                       ret = result.error;
+       }
+
+       if (ret)
+               crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+
+       kfree(buf);
+err_free_req:
+       ahash_request_free(req);
+err_free_ahash:
+       crypto_free_ahash(ahash_tfm);
+       return ret;
+}
+
+static int qce_ahash_cra_init(struct crypto_tfm *tfm)
+{
+       struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+       struct qce_sha_ctx *ctx = crypto_tfm_ctx(tfm);
+
+       crypto_ahash_set_reqsize(ahash, sizeof(struct qce_sha_reqctx));
+       memset(ctx, 0, sizeof(*ctx));
+       return 0;
+}
+
+struct qce_ahash_def {
+       unsigned long flags;
+       const char *name;
+       const char *drv_name;
+       unsigned int digestsize;
+       unsigned int blocksize;
+       unsigned int statesize;
+       const u32 *std_iv;
+};
+
+static const struct qce_ahash_def ahash_def[] = {
+       {
+               .flags          = QCE_HASH_SHA1,
+               .name           = "sha1",
+               .drv_name       = "sha1-qce",
+               .digestsize     = SHA1_DIGEST_SIZE,
+               .blocksize      = SHA1_BLOCK_SIZE,
+               .statesize      = sizeof(struct sha1_state),
+               .std_iv         = std_iv_sha1,
+       },
+       {
+               .flags          = QCE_HASH_SHA256,
+               .name           = "sha256",
+               .drv_name       = "sha256-qce",
+               .digestsize     = SHA256_DIGEST_SIZE,
+               .blocksize      = SHA256_BLOCK_SIZE,
+               .statesize      = sizeof(struct sha256_state),
+               .std_iv         = std_iv_sha256,
+       },
+       {
+               .flags          = QCE_HASH_SHA1_HMAC,
+               .name           = "hmac(sha1)",
+               .drv_name       = "hmac-sha1-qce",
+               .digestsize     = SHA1_DIGEST_SIZE,
+               .blocksize      = SHA1_BLOCK_SIZE,
+               .statesize      = sizeof(struct sha1_state),
+               .std_iv         = std_iv_sha1,
+       },
+       {
+               .flags          = QCE_HASH_SHA256_HMAC,
+               .name           = "hmac(sha256)",
+               .drv_name       = "hmac-sha256-qce",
+               .digestsize     = SHA256_DIGEST_SIZE,
+               .blocksize      = SHA256_BLOCK_SIZE,
+               .statesize      = sizeof(struct sha256_state),
+               .std_iv         = std_iv_sha256,
+       },
+};
+
+static int qce_ahash_register_one(const struct qce_ahash_def *def,
+                                 struct qce_device *qce)
+{
+       struct qce_alg_template *tmpl;
+       struct ahash_alg *alg;
+       struct crypto_alg *base;
+       int ret;
+
+       tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
+       if (!tmpl)
+               return -ENOMEM;
+
+       tmpl->std_iv = def->std_iv;
+
+       alg = &tmpl->alg.ahash;
+       alg->init = qce_ahash_init;
+       alg->update = qce_ahash_update;
+       alg->final = qce_ahash_final;
+       alg->digest = qce_ahash_digest;
+       alg->export = qce_ahash_export;
+       alg->import = qce_ahash_import;
+       if (IS_SHA_HMAC(def->flags))
+               alg->setkey = qce_ahash_hmac_setkey;
+       alg->halg.digestsize = def->digestsize;
+       alg->halg.statesize = def->statesize;
+
+       base = &alg->halg.base;
+       base->cra_blocksize = def->blocksize;
+       base->cra_priority = 300;
+       base->cra_flags = CRYPTO_ALG_ASYNC;
+       base->cra_ctxsize = sizeof(struct qce_sha_ctx);
+       base->cra_alignmask = 0;
+       base->cra_module = THIS_MODULE;
+       base->cra_init = qce_ahash_cra_init;
+       INIT_LIST_HEAD(&base->cra_list);
+
+       snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name);
+       snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
+                def->drv_name);
+
+       INIT_LIST_HEAD(&tmpl->entry);
+       tmpl->crypto_alg_type = CRYPTO_ALG_TYPE_AHASH;
+       tmpl->alg_flags = def->flags;
+       tmpl->qce = qce;
+
+       ret = crypto_register_ahash(alg);
+       if (ret) {
+               kfree(tmpl);
+               dev_err(qce->dev, "%s registration failed\n", base->cra_name);
+               return ret;
+       }
+
+       list_add_tail(&tmpl->entry, &ahash_algs);
+       dev_dbg(qce->dev, "%s is registered\n", base->cra_name);
+       return 0;
+}
+
+static void qce_ahash_unregister(struct qce_device *qce)
+{
+       struct qce_alg_template *tmpl, *n;
+
+       list_for_each_entry_safe(tmpl, n, &ahash_algs, entry) {
+               crypto_unregister_ahash(&tmpl->alg.ahash);
+               list_del(&tmpl->entry);
+               kfree(tmpl);
+       }
+}
+
+static int qce_ahash_register(struct qce_device *qce)
+{
+       int ret, i;
+
+       for (i = 0; i < ARRAY_SIZE(ahash_def); i++) {
+               ret = qce_ahash_register_one(&ahash_def[i], qce);
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+err:
+       qce_ahash_unregister(qce);
+       return ret;
+}
+
+const struct qce_algo_ops ahash_ops = {
+       .type = CRYPTO_ALG_TYPE_AHASH,
+       .register_algs = qce_ahash_register,
+       .unregister_algs = qce_ahash_unregister,
+       .async_req_handle = qce_ahash_async_req_handle,
+};
diff --git a/drivers/crypto/qce/sha.h b/drivers/crypto/qce/sha.h
new file mode 100644 (file)
index 0000000..286f0d5
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SHA_H_
+#define _SHA_H_
+
+#include <crypto/scatterwalk.h>
+#include <crypto/sha.h>
+
+#include "common.h"
+#include "core.h"
+
+#define QCE_SHA_MAX_BLOCKSIZE          SHA256_BLOCK_SIZE
+#define QCE_SHA_MAX_DIGESTSIZE         SHA256_DIGEST_SIZE
+
+struct qce_sha_ctx {
+       u8 authkey[QCE_SHA_MAX_BLOCKSIZE];
+};
+
+/**
+ * struct qce_sha_reqctx - private per-request state for ahash operations
+ * @buf: data buffer used during update, import and export
+ * @tmpbuf: scratch buffer in which buffered bytes are staged for DMA
+ * @digest: calculated digest buffer
+ * @buflen: number of bytes currently buffered in @buf
+ * @flags: operation flags
+ * @src_orig: original request sg list
+ * @nbytes_orig: original request number of bytes
+ * @src_chained: is source scatterlist chained
+ * @src_nents: source number of entries
+ * @byte_count: authentication byte count (from hardware, or restored on import)
+ * @count: total byte count, saved in exported state and restored on import
+ * @first_blk: is it the first block
+ * @last_blk: is it the last block
+ * @sg: used to chain sg lists
+ * @authkey: pointer to auth key in sha ctx
+ * @authklen: auth key length
+ * @result_sg: scatterlist used for result buffer
+ */
+struct qce_sha_reqctx {
+       u8 buf[QCE_SHA_MAX_BLOCKSIZE];
+       u8 tmpbuf[QCE_SHA_MAX_BLOCKSIZE];
+       u8 digest[QCE_SHA_MAX_DIGESTSIZE];
+       unsigned int buflen;
+       unsigned long flags;
+       struct scatterlist *src_orig;
+       unsigned int nbytes_orig;
+       bool src_chained;
+       int src_nents;
+       __be32 byte_count[2];
+       u64 count;
+       bool first_blk;
+       bool last_blk;
+       struct scatterlist sg[2];
+       u8 *authkey;
+       unsigned int authklen;
+       struct scatterlist result_sg;
+};
+
+static inline struct qce_alg_template *to_ahash_tmpl(struct crypto_tfm *tfm)
+{
+       struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+       struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash),
+                                            struct ahash_alg, halg);
+
+       return container_of(alg, struct qce_alg_template, alg.ahash);
+}
+
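+/*
+ * Walking the container_of() chain above (tfm -> ahash -> halg ->
+ * ahash_alg -> qce_alg_template) recovers the template registered by
+ * qce_ahash_register_one() in sha.c, and through tmpl->qce the owning
+ * device.
+ */
+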
+extern const struct qce_algo_ops ahash_ops;
+
+#endif /* _SHA_H_ */
index a999f53..92105f3 100644 (file)
@@ -190,7 +190,7 @@ static void add_session_id(struct cryp_ctx *ctx)
 static irqreturn_t cryp_interrupt_handler(int irq, void *param)
 {
        struct cryp_ctx *ctx;
-       int i;
+       int count;
        struct cryp_device_data *device_data;
 
        if (param == NULL) {
@@ -215,12 +215,11 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
        if (cryp_pending_irq_src(device_data,
                                 CRYP_IRQ_SRC_OUTPUT_FIFO)) {
                if (ctx->outlen / ctx->blocksize > 0) {
-                       for (i = 0; i < ctx->blocksize / 4; i++) {
-                               *(ctx->outdata) = readl_relaxed(
-                                               &device_data->base->dout);
-                               ctx->outdata += 4;
-                               ctx->outlen -= 4;
-                       }
+                       count = ctx->blocksize / 4;
+
+                       readsl(&device_data->base->dout, ctx->outdata, count);
+                       ctx->outdata += count * 4;
+                       ctx->outlen -= count * 4;
 
                        if (ctx->outlen == 0) {
                                cryp_disable_irq_src(device_data,
@@ -230,12 +229,12 @@ static irqreturn_t cryp_interrupt_handler(int irq, void *param)
        } else if (cryp_pending_irq_src(device_data,
                                        CRYP_IRQ_SRC_INPUT_FIFO)) {
                if (ctx->datalen / ctx->blocksize > 0) {
-                       for (i = 0 ; i < ctx->blocksize / 4; i++) {
-                               writel_relaxed(ctx->indata,
-                                               &device_data->base->din);
-                               ctx->indata += 4;
-                               ctx->datalen -= 4;
-                       }
+                       count = ctx->blocksize / 4;
+
+                       writesl(&device_data->base->din, ctx->indata, count);
+
+                       ctx->indata += count * 4;
+                       ctx->datalen -= count * 4;
 
                        if (ctx->datalen == 0)
                                cryp_disable_irq_src(device_data,
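The conversion above replaces a word-at-a-time FIFO loop with readsl()/writesl(), which transfer count 32-bit words from or to a single MMIO register address. Note that count is in words, so the byte-based pointer and length bookkeeping must move by count * 4, as in the hunk above. A sketch of the loop each call stands in for (an illustration of the semantics, not the kernel's arch-specific definition, which may use raw accessors):

#include <linux/io.h>

/* What readsl(fifo, buf, count) amounts to: count 32-bit reads from the
 * same FIFO register; the total transfer is count * 4 bytes. */
static void fifo_read_words(void __iomem *fifo, u32 *buf, int count)
{
	int i;

	for (i = 0; i < count; i++)
		buf[i] = readl(fifo);	/* same address: the FIFO pops one word per read */
}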
index d028f36..8f8b0b6 100644 (file)
@@ -86,6 +86,9 @@
 
 #define USBSS_IRQ_PD_COMP      (1 <<  2)
 
+/* Packet Descriptor */
+#define PD2_ZERO_LENGTH                (1 << 19)
+
 struct cppi41_channel {
        struct dma_chan chan;
        struct dma_async_tx_descriptor txd;
@@ -307,7 +310,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
                        __iormb();
 
                while (val) {
-                       u32 desc;
+                       u32 desc, len;
 
                        q_num = __fls(val);
                        val &= ~(1 << q_num);
@@ -319,9 +322,13 @@ static irqreturn_t cppi41_irq(int irq, void *data)
                                                q_num, desc);
                                continue;
                        }
-                       c->residue = pd_trans_len(c->desc->pd6) -
-                               pd_trans_len(c->desc->pd0);
 
+                       if (c->desc->pd2 & PD2_ZERO_LENGTH)
+                               len = 0;
+                       else
+                               len = pd_trans_len(c->desc->pd0);
+
+                       c->residue = pd_trans_len(c->desc->pd6) - len;
                        dma_cookie_complete(&c->txd);
                        c->txd.callback(c->txd.callback_param);
                }
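With the PD2 zero-length flag honored, the residue becomes "bytes requested minus bytes transferred", where a zero-length packet transferred nothing even though pd0 still carries the originally programmed length. A small sketch of the calculation, taking already-extracted lengths as parameters (hypothetical helper; the driver applies pd_trans_len() to the descriptor words first):

#include <linux/types.h>

/* Residue for a completed cppi41 descriptor.
 * pd6_len: bytes originally requested; pd0_len: bytes reported transferred. */
static u32 cppi41_residue(u32 pd6_len, u32 pd0_len, u32 pd2)
{
	u32 len = (pd2 & (1 << 19) /* PD2_ZERO_LENGTH */) ? 0 : pd0_len;

	return pd6_len - len;
}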
index 1287146..14867e3 100644 (file)
@@ -255,6 +255,7 @@ struct sdma_channel {
        enum dma_slave_buswidth         word_size;
        unsigned int                    buf_tail;
        unsigned int                    num_bd;
+       unsigned int                    period_len;
        struct sdma_buffer_descriptor   *bd;
        dma_addr_t                      bd_phys;
        unsigned int                    pc_from_device, pc_to_device;
@@ -592,6 +593,12 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 }
 
 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
+{
+       if (sdmac->desc.callback)
+               sdmac->desc.callback(sdmac->desc.callback_param);
+}
+
+static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
        struct sdma_buffer_descriptor *bd;
 
@@ -611,9 +618,6 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
                bd->mode.status |= BD_DONE;
                sdmac->buf_tail++;
                sdmac->buf_tail %= sdmac->num_bd;
-
-               if (sdmac->desc.callback)
-                       sdmac->desc.callback(sdmac->desc.callback_param);
        }
 }
 
@@ -669,6 +673,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
                int channel = fls(stat) - 1;
                struct sdma_channel *sdmac = &sdma->channel[channel];
 
+               if (sdmac->flags & IMX_DMA_SG_LOOP)
+                       sdma_update_channel_loop(sdmac);
+
                tasklet_schedule(&sdmac->tasklet);
 
                __clear_bit(channel, &stat);
@@ -1129,6 +1136,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
        sdmac->status = DMA_IN_PROGRESS;
 
        sdmac->buf_tail = 0;
+       sdmac->period_len = period_len;
 
        sdmac->flags |= IMX_DMA_SG_LOOP;
        sdmac->direction = direction;
@@ -1225,9 +1233,15 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
                                      struct dma_tx_state *txstate)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
+       u32 residue;
+
+       if (sdmac->flags & IMX_DMA_SG_LOOP)
+               residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
+       else
+               residue = sdmac->chn_count - sdmac->chn_real_count;
 
        dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
-                       sdmac->chn_count - sdmac->chn_real_count);
+                        residue);
 
        return sdmac->status;
 }
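For the cyclic (ring-buffer) case the residue is the number of not-yet-completed periods times the period length, measured from the current buffer tail. A minimal sketch with illustrative numbers:

#include <linux/types.h>

/* Residue of a cyclic DMA ring: with num_bd = 4 periods of period_len = 4096
 * and buf_tail = 1 (one period consumed), (4 - 1) * 4096 = 12288 bytes remain. */
static u32 cyclic_residue(u32 num_bd, u32 buf_tail, u32 period_len)
{
	return (num_bd - buf_tail) * period_len;
}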
index 878f090..e339c6b 100644 (file)
@@ -186,6 +186,13 @@ config EDAC_I3200
          Support for error detection and correction on the Intel
          3200 and 3210 server chipsets.
 
+config EDAC_IE31200
+       tristate "Intel e312xx"
+       depends on EDAC_MM_EDAC && PCI && X86
+       help
+         Support for error detection and correction on the Intel
+         E3-1200 based DRAM controllers.
+
 config EDAC_X38
        tristate "Intel X38"
        depends on EDAC_MM_EDAC && PCI && X86
index 4154ed6..c479a24 100644 (file)
@@ -37,6 +37,7 @@ obj-$(CONFIG_EDAC_I82875P)            += i82875p_edac.o
 obj-$(CONFIG_EDAC_I82975X)             += i82975x_edac.o
 obj-$(CONFIG_EDAC_I3000)               += i3000_edac.o
 obj-$(CONFIG_EDAC_I3200)               += i3200_edac.o
+obj-$(CONFIG_EDAC_IE31200)             += ie31200_edac.o
 obj-$(CONFIG_EDAC_X38)                 += x38_edac.o
 obj-$(CONFIG_EDAC_I82860)              += i82860_edac.o
 obj-$(CONFIG_EDAC_R82600)              += r82600_edac.o
index a66941f..e6d1691 100644 (file)
@@ -28,7 +28,7 @@ static int edac_set_debug_level(const char *buf, struct kernel_param *kp)
        if (ret)
                return ret;
 
-       if (val < 0 || val > 4)
+       if (val > 4)
                return -EINVAL;
 
        return param_set_int(buf, kp);
diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
new file mode 100644 (file)
index 0000000..a981dc6
--- /dev/null
@@ -0,0 +1,536 @@
+/*
+ * Intel E3-1200
+ * Copyright (C) 2014 Jason Baron <jbaron@akamai.com>
+ *
+ * Support for the E3-1200 processor family. Heavily based on previous
+ * Intel EDAC drivers.
+ *
+ * Since the DRAM controller is on the CPU chip, we can use its PCI device
+ * ID to identify these processors.
+ *
+ * PCI DRAM controller device ids (Taken from The PCI ID Repository - http://pci-ids.ucw.cz/)
+ *
+ * 0108: Xeon E3-1200 Processor Family DRAM Controller
+ * 010c: Xeon E3-1200/2nd Generation Core Processor Family DRAM Controller
+ * 0150: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
+ * 0158: Xeon E3-1200 v2/Ivy Bridge DRAM Controller
+ * 015c: Xeon E3-1200 v2/3rd Gen Core processor DRAM Controller
+ * 0c04: Xeon E3-1200 v3/4th Gen Core Processor DRAM Controller
+ * 0c08: Xeon E3-1200 v3 Processor DRAM Controller
+ *
+ * Based on Intel specification:
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/xeon-e3-1200v3-vol-2-datasheet.pdf
+ * http://www.intel.com/content/www/us/en/processors/xeon/xeon-e3-1200-family-vol-2-datasheet.html
+ *
+ * According to the above datasheet (p.16):
+ * "
+ * 6. Software must not access B0/D0/F0 32-bit memory-mapped registers with
+ * requests that cross a DW boundary.
+ * "
+ *
+ * Thus, we make explicit use of lo_hi_readq(), which breaks a readq() into
+ * two readl() calls. This restriction may be lifted in subsequent chip releases,
+ * but lo_hi_readq() ensures that we are safe across all E3-1200 processors.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/edac.h>
+
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+#include "edac_core.h"
+
+#define IE31200_REVISION "1.0"
+#define EDAC_MOD_STR "ie31200_edac"
+
+#define ie31200_printk(level, fmt, arg...) \
+       edac_printk(level, "ie31200", fmt, ##arg)
+
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_1 0x0108
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_2 0x010c
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_3 0x0150
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_4 0x0158
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_5 0x015c
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_6 0x0c04
+#define PCI_DEVICE_ID_INTEL_IE31200_HB_7 0x0c08
+
+#define IE31200_DIMMS                  4
+#define IE31200_RANKS                  8
+#define IE31200_RANKS_PER_CHANNEL      4
+#define IE31200_DIMMS_PER_CHANNEL      2
+#define IE31200_CHANNELS               2
+
+/* Intel IE31200 register addresses - device 0 function 0 - DRAM Controller */
+#define IE31200_MCHBAR_LOW             0x48
+#define IE31200_MCHBAR_HIGH            0x4c
+#define IE31200_MCHBAR_MASK            GENMASK_ULL(38, 15)
+#define IE31200_MMR_WINDOW_SIZE                BIT(15)
+
+/*
+ * Error Status Register (16b)
+ *
+ * 15    reserved
+ * 14    Isochronous TBWRR Run Behind FIFO Full
+ *       (ITCV)
+ * 13    Isochronous TBWRR Run Behind FIFO Put
+ *       (ITSTV)
+ * 12    reserved
+ * 11    MCH Thermal Sensor Event
+ *       for SMI/SCI/SERR (GTSE)
+ * 10    reserved
+ *  9    LOCK to non-DRAM Memory Flag (LCKF)
+ *  8    reserved
+ *  7    DRAM Throttle Flag (DTF)
+ *  6:2  reserved
+ *  1    Multi-bit DRAM ECC Error Flag (DMERR)
+ *  0    Single-bit DRAM ECC Error Flag (DSERR)
+ */
+#define IE31200_ERRSTS                 0xc8
+#define IE31200_ERRSTS_UE              BIT(1)
+#define IE31200_ERRSTS_CE              BIT(0)
+#define IE31200_ERRSTS_BITS            (IE31200_ERRSTS_UE | IE31200_ERRSTS_CE)
+
+/*
+ * Channel 0 ECC Error Log (64b)
+ *
+ * 63:48 Error Column Address (ERRCOL)
+ * 47:32 Error Row Address (ERRROW)
+ * 31:29 Error Bank Address (ERRBANK)
+ * 28:27 Error Rank Address (ERRRANK)
+ * 26:24 reserved
+ * 23:16 Error Syndrome (ERRSYND)
+ * 15: 2 reserved
+ *    1  Multiple Bit Error Status (MERRSTS)
+ *    0  Correctable Error Status (CERRSTS)
+ */
+#define IE31200_C0ECCERRLOG                    0x40c8
+#define IE31200_C1ECCERRLOG                    0x44c8
+#define IE31200_ECCERRLOG_CE                   BIT(0)
+#define IE31200_ECCERRLOG_UE                   BIT(1)
+#define IE31200_ECCERRLOG_RANK_BITS            GENMASK_ULL(28, 27)
+#define IE31200_ECCERRLOG_RANK_SHIFT           27
+#define IE31200_ECCERRLOG_SYNDROME_BITS                GENMASK_ULL(23, 16)
+#define IE31200_ECCERRLOG_SYNDROME_SHIFT       16
+
+#define IE31200_ECCERRLOG_SYNDROME(log)                   \
+       ((log & IE31200_ECCERRLOG_SYNDROME_BITS) >> \
+        IE31200_ECCERRLOG_SYNDROME_SHIFT)
+
+#define IE31200_CAPID0                 0xe4
+#define IE31200_CAPID0_PDCD            BIT(4)
+#define IE31200_CAPID0_DDPCD           BIT(6)
+#define IE31200_CAPID0_ECC             BIT(1)
+
+#define IE31200_MAD_DIMM_0_OFFSET      0x5004
+#define IE31200_MAD_DIMM_SIZE          GENMASK_ULL(7, 0)
+#define IE31200_MAD_DIMM_A_RANK                BIT(17)
+#define IE31200_MAD_DIMM_A_WIDTH       BIT(19)
+
+#define IE31200_PAGES(n)               ((n) << (28 - PAGE_SHIFT))
+
+static int nr_channels;
+
+struct ie31200_priv {
+       void __iomem *window;
+};
+
+enum ie31200_chips {
+       IE31200 = 0,
+};
+
+struct ie31200_dev_info {
+       const char *ctl_name;
+};
+
+struct ie31200_error_info {
+       u16 errsts;
+       u16 errsts2;
+       u64 eccerrlog[IE31200_CHANNELS];
+};
+
+static const struct ie31200_dev_info ie31200_devs[] = {
+       [IE31200] = {
+               .ctl_name = "IE31200"
+       },
+};
+
+struct dimm_data {
+       u8 size; /* in 256MB multiples */
+       u8 dual_rank : 1,
+          x16_width : 1; /* 0 means x8 width */
+};
+
+static int how_many_channels(struct pci_dev *pdev)
+{
+       int n_channels;
+       unsigned char capid0_2b; /* 2nd byte of CAPID0 */
+
+       pci_read_config_byte(pdev, IE31200_CAPID0 + 1, &capid0_2b);
+
+       /* check PDCD: Dual Channel Disable */
+       if (capid0_2b & IE31200_CAPID0_PDCD) {
+               edac_dbg(0, "In single channel mode\n");
+               n_channels = 1;
+       } else {
+               edac_dbg(0, "In dual channel mode\n");
+               n_channels = 2;
+       }
+
+       /* check DDPCD: 2 DIMMs Per Channel Disable */
+       if (capid0_2b & IE31200_CAPID0_DDPCD)
+               edac_dbg(0, "2 DIMMs per channel disabled\n");
+       else
+               edac_dbg(0, "2 DIMMs per channel enabled\n");
+
+       return n_channels;
+}
+
+static bool ecc_capable(struct pci_dev *pdev)
+{
+       unsigned char capid0_4b; /* 4th byte of CAPID0 */
+
+       pci_read_config_byte(pdev, IE31200_CAPID0 + 3, &capid0_4b);
+       if (capid0_4b & IE31200_CAPID0_ECC)
+               return false;
+       return true;
+}
+
+static int eccerrlog_row(int channel, u64 log)
+{
+       int rank = ((log & IE31200_ECCERRLOG_RANK_BITS) >>
+               IE31200_ECCERRLOG_RANK_SHIFT);
+       return rank | (channel * IE31200_RANKS_PER_CHANNEL);
+}
+
+static void ie31200_clear_error_info(struct mem_ctl_info *mci)
+{
+       /*
+        * Clear any error bits.
+        * (Yes, we really clear bits by writing 1 to them.)
+        */
+       pci_write_bits16(to_pci_dev(mci->pdev), IE31200_ERRSTS,
+                        IE31200_ERRSTS_BITS, IE31200_ERRSTS_BITS);
+}
+
+static void ie31200_get_and_clear_error_info(struct mem_ctl_info *mci,
+                                            struct ie31200_error_info *info)
+{
+       struct pci_dev *pdev;
+       struct ie31200_priv *priv = mci->pvt_info;
+       void __iomem *window = priv->window;
+
+       pdev = to_pci_dev(mci->pdev);
+
+       /*
+        * This is a mess because there is no atomic way to read all the
+        * registers at once, and a CE log entry can be overwritten by a UE
+        * between reads.
+        */
+       pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts);
+       if (!(info->errsts & IE31200_ERRSTS_BITS))
+               return;
+
+       info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+       if (nr_channels == 2)
+               info->eccerrlog[1] = lo_hi_readq(window + IE31200_C1ECCERRLOG);
+
+       pci_read_config_word(pdev, IE31200_ERRSTS, &info->errsts2);
+
+       /*
+        * If the error is the same for both reads then the first set
+        * of reads is valid.  If there is a change then there is a CE
+        * with no info and the second set of reads is valid and
+        * should be UE info.
+        */
+       if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+               info->eccerrlog[0] = lo_hi_readq(window + IE31200_C0ECCERRLOG);
+               if (nr_channels == 2)
+                       info->eccerrlog[1] =
+                               lo_hi_readq(window + IE31200_C1ECCERRLOG);
+       }
+
+       ie31200_clear_error_info(mci);
+}
+
+static void ie31200_process_error_info(struct mem_ctl_info *mci,
+                                      struct ie31200_error_info *info)
+{
+       int channel;
+       u64 log;
+
+       if (!(info->errsts & IE31200_ERRSTS_BITS))
+               return;
+
+       if ((info->errsts ^ info->errsts2) & IE31200_ERRSTS_BITS) {
+               edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
+                                    -1, -1, -1, "UE overwrote CE", "");
+               info->errsts = info->errsts2;
+       }
+
+       for (channel = 0; channel < nr_channels; channel++) {
+               log = info->eccerrlog[channel];
+               if (log & IE31200_ECCERRLOG_UE) {
+                       edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
+                                            0, 0, 0,
+                                            eccerrlog_row(channel, log),
+                                            channel, -1,
+                                            "ie31200 UE", "");
+               } else if (log & IE31200_ECCERRLOG_CE) {
+                       edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
+                                            0, 0,
+                                            IE31200_ECCERRLOG_SYNDROME(log),
+                                            eccerrlog_row(channel, log),
+                                            channel, -1,
+                                            "ie31200 CE", "");
+               }
+       }
+}
+
+static void ie31200_check(struct mem_ctl_info *mci)
+{
+       struct ie31200_error_info info;
+
+       edac_dbg(1, "MC%d\n", mci->mc_idx);
+       ie31200_get_and_clear_error_info(mci, &info);
+       ie31200_process_error_info(mci, &info);
+}
+
+static void __iomem *ie31200_map_mchbar(struct pci_dev *pdev)
+{
+       union {
+               u64 mchbar;
+               struct {
+                       u32 mchbar_low;
+                       u32 mchbar_high;
+               };
+       } u;
+       void __iomem *window;
+
+       pci_read_config_dword(pdev, IE31200_MCHBAR_LOW, &u.mchbar_low);
+       pci_read_config_dword(pdev, IE31200_MCHBAR_HIGH, &u.mchbar_high);
+       u.mchbar &= IE31200_MCHBAR_MASK;
+
+       if (u.mchbar != (resource_size_t)u.mchbar) {
+               ie31200_printk(KERN_ERR, "mmio space beyond accessible range (0x%llx)\n",
+                              (unsigned long long)u.mchbar);
+               return NULL;
+       }
+
+       window = ioremap_nocache(u.mchbar, IE31200_MMR_WINDOW_SIZE);
+       if (!window)
+               ie31200_printk(KERN_ERR, "Cannot map mmio space at 0x%llx\n",
+                              (unsigned long long)u.mchbar);
+
+       return window;
+}
+
+static int ie31200_probe1(struct pci_dev *pdev, int dev_idx)
+{
+       int i, j, ret;
+       struct mem_ctl_info *mci = NULL;
+       struct edac_mc_layer layers[2];
+       struct dimm_data dimm_info[IE31200_CHANNELS][IE31200_DIMMS_PER_CHANNEL];
+       void __iomem *window;
+       struct ie31200_priv *priv;
+       u32 addr_decode;
+
+       edac_dbg(0, "MC:\n");
+
+       if (!ecc_capable(pdev)) {
+               ie31200_printk(KERN_INFO, "No ECC support\n");
+               return -ENODEV;
+       }
+
+       nr_channels = how_many_channels(pdev);
+       layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
+       layers[0].size = IE31200_DIMMS;
+       layers[0].is_virt_csrow = true;
+       layers[1].type = EDAC_MC_LAYER_CHANNEL;
+       layers[1].size = nr_channels;
+       layers[1].is_virt_csrow = false;
+       mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
+                           sizeof(struct ie31200_priv));
+       if (!mci)
+               return -ENOMEM;
+
+       window = ie31200_map_mchbar(pdev);
+       if (!window) {
+               ret = -ENODEV;
+               goto fail_free;
+       }
+
+       edac_dbg(3, "MC: init mci\n");
+       mci->pdev = &pdev->dev;
+       mci->mtype_cap = MEM_FLAG_DDR3;
+       mci->edac_ctl_cap = EDAC_FLAG_SECDED;
+       mci->edac_cap = EDAC_FLAG_SECDED;
+       mci->mod_name = EDAC_MOD_STR;
+       mci->mod_ver = IE31200_REVISION;
+       mci->ctl_name = ie31200_devs[dev_idx].ctl_name;
+       mci->dev_name = pci_name(pdev);
+       mci->edac_check = ie31200_check;
+       mci->ctl_page_to_phys = NULL;
+       priv = mci->pvt_info;
+       priv->window = window;
+
+       /* populate DIMM info */
+       for (i = 0; i < IE31200_CHANNELS; i++) {
+               addr_decode = readl(window + IE31200_MAD_DIMM_0_OFFSET +
+                                       (i * 4));
+               edac_dbg(0, "addr_decode: 0x%x\n", addr_decode);
+               for (j = 0; j < IE31200_DIMMS_PER_CHANNEL; j++) {
+                       dimm_info[i][j].size = (addr_decode >> (j * 8)) &
+                                               IE31200_MAD_DIMM_SIZE;
+                       dimm_info[i][j].dual_rank = (addr_decode &
+                               (IE31200_MAD_DIMM_A_RANK << j)) ? 1 : 0;
+                       dimm_info[i][j].x16_width = (addr_decode &
+                               (IE31200_MAD_DIMM_A_WIDTH << j)) ? 1 : 0;
+                       edac_dbg(0, "size: 0x%x, rank: %d, width: %d\n",
+                                dimm_info[i][j].size,
+                                dimm_info[i][j].dual_rank,
+                                dimm_info[i][j].x16_width);
+               }
+       }
+
+       /*
+        * The DRAM rank boundary (DRB) register values are boundary addresses
+        * for each DRAM rank with a granularity of 64MB.  DRB regs are
+        * cumulative; the last one will contain the total memory
+        * contained in all ranks.
+        */
+       for (i = 0; i < IE31200_DIMMS_PER_CHANNEL; i++) {
+               for (j = 0; j < IE31200_CHANNELS; j++) {
+                       struct dimm_info *dimm;
+                       unsigned long nr_pages;
+
+                       nr_pages = IE31200_PAGES(dimm_info[j][i].size);
+                       if (nr_pages == 0)
+                               continue;
+
+                       if (dimm_info[j][i].dual_rank) {
+                               nr_pages = nr_pages / 2;
+                               dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+                                                    mci->n_layers, (i * 2) + 1,
+                                                    j, 0);
+                               dimm->nr_pages = nr_pages;
+                               edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+                               dimm->grain = 8; /* just a guess */
+                               dimm->mtype = MEM_DDR3;
+                               dimm->dtype = DEV_UNKNOWN;
+                               dimm->edac_mode = EDAC_UNKNOWN;
+                       }
+                       dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
+                                            mci->n_layers, i * 2, j, 0);
+                       dimm->nr_pages = nr_pages;
+                       edac_dbg(0, "set nr pages: 0x%lx\n", nr_pages);
+                       dimm->grain = 8; /* same guess */
+                       dimm->mtype = MEM_DDR3;
+                       dimm->dtype = DEV_UNKNOWN;
+                       dimm->edac_mode = EDAC_UNKNOWN;
+               }
+       }
+
+       ie31200_clear_error_info(mci);
+
+       if (edac_mc_add_mc(mci)) {
+               edac_dbg(3, "MC: failed edac_mc_add_mc()\n");
+               ret = -ENODEV;
+               goto fail_unmap;
+       }
+
+       /* get this far and it's successful */
+       edac_dbg(3, "MC: success\n");
+       return 0;
+
+fail_unmap:
+       iounmap(window);
+
+fail_free:
+       edac_mc_free(mci);
+
+       return ret;
+}
+
+static int ie31200_init_one(struct pci_dev *pdev,
+                           const struct pci_device_id *ent)
+{
+       edac_dbg(0, "MC:\n");
+
+       if (pci_enable_device(pdev) < 0)
+               return -EIO;
+
+       return ie31200_probe1(pdev, ent->driver_data);
+}
+
+static void ie31200_remove_one(struct pci_dev *pdev)
+{
+       struct mem_ctl_info *mci;
+       struct ie31200_priv *priv;
+
+       edac_dbg(0, "\n");
+       mci = edac_mc_del_mc(&pdev->dev);
+       if (!mci)
+               return;
+       priv = mci->pvt_info;
+       iounmap(priv->window);
+       edac_mc_free(mci);
+}
+
+static const struct pci_device_id ie31200_pci_tbl[] = {
+       {
+               PCI_VEND_DEV(INTEL, IE31200_HB_1), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               IE31200},
+       {
+               PCI_VEND_DEV(INTEL, IE31200_HB_2), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               IE31200},
+       {
+               PCI_VEND_DEV(INTEL, IE31200_HB_3), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               IE31200},
+       {
+               PCI_VEND_DEV(INTEL, IE31200_HB_4), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               IE31200},
+       {
+               PCI_VEND_DEV(INTEL, IE31200_HB_5), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               IE31200},
+       {
+               PCI_VEND_DEV(INTEL, IE31200_HB_6), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               IE31200},
+       {
+               PCI_VEND_DEV(INTEL, IE31200_HB_7), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+               IE31200},
+       {
+               0,
+       }            /* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, ie31200_pci_tbl);
+
+static struct pci_driver ie31200_driver = {
+       .name = EDAC_MOD_STR,
+       .probe = ie31200_init_one,
+       .remove = ie31200_remove_one,
+       .id_table = ie31200_pci_tbl,
+};
+
+static int __init ie31200_init(void)
+{
+       edac_dbg(3, "MC:\n");
+       /* Ensure that the OPSTATE is set correctly for POLL or NMI */
+       opstate_init();
+
+       return pci_register_driver(&ie31200_driver);
+}
+
+static void __exit ie31200_exit(void)
+{
+       edac_dbg(3, "MC:\n");
+       pci_unregister_driver(&ie31200_driver);
+}
+
+module_init(ie31200_init);
+module_exit(ie31200_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jason Baron <jbaron@akamai.com>");
+MODULE_DESCRIPTION("MC support for Intel Processor E3-1200 memory hub controllers");
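The lo_hi_readq() accessor used throughout this driver reads a 64-bit MMIO register as two 32-bit halves, low word first, satisfying the datasheet rule against accesses that cross a DW boundary. A sketch of what it amounts to (hypothetical helper name); note it matches the open-coded x38_readq() that the x38_edac hunk below deletes in favor of the shared accessor:

#include <linux/io.h>

/* Read a 64-bit MMIO register as two 32-bit accesses, low half first. */
static u64 lo_hi_read64(const void __iomem *addr)
{
	u64 lo = readl(addr);
	u64 hi = readl(addr + 4);

	return lo | (hi << 32);
}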
index 5f43620..f78c1c5 100644 (file)
@@ -78,7 +78,8 @@ static const char * const f15h_mc1_mce_desc[] = {
        "uop queue",
        "insn buffer",
        "predecode buffer",
-       "fetch address FIFO"
+       "fetch address FIFO",
+       "dispatch uop queue"
 };
 
 static const char * const f15h_mc2_mce_desc[] = {
@@ -267,6 +268,12 @@ static bool f15h_mc0_mce(u16 ec, u8 xec)
                        pr_cont("System Read Data Error.\n");
                else
                        pr_cont(" Internal error condition type %d.\n", xec);
+       } else if (INT_ERROR(ec)) {
+               if (xec <= 0x1f)
+                       pr_cont("Hardware Assert.\n");
+               else
+                       ret = false;
+
        } else
                ret = false;
 
@@ -373,7 +380,7 @@ static bool f15h_mc1_mce(u16 ec, u8 xec)
                pr_cont("%s.\n", f15h_mc1_mce_desc[xec-4]);
                break;
 
-       case 0x11 ... 0x14:
+       case 0x11 ... 0x15:
                pr_cont("Decoder %s parity error.\n", f15h_mc1_mce_desc[xec-4]);
                break;
 
@@ -397,10 +404,20 @@ static void decode_mc1_mce(struct mce *m)
                bool k8 = (boot_cpu_data.x86 == 0xf && (m->status & BIT_64(58)));
 
                pr_cont("during %s.\n", (k8 ? "system linefill" : "NB data read"));
+       } else if (INT_ERROR(ec)) {
+               if (xec <= 0x3f)
+                       pr_cont("Hardware Assert.\n");
+               else
+                       goto wrong_mc1_mce;
        } else if (fam_ops->mc1_mce(ec, xec))
                ;
        else
-               pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
+               goto wrong_mc1_mce;
+
+       return;
+
+wrong_mc1_mce:
+       pr_emerg(HW_ERR "Corrupted MC1 MCE info?\n");
 }
 
 static bool k8_mc2_mce(u16 ec, u8 xec)
@@ -468,6 +485,11 @@ static bool f15h_mc2_mce(u16 ec, u8 xec)
                default:
                        ret = false;
                }
+       } else if (INT_ERROR(ec)) {
+               if (xec <= 0x3f)
+                       pr_cont("Hardware Assert.\n");
+               else
+                       ret = false;
        }
 
        return ret;
@@ -615,6 +637,7 @@ static void decode_mc4_mce(struct mce *m)
 static void decode_mc5_mce(struct mce *m)
 {
        struct cpuinfo_x86 *c = &boot_cpu_data;
+       u16 ec = EC(m->status);
        u8 xec = XEC(m->status, xec_mask);
 
        if (c->x86 == 0xf || c->x86 == 0x11)
@@ -622,6 +645,14 @@ static void decode_mc5_mce(struct mce *m)
 
        pr_emerg(HW_ERR "MC5 Error: ");
 
+       if (INT_ERROR(ec)) {
+               if (xec <= 0x1f) {
+                       pr_cont("Hardware Assert.\n");
+                       return;
+               } else
+                       goto wrong_mc5_mce;
+       }
+
        if (xec == 0x0 || xec == 0xc)
                pr_cont("%s.\n", mc5_mce_desc[xec]);
        else if (xec <= 0xd)
@@ -642,6 +673,10 @@ static void decode_mc6_mce(struct mce *m)
        pr_emerg(HW_ERR "MC6 Error: ");
 
        switch (xec) {
+       case 0x0:
+               pr_cont("Hardware Assertion");
+               break;
+
        case 0x1:
                pr_cont("Free List");
                break;
@@ -857,7 +892,8 @@ static int __init mce_amd_init(void)
                break;
 
        case 0x15:
-               xec_mask = 0x1f;
+               xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f;
+
                fam_ops->mc0_mce = f15h_mc0_mce;
                fam_ops->mc1_mce = f15h_mc1_mce;
                fam_ops->mc2_mce = f15h_mc2_mce;
index 4891b45..e644b52 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/pci.h>
 #include <linux/pci_ids.h>
 #include <linux/edac.h>
+
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
 #include "edac_core.h"
 
 #define X38_REVISION           "1.1"
@@ -161,11 +163,6 @@ static void x38_clear_error_info(struct mem_ctl_info *mci)
                         X38_ERRSTS_BITS);
 }
 
-static u64 x38_readq(const void __iomem *addr)
-{
-       return readl(addr) | (((u64)readl(addr + 4)) << 32);
-}
-
 static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
                                 struct x38_error_info *info)
 {
@@ -183,9 +180,9 @@ static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
        if (!(info->errsts & X38_ERRSTS_BITS))
                return;
 
-       info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+       info->eccerrlog[0] = lo_hi_readq(window + X38_C0ECCERRLOG);
        if (x38_channel_num == 2)
-               info->eccerrlog[1] = x38_readq(window + X38_C1ECCERRLOG);
+               info->eccerrlog[1] = lo_hi_readq(window + X38_C1ECCERRLOG);
 
        pci_read_config_word(pdev, X38_ERRSTS, &info->errsts2);
 
@@ -196,10 +193,10 @@ static void x38_get_and_clear_error_info(struct mem_ctl_info *mci,
         * should be UE info.
         */
        if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
-               info->eccerrlog[0] = x38_readq(window + X38_C0ECCERRLOG);
+               info->eccerrlog[0] = lo_hi_readq(window + X38_C0ECCERRLOG);
                if (x38_channel_num == 2)
                        info->eccerrlog[1] =
-                               x38_readq(window + X38_C1ECCERRLOG);
+                               lo_hi_readq(window + X38_C1ECCERRLOG);
        }
 
        x38_clear_error_info(mci);
index 4199849..145974f 100644 (file)
@@ -1,4 +1,5 @@
 menu "IEEE 1394 (FireWire) support"
+       depends on HAS_DMA
        depends on PCI || COMPILE_TEST
        # firewire-core does not depend on PCI but is
        # not useful without PCI controller driver
index 5798541..a66a321 100644 (file)
@@ -336,10 +336,10 @@ static const struct {
                QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
-               QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+               QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
-               0},
+               QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
index 4b9dc83..e992abc 100644 (file)
@@ -40,7 +40,7 @@ struct pstore_read_data {
 static inline u64 generic_id(unsigned long timestamp,
                             unsigned int part, int count)
 {
-       return (timestamp * 100 + part) * 1000 + count;
+       return ((u64) timestamp * 100 + part) * 1000 + count;
 }
 
 static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
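The cast matters because "timestamp * 100" is otherwise evaluated in the width of timestamp's own type, which is 32 bits on 32-bit targets; the product overflows long before being widened to the u64 return value. A short userspace demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t timestamp = 1400000000;	/* ~2014 in seconds since the epoch */
	unsigned int part = 1;
	int count = 2;

	uint64_t bad  = (timestamp * 100 + part) * 1000 + count;		/* wraps at 32 bits */
	uint64_t good = ((uint64_t)timestamp * 100 + part) * 1000 + count;	/* correct */

	printf("%llu vs %llu\n", (unsigned long long)bad, (unsigned long long)good);
	return 0;
}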
index cd36deb..dc79346 100644 (file)
@@ -346,6 +346,7 @@ static __initdata struct {
 
 struct param_info {
        int verbose;
+       int found;
        void *params;
 };
 
@@ -353,25 +354,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
                                       int depth, void *data)
 {
        struct param_info *info = data;
-       void *prop, *dest;
-       unsigned long len;
+       const void *prop;
+       void *dest;
        u64 val;
-       int i;
+       int i, len;
 
        if (depth != 1 ||
            (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
                return 0;
 
-       pr_info("Getting parameters from FDT:\n");
-
        for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
                prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
-               if (!prop) {
-                       pr_err("Can't find %s in device tree!\n",
-                              dt_params[i].name);
+               if (!prop)
                        return 0;
-               }
                dest = info->params + dt_params[i].offset;
+               info->found++;
 
                val = of_read_number(prop, len / sizeof(u32));
 
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
 int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
 {
        struct param_info info;
+       int ret;
+
+       pr_info("Getting EFI parameters from FDT:\n");
 
        info.verbose = verbose;
+       info.found = 0;
        info.params = params;
 
-       return of_scan_flat_dt(fdt_find_uefi_params, &info);
+       ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
+       if (!info.found)
+               pr_info("UEFI not found.\n");
+       else if (!ret)
+               pr_err("Can't find '%s' in device tree!\n",
+                      dt_params[info.found].name);
+
+       return ret;
 }
 #endif /* CONFIG_EFI_PARAMS_FROM_FDT */
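With the found counter, the caller distinguishes three outcomes: no UEFI properties at all (not a UEFI boot, just an info message), a partial match (the scan returns early at the first missing property, so dt_params[info.found].name is exactly the one that was missing), and a complete match. A sketch of that decoding, assuming properties are matched strictly in dt_params[] array order, as the scan loop does:

/* Decode the of_scan_flat_dt() outcome, given how fdt_find_uefi_params()
 * counts: found == 0 means no UEFI chosen-node properties were present;
 * found > 0 with ret == 0 means the scan stopped at the first missing one. */
static const char *first_missing_param(int found, int ret, const char *names[])
{
	if (!found)
		return NULL;		/* not a UEFI boot; nothing is missing */
	if (!ret)
		return names[found];	/* the property the scan failed on */
	return NULL;			/* ret != 0: every property was found */
}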
index 5c6a8e8..507a3df 100644 (file)
@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        u32 fdt_val32;
        u64 fdt_val64;
 
-       /*
-        * Copy definition of linux_banner here.  Since this code is
-        * built as part of the decompressor for ARM v7, pulling
-        * in version.c where linux_banner is defined for the
-        * kernel brings other kernel dependencies with it.
-        */
-       const char linux_banner[] =
-           "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
-           LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
-
        /* Do some checks on the provided FDT, if it exists */
        if (orig_fdt) {
                if (fdt_check_header(orig_fdt)) {
@@ -63,7 +53,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
         */
        prev = 0;
        for (;;) {
-               const char *type, *name;
+               const char *type;
                int len;
 
                node = fdt_next_node(fdt, prev, NULL);
index fe7c0e2..57adbc9 100644 (file)
@@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi)
                        if (spi_present_mask & (1 << addr))
                                chips++;
                }
-               if (!chips)
-                       return -ENODEV;
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi)
                if (!(spi_present_mask & (1 << addr)))
                        continue;
                chips--;
-               if (chips < 0) {
-                       dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
-                       goto fail;
-               }
                data->mcp[addr] = &data->chip[chips];
                status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
                                            0x40 | (addr << 1), type, base,
index 0c9f803..b6ae89e 100644 (file)
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
 
 static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
        .map    = gpio_rcar_irq_domain_map,
+       .xlate  = irq_domain_xlate_twocell,
 };
 
 struct gpio_rcar_info {
index 03711d0..8218078 100644 (file)
@@ -419,8 +419,9 @@ long drm_ioctl(struct file *filp,
                        retcode = -EFAULT;
                        goto err_i1;
                }
-       } else
+       } else if (cmd & IOC_OUT) {
                memset(kdata, 0, usize);
+       }
 
        if (ioctl->flags & DRM_UNLOCKED)
                retcode = func(dev, kdata, file_priv);
index 7c2497d..0dc57d5 100644 (file)
@@ -64,6 +64,7 @@
 void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
                uint32_t flags)
 {
+       memset(ctx, 0, sizeof(*ctx));
        ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
        INIT_LIST_HEAD(&ctx->locked);
 }
index 482127f..9e530f2 100644 (file)
@@ -40,7 +40,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
 {
        struct exynos_dpi *ctx = connector_to_dpi(connector);
 
-       if (!ctx->panel->connector)
+       if (ctx->panel && !ctx->panel->connector)
                drm_panel_attach(ctx->panel, &ctx->connector);
 
        return connector_status_connected;
index d91f277..ab7d182 100644 (file)
@@ -765,24 +765,24 @@ static int exynos_drm_init(void)
 
        return 0;
 
-err_unregister_pd:
-       platform_device_unregister(exynos_drm_pdev);
-
 err_remove_vidi:
 #ifdef CONFIG_DRM_EXYNOS_VIDI
        exynos_drm_remove_vidi();
+
+err_unregister_pd:
 #endif
+       platform_device_unregister(exynos_drm_pdev);
 
        return ret;
 }
 
 static void exynos_drm_exit(void)
 {
+       platform_driver_unregister(&exynos_drm_platform_driver);
 #ifdef CONFIG_DRM_EXYNOS_VIDI
        exynos_drm_remove_vidi();
 #endif
        platform_device_unregister(exynos_drm_pdev);
-       platform_driver_unregister(&exynos_drm_platform_driver);
 }
 
 module_init(exynos_drm_init);
index 36535f3..06cde45 100644 (file)
@@ -343,7 +343,7 @@ struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
 int exynos_dpi_remove(struct device *dev);
 #else
 static inline struct exynos_drm_display *
-exynos_dpi_probe(struct device *dev) { return 0; }
+exynos_dpi_probe(struct device *dev) { return NULL; }
 static inline int exynos_dpi_remove(struct device *dev) { return 0; }
 #endif
 
index bb45ab2..33161ad 100644 (file)
@@ -741,6 +741,8 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
                win_data = &ctx->win_data[i];
                if (win_data->enabled)
                        fimd_win_commit(mgr, i);
+               else
+                       fimd_win_disable(mgr, i);
        }
 
        fimd_commit(mgr);
index c104d0c..aa259b0 100644 (file)
@@ -2090,6 +2090,11 @@ out:
 
 static void hdmi_dpms(struct exynos_drm_display *display, int mode)
 {
+       struct hdmi_context *hdata = display->ctx;
+       struct drm_encoder *encoder = hdata->encoder;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct drm_crtc_helper_funcs *funcs = NULL;
+
        DRM_DEBUG_KMS("mode %d\n", mode);
 
        switch (mode) {
@@ -2099,6 +2104,20 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
+               /*
+                * The SFRs of VP and Mixer are updated by the Vertical Sync
+                * of the Timing generator, which is a part of HDMI, so the
+                * sequence for disabling the TV Subsystem should be:
+                *      VP -> Mixer -> HDMI
+                *
+                * The code below disables the Mixer and VP (if used) prior
+                * to disabling HDMI.
+                */
+               if (crtc)
+                       funcs = crtc->helper_private;
+               if (funcs && funcs->dpms)
+                       (*funcs->dpms)(crtc, mode);
+
                hdmi_poweroff(display);
                break;
        default:
index 4c5aed7..7529946 100644 (file)
@@ -377,6 +377,20 @@ static void mixer_run(struct mixer_context *ctx)
        mixer_regs_dump(ctx);
 }
 
+static void mixer_stop(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       int timeout = 20;
+
+       mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+       while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+                       --timeout)
+               usleep_range(10000, 12000);
+
+       mixer_regs_dump(ctx);
+}
+
 static void vp_video_buffer(struct mixer_context *ctx, int win)
 {
        struct mixer_resources *res = &ctx->mixer_res;
@@ -497,13 +511,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 static void mixer_layer_update(struct mixer_context *ctx)
 {
        struct mixer_resources *res = &ctx->mixer_res;
-       u32 val;
-
-       val = mixer_reg_read(res, MXR_CFG);
 
-       /* allow one update per vsync only */
-       if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
-               mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+       mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
 }
 
 static void mixer_graph_buffer(struct mixer_context *ctx, int win)
@@ -1010,6 +1019,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
        }
        mutex_unlock(&mixer_ctx->mixer_mutex);
 
+       drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+
        atomic_set(&mixer_ctx->wait_vsync_event, 1);
 
        /*
@@ -1020,6 +1031,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
                                !atomic_read(&mixer_ctx->wait_vsync_event),
                                HZ/20))
                DRM_DEBUG_KMS("vblank wait timed out.\n");
+
+       drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
 }
 
 static void mixer_window_suspend(struct exynos_drm_manager *mgr)
@@ -1061,7 +1074,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
                mutex_unlock(&ctx->mixer_mutex);
                return;
        }
-       ctx->powered = true;
+
        mutex_unlock(&ctx->mixer_mutex);
 
        pm_runtime_get_sync(ctx->dev);
@@ -1072,6 +1085,12 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
                clk_prepare_enable(res->sclk_mixer);
        }
 
+       mutex_lock(&ctx->mixer_mutex);
+       ctx->powered = true;
+       mutex_unlock(&ctx->mixer_mutex);
+
+       mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+
        mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
        mixer_win_reset(ctx);
 
@@ -1084,14 +1103,21 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
        struct mixer_resources *res = &ctx->mixer_res;
 
        mutex_lock(&ctx->mixer_mutex);
-       if (!ctx->powered)
-               goto out;
+       if (!ctx->powered) {
+               mutex_unlock(&ctx->mixer_mutex);
+               return;
+       }
        mutex_unlock(&ctx->mixer_mutex);
 
+       mixer_stop(ctx);
        mixer_window_suspend(mgr);
 
        ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
 
+       mutex_lock(&ctx->mixer_mutex);
+       ctx->powered = false;
+       mutex_unlock(&ctx->mixer_mutex);
+
        clk_disable_unprepare(res->mixer);
        if (ctx->vp_enabled) {
                clk_disable_unprepare(res->vp);
@@ -1099,12 +1125,6 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
        }
 
        pm_runtime_put_sync(ctx->dev);
-
-       mutex_lock(&ctx->mixer_mutex);
-       ctx->powered = false;
-
-out:
-       mutex_unlock(&ctx->mixer_mutex);
 }
 
 static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
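The wait_for_vblank change above brackets the sleep with drm_vblank_get()/drm_vblank_put() so the vblank interrupt stays enabled for the CRTC while the code waits; without the reference the IRQ can be disabled underneath the waiter and the wait always times out. A hedged sketch of the pattern, with an illustrative wait mechanism (the driver's actual event plumbing differs):

#include <drm/drmP.h>

static void wait_one_vblank(struct drm_device *dev, int pipe,
			    wait_queue_head_t *wq, atomic_t *pending)
{
	if (drm_vblank_get(dev, pipe))	/* keeps the vblank IRQ on; may fail */
		return;

	atomic_set(pending, 1);		/* cleared by the vblank IRQ handler */
	if (!wait_event_timeout(*wq, !atomic_read(pending), HZ / 20))
		DRM_DEBUG_KMS("vblank wait timed out\n");

	drm_vblank_put(dev, pipe);	/* allow the IRQ to be turned off again */
}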
index 4537026..5f32e1a 100644 (file)
@@ -78,6 +78,7 @@
 #define MXR_STATUS_BIG_ENDIAN          (1 << 3)
 #define MXR_STATUS_ENDIAN_MASK         (1 << 3)
 #define MXR_STATUS_SYNC_ENABLE         (1 << 2)
+#define MXR_STATUS_REG_IDLE            (1 << 1)
 #define MXR_STATUS_REG_RUN             (1 << 0)
 
 /* bits for MXR_CFG */
index 240c331..ac357b0 100644 (file)
@@ -810,6 +810,12 @@ static int
 tda998x_encoder_mode_valid(struct drm_encoder *encoder,
                          struct drm_display_mode *mode)
 {
+       if (mode->clock > 150000)
+               return MODE_CLOCK_HIGH;
+       if (mode->htotal >= BIT(13))
+               return MODE_BAD_HVALUE;
+       if (mode->vtotal >= BIT(11))
+               return MODE_BAD_VVALUE;
        return MODE_OK;
 }
 
@@ -1048,8 +1054,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
                        return i;
                }
        } else {
-               for (i = 10; i > 0; i--) {
-                       msleep(10);
+               for (i = 100; i > 0; i--) {
+                       msleep(1);
                        ret = reg_read(priv, REG_INT_FLAGS_2);
                        if (ret < 0)
                                return ret;
@@ -1183,7 +1189,6 @@ static void
 tda998x_encoder_destroy(struct drm_encoder *encoder)
 {
        struct tda998x_priv *priv = to_tda998x_priv(encoder);
-       drm_i2c_encoder_destroy(encoder);
 
        /* disable all IRQs and free the IRQ handler */
        cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
@@ -1193,6 +1198,7 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
 
        if (priv->cec)
                i2c_unregister_device(priv->cec);
+       drm_i2c_encoder_destroy(encoder);
        kfree(priv);
 }
 
index 601caa8..b8c6892 100644 (file)
@@ -446,7 +446,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
                memset(&stats, 0, sizeof(stats));
                stats.file_priv = file->driver_priv;
+               spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, per_file_stats, &stats);
+               spin_unlock(&file->table_lock);
                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
index 4c22a5b..d443441 100644 (file)
@@ -36,6 +36,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include <linux/pci.h>
+#include <linux/console.h>
+#include <linux/vt.h>
 #include <linux/vgaarb.h>
 #include <linux/acpi.h>
 #include <linux/pnp.h>
@@ -1386,7 +1388,6 @@ cleanup_gem:
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        WARN_ON(dev_priv->mm.aliasing_ppgtt);
-       drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_irq:
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1450,6 +1451,39 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 }
 #endif
 
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       int ret = 0;
+
+       DRM_INFO("Replacing VGA console driver\n");
+
+       console_lock();
+       if (con_is_bound(&vga_con))
+               ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+       if (ret == 0) {
+               ret = do_unregister_con_driver(&vga_con);
+
+               /* Ignore "already unregistered". */
+               if (ret == -ENODEV)
+                       ret = 0;
+       }
+       console_unlock();
+
+       return ret;
+}
+#endif
+
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
        const struct intel_device_info *info = &dev_priv->info;
@@ -1623,8 +1657,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto out_regs;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = i915_kick_out_vgacon(dev_priv);
+               if (ret) {
+                       DRM_ERROR("failed to remove conflicting VGA console\n");
+                       goto out_gtt;
+               }
+
                i915_kick_out_firmware_fb(dev_priv);
+       }
 
        pci_set_master(dev->pdev);
 
@@ -1756,8 +1797,6 @@ out_mtrrfree:
        arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
-       list_del(&dev_priv->gtt.base.global_link);
-       drm_mm_takedown(&dev_priv->gtt.base.mm);
        dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 out_regs:
        intel_uncore_fini(dev);
@@ -1846,7 +1885,6 @@ int i915_driver_unload(struct drm_device *dev)
                        i915_free_hws(dev);
        }
 
-       list_del(&dev_priv->gtt.base.global_link);
        WARN_ON(!list_empty(&dev_priv->vm_list));
 
        drm_vblank_cleanup(dev);
index 49414d3..374f964 100644 (file)
@@ -656,6 +656,7 @@ enum intel_sbi_destination {
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_BACKLIGHT_PRESENT (1<<3)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -977,6 +978,8 @@ struct i915_power_well {
        bool always_on;
        /* power well enable/disable usage count */
        int count;
+       /* cached hw enabled state */
+       bool hw_enabled;
        unsigned long domains;
        unsigned long data;
        const struct i915_power_well_ops *ops;
index f361263..d893e4d 100644 (file)
@@ -1616,22 +1616,6 @@ out:
        return ret;
 }
 
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
-       struct i915_vma *vma;
-
-       /*
-        * Only the global gtt is relevant for gtt memory mappings, so restrict
-        * list traversal to objects bound into the global address space. Note
-        * that the active list should be empty, but better safe than sorry.
-        */
-       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
-       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        obj->fault_mappable = false;
 }
 
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+               i915_gem_release_mmap(obj);
+}
+
 uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
index 3ffe308..a5ddf3b 100644 (file)
@@ -598,6 +598,7 @@ static int do_switch(struct intel_engine_cs *ring,
        struct intel_context *from = ring->last_context;
        struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
        u32 hw_flags = 0;
+       bool uninitialized = false;
        int ret, i;
 
        if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -696,19 +697,20 @@ static int do_switch(struct intel_engine_cs *ring,
                i915_gem_context_unreference(from);
        }
 
+       uninitialized = !to->is_initialized && from == NULL;
+       to->is_initialized = true;
+
 done:
        i915_gem_context_reference(to);
        ring->last_context = to;
        to->last_ring = ring;
 
-       if (ring->id == RCS && !to->is_initialized && from == NULL) {
+       if (uninitialized) {
                ret = i915_gem_render_state_init(ring);
                if (ret)
                        DRM_ERROR("init render state: %d\n", ret);
        }
 
-       to->is_initialized = true;
-
        return 0;
 
 unpin_out:
index eec820a..8b3cde7 100644 (file)
@@ -1992,7 +1992,10 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 
        struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
-       drm_mm_takedown(&vm->mm);
+       if (drm_mm_initialized(&vm->mm)) {
+               drm_mm_takedown(&vm->mm);
+               list_del(&vm->global_link);
+       }
        iounmap(gtt->gsm);
        teardown_scratch_page(vm->dev);
 }
@@ -2025,6 +2028,10 @@ static int i915_gmch_probe(struct drm_device *dev,
 
 static void i915_gmch_remove(struct i915_address_space *vm)
 {
+       if (drm_mm_initialized(&vm->mm)) {
+               drm_mm_takedown(&vm->mm);
+               list_del(&vm->global_link);
+       }
        intel_gmch_remove();
 }
 
index 3521f99..34894b5 100644 (file)
@@ -31,7 +31,7 @@
 struct i915_render_state {
        struct drm_i915_gem_object *obj;
        unsigned long ggtt_offset;
-       void *batch;
+       u32 *batch;
        u32 size;
        u32 len;
 };
@@ -80,7 +80,7 @@ free:
 
 static void render_state_free(struct i915_render_state *so)
 {
-       kunmap(so->batch);
+       kunmap(kmap_to_page(so->batch));
        i915_gem_object_ggtt_unpin(so->obj);
        drm_gem_object_unreference(&so->obj->base);
        kfree(so);
index 62ef55b..7465ab0 100644 (file)
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        if (base == 0)
                return 0;
 
+       /* make sure we don't clobber the GTT if it's within stolen memory */
+       if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+               struct {
+                       u32 start, end;
+               } stolen[2] = {
+                       { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+                       { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+               };
+               u64 gtt_start, gtt_end;
+
+               gtt_start = I915_READ(PGTBL_CTL);
+               if (IS_GEN4(dev))
+                       gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+                               (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+               else
+                       gtt_start &= PGTBL_ADDRESS_LO_MASK;
+               gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+               if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+                       stolen[0].end = gtt_start;
+               if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+                       stolen[1].start = gtt_end;
+
+               /* pick the larger of the two chunks */
+               if (stolen[0].end - stolen[0].start >
+                   stolen[1].end - stolen[1].start) {
+                       base = stolen[0].start;
+                       dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+               } else {
+                       base = stolen[1].start;
+                       dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+               }
+
+               if (stolen[0].start != stolen[1].start ||
+                   stolen[0].end != stolen[1].end) {
+                       DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+                                     (unsigned long long) gtt_start,
+                                     (unsigned long long) gtt_end - 1);
+                       DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+                                     base, base + (u32) dev_priv->gtt.stolen_size - 1);
+               }
+       }
+
        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
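The stolen-memory fixup above splits the stolen region into the chunk below the GTT and the chunk above it, then keeps the larger piece. A compact sketch of the interval arithmetic with sample numbers (hypothetical helper; the driver open-codes this against dev_priv state):

#include <linux/types.h>

/* Stolen region is [*base, *base + *size); the GTT occupies
 * [gtt_start, gtt_end). Keep the larger piece on either side, e.g.
 * base = 0x10000000, size = 64MB, GTT at [0x13f00000, 0x13f80000)
 * -> below = ~63MB, above = 0.5MB, so the lower chunk wins. */
static void trim_stolen(u32 *base, u32 *size, u64 gtt_start, u64 gtt_end)
{
	u32 end = *base + *size;
	u32 below_end = end, above_start = *base;

	if (gtt_start >= *base && gtt_start < end)
		below_end = gtt_start;
	if (gtt_end > *base && gtt_end <= end)
		above_start = gtt_end;

	if (below_end - *base > end - above_start) {
		*size = below_end - *base;	/* chunk below the GTT */
	} else {
		*base = above_start;		/* chunk above the GTT */
		*size = end - above_start;
	}
}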
index 87ec60e..66cf417 100644 (file)
@@ -888,6 +888,8 @@ static void i915_gem_record_rings(struct drm_device *dev,
        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
 
+               error->ring[i].pid = -1;
+
                if (ring->dev == NULL)
                        continue;
 
@@ -895,7 +897,6 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
                i915_record_ring_state(dev, ring, &error->ring[i]);
 
-               error->ring[i].pid = -1;
                request = i915_gem_find_active_request(ring);
                if (request) {
                        /* We need to copy these to an anonymous buffer
index 6f8017a..c05c84f 100644 (file)
@@ -2845,20 +2845,27 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
-       u32 seqno, ctl;
+       u32 seqno;
 
-       ring->hangcheck.deadlock = true;
+       ring->hangcheck.deadlock++;
 
        signaller = semaphore_waits_for(ring, &seqno);
-       if (signaller == NULL || signaller->hangcheck.deadlock)
+       if (signaller == NULL)
                return -1;
 
+       /* Prevent pathological recursion due to driver bugs */
+       if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+               return -1;
+
+       if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+               return 1;
+
        /* cursory check for an unkickable deadlock */
-       ctl = I915_READ_CTL(signaller);
-       if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
+       if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+           semaphore_passed(signaller) < 0)
                return -1;
 
-       return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
+       return 0;
 }
 
 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
@@ -2867,7 +2874,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
        int i;
 
        for_each_ring(ring, dev_priv, i)
-               ring->hangcheck.deadlock = false;
+               ring->hangcheck.deadlock = 0;
 }
 
 static enum intel_ring_hangcheck_action
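
Moving from a boolean to a counter bounds the recursion: each visit increments the ring's deadlock count, and the walk aborts once any ring has been visited I915_NUM_RINGS times, so even a buggy cyclic wait graph terminates. A minimal sketch of that bound, using a hypothetical node type in place of the engine structs; the counters reset each pass, as semaphore_clear_deadlocks() does above.

#define NUM_NODES 4

struct node {
        int deadlock;               /* visits during this hangcheck pass */
        struct node *waits_on;      /* semaphore target, NULL if idle */
};

static int cycle_check(struct node *n)
{
        n->deadlock++;

        if (!n->waits_on)
                return 0;                         /* not waiting on anyone */
        if (n->waits_on->deadlock >= NUM_NODES)
                return -1;                        /* bounded: declare a cycle */

        return cycle_check(n->waits_on);
}
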
index e691b30..a5bab61 100644 (file)
@@ -942,6 +942,9 @@ enum punit_power_well {
 /*
  * Instruction and interrupt control regs
  */
+#define PGTBL_CTL      0x02020
+#define   PGTBL_ADDRESS_LO_MASK        0xfffff000 /* bits [31:12] */
+#define   PGTBL_ADDRESS_HI_MASK        0x000000f0 /* bits [35:32] (gen4) */
 #define PGTBL_ER       0x02024
 #define RENDER_RING_BASE       0x02000
 #define BSD_RING_BASE          0x04000
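
Per the comments added above, PGTBL_CTL carries address bits 31:12 directly, while gen4 keeps the extra bits 35:32 in register bits 7:4; shifting the high field left by 28 places it at bit 32, as the stolen-memory code earlier in this diff does. A standalone sketch of the reassembly:

#include <stdint.h>

static uint64_t pgtbl_base(uint32_t pgtbl_ctl)
{
        uint64_t lo = pgtbl_ctl & 0xfffff000;   /* PGTBL_ADDRESS_LO_MASK */
        uint64_t hi = pgtbl_ctl & 0x000000f0;   /* PGTBL_ADDRESS_HI_MASK */

        return lo | (hi << 28);                 /* bits 7:4 -> bits 35:32 */
}
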
index 1ee98f1..827498e 100644 (file)
@@ -315,9 +315,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
        const struct bdb_lfp_backlight_data *backlight_data;
        const struct bdb_lfp_backlight_data_entry *entry;
 
-       /* Err to enabling backlight if no backlight block. */
-       dev_priv->vbt.backlight.present = true;
-
        backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
        if (!backlight_data)
                return;
@@ -1088,6 +1085,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 
        dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
+       /* Default to having backlight */
+       dev_priv->vbt.backlight.present = true;
+
        /* LFP panel data */
        dev_priv->vbt.lvds_dither = 1;
        dev_priv->vbt.lvds_vbt = 0;
index efd3cf5..f0be855 100644 (file)
@@ -2087,6 +2087,7 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
 static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
                                          enum plane plane, enum pipe pipe)
 {
+       struct drm_device *dev = dev_priv->dev;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
        int reg;
@@ -2106,6 +2107,14 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
 
        I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
        intel_flush_primary_plane(dev_priv, plane);
+
+       /*
+        * BDW signals flip done immediately if the plane
+        * is disabled, even if the plane enable is already
+        * armed to occur at the next vblank :(
+        */
+       if (IS_BROADWELL(dev))
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
 /**
@@ -4564,7 +4573,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->active)
                return;
 
-       vlv_prepare_pll(intel_crtc);
+       is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+       if (!is_dsi && !IS_CHERRYVIEW(dev))
+               vlv_prepare_pll(intel_crtc);
 
        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4598,8 +4610,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);
 
-       is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
-
        if (!is_dsi) {
                if (IS_CHERRYVIEW(dev))
                        chv_enable_pll(intel_crtc);
@@ -11087,6 +11097,22 @@ const char *intel_output_name(int output)
        return names[output];
 }
 
+static bool intel_crt_present(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_ULT(dev))
+               return false;
+
+       if (IS_CHERRYVIEW(dev))
+               return false;
+
+       if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+               return false;
+
+       return true;
+}
+
 static void intel_setup_outputs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11095,7 +11121,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 
        intel_lvds_init(dev);
 
-       if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support)
+       if (intel_crt_present(dev))
                intel_crt_init(dev);
 
        if (HAS_DDI(dev)) {
@@ -11565,6 +11591,14 @@ static void quirk_invert_brightness(struct drm_device *dev)
        DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
+/* Some VBTs incorrectly indicate that no backlight is present */
+static void quirk_backlight_present(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
+       DRM_INFO("applying backlight present quirk\n");
+}
+
 struct intel_quirk {
        int device;
        int subsystem_vendor;
@@ -11633,6 +11667,15 @@ static struct intel_quirk intel_quirks[] = {
 
        /* Acer Aspire 5336 */
        { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+       /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+       { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+       /* Toshiba CB35 Chromebook (Celeron 2955U) */
+       { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+       /* HP Chromebook 14 (Celeron 2955U) */
+       { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
 };
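
For reference, a table like the one above is typically walked by matching the device ID plus subsystem IDs with a wildcard and invoking the callback. A hedged sketch of such a walker; the hook member and PCI_ANY_ID wildcard follow the usual i915 quirk pattern and are assumptions here, not part of this diff:

static void apply_quirks(struct drm_device *dev, int device,
                         int subsystem_vendor, int subsystem_device)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
                struct intel_quirk *q = &intel_quirks[i];

                if (q->device == device &&
                    (q->subsystem_vendor == subsystem_vendor ||
                     q->subsystem_vendor == PCI_ANY_ID) &&
                    (q->subsystem_device == subsystem_device ||
                     q->subsystem_device == PCI_ANY_ID))
                        q->hook(dev);
        }
}
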
 
 static void intel_init_quirks(struct drm_device *dev)
@@ -11871,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 * ...  */
                plane = crtc->plane;
                crtc->plane = !plane;
+               crtc->primary_enabled = true;
                dev_priv->display.crtc_disable(&crtc->base);
                crtc->plane = plane;
 
@@ -12411,8 +12455,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 
        for_each_pipe(i) {
                error->pipe[i].power_domain_on =
-                       intel_display_power_enabled_sw(dev_priv,
-                                                      POWER_DOMAIN_PIPE(i));
+                       intel_display_power_enabled_unlocked(dev_priv,
+                                                          POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;
 
@@ -12447,7 +12491,7 @@ intel_display_capture_error_state(struct drm_device *dev)
                enum transcoder cpu_transcoder = transcoders[i];
 
                error->transcoder[i].power_domain_on =
-                       intel_display_power_enabled_sw(dev_priv,
+                       intel_display_power_enabled_unlocked(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;
index 52fda95..8a1a4fb 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
@@ -336,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
 }
 
+/* Reboot notifier handler to shut down panel power, guaranteeing T12 timing.
+   This function is only applicable when the panel PM state is not tracked. */
+static int edp_notify_handler(struct notifier_block *this, unsigned long code,
+                             void *unused)
+{
+       struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
+                                                edp_notifier);
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_div;
+       u32 pp_ctrl_reg, pp_div_reg;
+       enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+       if (!is_edp(intel_dp) || code != SYS_RESTART)
+               return 0;
+
+       if (IS_VALLEYVIEW(dev)) {
+               pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+               pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
+               pp_div = I915_READ(pp_div_reg);
+               pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+               /* 0x1F write to PP_DIV_REG sets max cycle delay */
+               I915_WRITE(pp_div_reg, pp_div | 0x1F);
+               I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
+               msleep(intel_dp->panel_power_cycle_delay);
+       }
+
+       return 0;
+}
+
 static bool edp_have_panel_power(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -873,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);
 
-               for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
-                       for (clock = min_clock; clock <= max_clock; clock++) {
+               for (clock = min_clock; clock <= max_clock; clock++) {
+                       for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);
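
Swapping the loop nesting above makes the link clock the outer search variable, so the lowest rate that can carry the mode is selected before the lane count grows. A toy search showing the order, with illustrative numbers rather than the driver's bandwidth tables:

#include <stdio.h>

int main(void)
{
        const int rates[] = { 162000, 270000, 540000 };  /* kHz, DP link rates */
        const int mode_rate = 800000;                    /* kHz, example mode */

        for (int c = 0; c < 3; c++)
                for (int lanes = 1; lanes <= 4; lanes <<= 1)
                        if (rates[c] * lanes >= mode_rate) {
                                /* prints "pick 270000 kHz x 4 lanes" */
                                printf("pick %d kHz x %d lanes\n",
                                       rates[c], lanes);
                                return 0;
                        }
        return 1;
}
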
@@ -3707,6 +3740,10 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
                drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                edp_panel_vdd_off_sync(intel_dp);
                drm_modeset_unlock(&dev->mode_config.connection_mutex);
+               if (intel_dp->edp_notifier.notifier_call) {
+                       unregister_reboot_notifier(&intel_dp->edp_notifier);
+                       intel_dp->edp_notifier.notifier_call = NULL;
+               }
        }
        kfree(intel_dig_port);
 }
@@ -4184,6 +4221,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        }
        mutex_unlock(&dev->mode_config.mutex);
 
+       if (IS_VALLEYVIEW(dev)) {
+               intel_dp->edp_notifier.notifier_call = edp_notify_handler;
+               register_reboot_notifier(&intel_dp->edp_notifier);
+       }
+
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_panel_setup_backlight(connector);
 
index bda0ae3..f67340e 100644 (file)
@@ -538,6 +538,8 @@ struct intel_dp {
        unsigned long last_power_on;
        unsigned long last_backlight_off;
        bool psr_setup_done;
+       struct notifier_block edp_notifier;
+
        bool use_tps3;
        struct intel_connector *attached_connector;
 
@@ -950,8 +952,8 @@ int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_remove(struct drm_i915_private *);
 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
-                                   enum intel_display_power_domain domain);
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+                                         enum intel_display_power_domain domain);
 void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
index 02f99d7..3fd0829 100644 (file)
@@ -117,17 +117,18 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
        /* bandgap reset is needed every time we power gate */
        band_gap_reset(dev_priv);
 
+       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+       usleep_range(2500, 3000);
+
        val = I915_READ(MIPI_PORT_CTRL(pipe));
        I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
        usleep_range(1000, 1500);
-       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
-       usleep_range(2000, 2500);
-       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
-       usleep_range(2000, 2500);
-       I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
-       usleep_range(2000, 2500);
+
+       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+       usleep_range(2500, 3000);
+
        I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
-       usleep_range(2000, 2500);
+       usleep_range(2500, 3000);
 }
 
 static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -271,23 +272,23 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
 
-       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
        usleep_range(2000, 2500);
 
-       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
        usleep_range(2000, 2500);
 
-       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
        usleep_range(2000, 2500);
 
-       val = I915_READ(MIPI_PORT_CTRL(pipe));
-       I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
-       usleep_range(1000, 1500);
-
        if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
                                        == 0x00000), 30))
                DRM_ERROR("DSI LP not going Low\n");
 
+       val = I915_READ(MIPI_PORT_CTRL(pipe));
+       I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+       usleep_range(1000, 1500);
+
        I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
        usleep_range(2000, 2500);
 
index 3eeb21b..933c863 100644 (file)
@@ -404,12 +404,6 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
        else
                cmd |= DPI_LP_MODE;
 
-       /* DPI virtual channel?! */
-
-       mask = DPI_FIFO_EMPTY;
-       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
-               DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
-
        /* clear bit */
        I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
 
index 2312602..5e5a72f 100644 (file)
@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
        pipe_config->adjusted_mode.flags |= flags;
 
+       /* gen2/3 store dither state in pfit control, needs to match */
+       if (INTEL_INFO(dev)->gen < 4) {
+               tmp = I915_READ(PFIT_CONTROL);
+
+               pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+       }
+
        dotclock = pipe_config->port_clock;
 
        if (HAS_PCH_SPLIT(dev_priv->dev))
index 2e2c71f..4f6b539 100644 (file)
@@ -403,6 +403,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
+       /*
+        * If the acpi_video interface is not supposed to be used, don't
+        * bother processing backlight level change requests from firmware.
+        */
+       if (!acpi_video_verify_backlight_support()) {
+               DRM_DEBUG_KMS("opregion backlight request ignored\n");
+               return 0;
+       }
+
        if (!(bclp & ASLE_BCLP_VALID))
                return ASLC_BACKLIGHT_FAILED;
 
index 5e6c888..12b02fe 100644 (file)
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
                pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
                                 PFIT_FILTER_FUZZY);
 
-       /* Make sure pre-965 set dither correctly for 18bpp panels. */
-       if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
-               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
 out:
        if ((pfit_control & PFIT_ENABLE) == 0) {
                pfit_control = 0;
                pfit_pgm_ratios = 0;
        }
 
+       /* Make sure pre-965 set dither correctly for 18bpp panels. */
+       if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
        pipe_config->gmch_pfit.control = pfit_control;
        pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
        pipe_config->gmch_pfit.lvds_border_bits = border;
@@ -798,9 +798,6 @@ static void i965_enable_backlight(struct intel_connector *connector)
        ctl = freq << 16;
        I915_WRITE(BLC_PWM_CTL, ctl);
 
-       /* XXX: combine this into above write? */
-       intel_panel_actually_set_backlight(connector, panel->backlight.level);
-
        ctl2 = BLM_PIPE(pipe);
        if (panel->backlight.combination_mode)
                ctl2 |= BLM_COMBINATION_MODE;
@@ -809,6 +806,8 @@ static void i965_enable_backlight(struct intel_connector *connector)
        I915_WRITE(BLC_PWM_CTL2, ctl2);
        POSTING_READ(BLC_PWM_CTL2);
        I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+       intel_panel_actually_set_backlight(connector, panel->backlight.level);
 }
 
 static void vlv_enable_backlight(struct intel_connector *connector)
@@ -1119,8 +1118,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
        int ret;
 
        if (!dev_priv->vbt.backlight.present) {
-               DRM_DEBUG_KMS("native backlight control not available per VBT\n");
-               return 0;
+               if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+                       DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+               } else {
+                       DRM_DEBUG_KMS("no backlight present per VBT\n");
+                       return 0;
+               }
        }
 
        /* set level and max in panel struct */
index d1e53ab..ee72807 100644 (file)
@@ -511,8 +511,7 @@ void intel_update_fbc(struct drm_device *dev)
        obj = intel_fb->obj;
        adjusted_mode = &intel_crtc->config.adjusted_mode;
 
-       if (i915.enable_fbc < 0 &&
-           INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+       if (i915.enable_fbc < 0) {
                if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
                        DRM_DEBUG_KMS("disabled per chip default\n");
                goto out_disable;
@@ -3210,6 +3209,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 */
 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+
+       /* Latest VLV doesn't need to force the gfx clock */
+       if (dev->pdev->revision >= 0xd) {
+               valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+               return;
+       }
+
        /*
         * When we are idle, drop to the minimum voltage state.
         */
@@ -3506,15 +3513,11 @@ static void gen8_enable_rps(struct drm_device *dev)
 
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-       /* WaDisablePwrmtrEvent:chv (pre-production hw) */
-       I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
-       I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
-
        /* 5: Enable RPS */
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
                   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                  GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+                  GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
                   GEN6_RP_DOWN_IDLE_AVG);
@@ -5608,8 +5611,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 }
 
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
-                                   enum intel_display_power_domain domain)
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+                                         enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
@@ -5620,16 +5623,19 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
                return false;
 
        power_domains = &dev_priv->power_domains;
+
        is_enabled = true;
+
        for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
                if (power_well->always_on)
                        continue;
 
-               if (!power_well->count) {
+               if (!power_well->hw_enabled) {
                        is_enabled = false;
                        break;
                }
        }
+
        return is_enabled;
 }
 
@@ -5637,30 +5643,15 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       bool is_enabled;
-       int i;
-
-       if (dev_priv->pm.suspended)
-               return false;
+       bool ret;
 
        power_domains = &dev_priv->power_domains;
 
-       is_enabled = true;
-
        mutex_lock(&power_domains->lock);
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-               if (power_well->always_on)
-                       continue;
-
-               if (!power_well->ops->is_enabled(dev_priv, power_well)) {
-                       is_enabled = false;
-                       break;
-               }
-       }
+       ret = intel_display_power_enabled_unlocked(dev_priv, domain);
        mutex_unlock(&power_domains->lock);
 
-       return is_enabled;
+       return ret;
 }
 
 /*
@@ -5981,6 +5972,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
                if (!power_well->count++) {
                        DRM_DEBUG_KMS("enabling %s\n", power_well->name);
                        power_well->ops->enable(dev_priv, power_well);
+                       power_well->hw_enabled = true;
                }
 
                check_power_well_state(dev_priv, power_well);
@@ -6010,6 +6002,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 
                if (!--power_well->count && i915.disable_power_well) {
                        DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+                       power_well->hw_enabled = false;
                        power_well->ops->disable(dev_priv, power_well);
                }
 
@@ -6024,33 +6017,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 static struct i915_power_domains *hsw_pwr;
 
 /* Display audio driver power well request */
-void i915_request_power_well(void)
+int i915_request_power_well(void)
 {
        struct drm_i915_private *dev_priv;
 
-       if (WARN_ON(!hsw_pwr))
-               return;
+       if (!hsw_pwr)
+               return -ENODEV;
 
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
        intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
 /* Display audio driver power well release */
-void i915_release_power_well(void)
+int i915_release_power_well(void)
 {
        struct drm_i915_private *dev_priv;
 
-       if (WARN_ON(!hsw_pwr))
-               return;
+       if (!hsw_pwr)
+               return -ENODEV;
 
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
        intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+
+       return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
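
A sketch of the consumer side this change enables: with int returns instead of void, an audio driver can fail gracefully when no i915 power well has been registered rather than tripping a WARN. The calling function is hypothetical; the symbols are the ones exported above.

int i915_request_power_well(void);   /* exported above */
int i915_release_power_well(void);

static int my_audio_resume(void)
{
        int err = i915_request_power_well();

        if (err)
                return err;             /* -ENODEV: no i915 power well */

        /* ... program the display audio hardware ... */

        return i915_release_power_well();
}
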
+
 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
 #define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
@@ -6270,8 +6286,11 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
        int i;
 
        mutex_lock(&power_domains->lock);
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
                power_well->ops->sync_hw(dev_priv, power_well);
+               power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+                                                                    power_well);
+       }
        mutex_unlock(&power_domains->lock);
 }
 
index 910c83c..e72017b 100644 (file)
@@ -55,7 +55,7 @@ struct intel_ring_hangcheck {
        u32 seqno;
        int score;
        enum intel_ring_hangcheck_action action;
-       bool deadlock;
+       int deadlock;
 };
 
 struct intel_ringbuffer {
index 6a4d5bc..20375cc 100644 (file)
@@ -1385,7 +1385,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
                         >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
        }
 
-       dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+       dotclock = pipe_config->port_clock;
+       if (pipe_config->pixel_multiplier)
+               dotclock /= pipe_config->pixel_multiplier;
 
        if (HAS_PCH_SPLIT(dev))
                ironlake_check_encoder_dotclock(pipe_config, dotclock);
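
The guard above avoids a divide-by-zero when the pipe config was never filled in. As a standalone sketch of the same check:

/* Illustrative helper, not driver code. */
static int stable_dotclock(int port_clock, int pixel_multiplier)
{
        /* A zeroed pipe config must not fault; fall back to the raw clock. */
        return pixel_multiplier ? port_clock / pixel_multiplier : port_clock;
}
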
index 1b66ddc..9a17b4e 100644 (file)
@@ -690,6 +690,14 @@ intel_post_enable_primary(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
+       /*
+        * BDW signals flip done immediately if the plane
+        * is disabled, even if the plane enable is already
+        * armed to occur at the next vblank :(
+        */
+       if (IS_BROADWELL(dev))
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+
        /*
         * FIXME IPS should be fine as long as one plane is
         * enabled, but in practice it seems to have problems
index 79cba59..4f6fef7 100644 (file)
@@ -320,7 +320,8 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
 
-       del_timer_sync(&dev_priv->uncore.force_wake_timer);
+       if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
+               gen6_force_wake_timer((unsigned long)dev_priv);
 
        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly
index ae750f6..7f7aade 100644 (file)
@@ -277,6 +277,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
        static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
        static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+       static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
        static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
 
        config.phy_init      = hdmi_phy_8x74_init;
@@ -286,6 +287,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        config.pwr_reg_names = pwr_reg_names;
        config.pwr_reg_cnt   = ARRAY_SIZE(pwr_reg_names);
        config.hpd_clk_names = hpd_clk_names;
+       config.hpd_freq      = hpd_clk_freq;
        config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
        config.pwr_clk_names = pwr_clk_names;
        config.pwr_clk_cnt   = ARRAY_SIZE(pwr_clk_names);
index 9fafee6..9d7723c 100644 (file)
@@ -87,6 +87,7 @@ struct hdmi_platform_config {
 
        /* clks that need to be on for hpd: */
        const char **hpd_clk_names;
+       const unsigned long *hpd_freq;
        int hpd_clk_cnt;
 
        /* clks that need to be on for screen pwr (ie pixel clk): */
index e56a619..28f7e3e 100644 (file)
@@ -127,6 +127,14 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
        }
 
        for (i = 0; i < config->hpd_clk_cnt; i++) {
+               if (config->hpd_freq && config->hpd_freq[i]) {
+                       ret = clk_set_rate(hdmi->hpd_clks[i],
+                                       config->hpd_freq[i]);
+                       if (ret)
+                               dev_warn(dev->dev, "failed to set clk %s (%d)\n",
+                                               config->hpd_clk_names[i], ret);
+               }
+
                ret = clk_prepare_enable(hdmi->hpd_clks[i]);
                if (ret) {
                        dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
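
The new hpd_freq table drives a set-rate-then-enable sequence: a zero entry means the boot/default rate is left alone, a failed clk_set_rate() only warns, and a failed enable is fatal for that clock. A hedged standalone helper showing the same flow (helper name hypothetical):

#include <linux/clk.h>
#include <linux/device.h>

static int hpd_clk_on(struct device *dev, struct clk *clk,
                      unsigned long freq, const char *name)
{
        int ret;

        if (freq) {
                ret = clk_set_rate(clk, freq);
                if (ret)
                        dev_warn(dev, "failed to set clk %s (%d)\n", name, ret);
        }

        ret = clk_prepare_enable(clk);
        if (ret)
                dev_err(dev, "failed to enable clk %s (%d)\n", name, ret);

        return ret;
}
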
index 42caf7f..71510ee 100644 (file)
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
+static const char *iommu_ports[] = {
+               "mdp_0",
+};
+
 static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
 
 static int mdp5_hw_init(struct msm_kms *kms)
@@ -104,6 +108,12 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp5_destroy(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       struct msm_mmu *mmu = mdp5_kms->mmu;
+
+       if (mmu) {
+               mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+               mmu->funcs->destroy(mmu);
+       }
        kfree(mdp5_kms);
 }
 
@@ -216,10 +226,6 @@ fail:
        return ret;
 }
 
-static const char *iommu_ports[] = {
-               "mdp_0",
-};
-
 static int get_clk(struct platform_device *pdev, struct clk **clkp,
                const char *name)
 {
@@ -317,17 +323,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                mmu = msm_iommu_new(dev, config->iommu);
                if (IS_ERR(mmu)) {
                        ret = PTR_ERR(mmu);
+                       dev_err(dev->dev, "failed to init iommu: %d\n", ret);
                        goto fail;
                }
+
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
-               if (ret)
+               if (ret) {
+                       dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
+                       mmu->funcs->destroy(mmu);
                        goto fail;
+               }
        } else {
                dev_info(dev->dev, "no iommu, fallback to phys "
                                "contig buffers for scanout\n");
                mmu = NULL;
        }
+       mdp5_kms->mmu = mmu;
 
        mdp5_kms->id = msm_register_mmu(dev, mmu);
        if (mdp5_kms->id < 0) {
index c8b1a25..6e981b6 100644 (file)
@@ -33,6 +33,7 @@ struct mdp5_kms {
 
        /* mapper-id used to request GEM buffer mapped for scanout: */
        int id;
+       struct msm_mmu *mmu;
 
        /* for tracking smp allocation amongst pipes: */
        mdp5_smp_state_t smp_state;
index 0d2562f..9a5d87d 100644 (file)
@@ -159,7 +159,7 @@ static int msm_unload(struct drm_device *dev)
 static int get_mdp_ver(struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
-       const static struct of_device_id match_types[] = { {
+       static const struct of_device_id match_types[] = { {
                .compatible = "qcom,mdss_mdp",
                .data   = (void *)5,
        }, {
index a752ab8..5107fc4 100644 (file)
@@ -59,7 +59,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        struct drm_framebuffer *fb = NULL;
        struct fb_info *fbi = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = {0};
-       dma_addr_t paddr;
+       uint32_t paddr;
        int ret, size;
 
        sizes->surface_bpp = 32;
index bb8026d..690d7e7 100644 (file)
@@ -278,6 +278,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint32_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct drm_device *dev = obj->dev;
        int ret = 0;
 
        if (!msm_obj->domain[id].iova) {
@@ -285,6 +286,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                struct msm_mmu *mmu = priv->mmus[id];
                struct page **pages = get_pages(obj);
 
+               if (!mmu) {
+                       dev_err(dev->dev, "null MMU pointer\n");
+                       return -EINVAL;
+               }
+
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
 
index 92b7459..4b2ad91 100644 (file)
@@ -28,7 +28,7 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
                unsigned long iova, int flags, void *arg)
 {
        DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
-       return 0;
+       return -ENOSYS;
 }
 
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -40,8 +40,10 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
        for (i = 0; i < cnt; i++) {
                struct device *msm_iommu_get_ctx(const char *ctx_name);
                struct device *ctx = msm_iommu_get_ctx(names[i]);
-               if (IS_ERR_OR_NULL(ctx))
+               if (IS_ERR_OR_NULL(ctx)) {
+                       dev_warn(dev->dev, "couldn't get %s context", names[i]);
                        continue;
+               }
                ret = iommu_attach_device(iommu->domain, ctx);
                if (ret) {
                        dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
@@ -52,6 +54,20 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
        return 0;
 }
 
+static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       int i;
+
+       for (i = 0; i < cnt; i++) {
+               struct device *msm_iommu_get_ctx(const char *ctx_name);
+               struct device *ctx = msm_iommu_get_ctx(names[i]);
+               if (IS_ERR_OR_NULL(ctx))
+                       continue;
+               iommu_detach_device(iommu->domain, ctx);
+       }
+}
+
 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
                struct sg_table *sgt, unsigned len, int prot)
 {
@@ -110,7 +126,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
 
                VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
 
-               BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+               BUG_ON(!PAGE_ALIGNED(bytes));
 
                da += bytes;
        }
@@ -127,6 +143,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
 
 static const struct msm_mmu_funcs funcs = {
                .attach = msm_iommu_attach,
+               .detach = msm_iommu_detach,
                .map = msm_iommu_map,
                .unmap = msm_iommu_unmap,
                .destroy = msm_iommu_destroy,
index 0303244..21da6d1 100644 (file)
@@ -22,6 +22,7 @@
 
 struct msm_mmu_funcs {
        int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+       void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
        int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
                        unsigned len, int prot);
        int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
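
A sketch of how the new detach op pairs with destroy at teardown, mirroring mdp5_destroy() above: detach the context banks first so the IOMMU stops translating for the device, then release the translation context (helper name hypothetical):

static void mmu_teardown(struct msm_mmu *mmu, const char **ports, int cnt)
{
        if (!mmu)
                return;

        mmu->funcs->detach(mmu, ports, cnt);
        mmu->funcs->destroy(mmu);
}
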
index 2b6156d..8b307e1 100644 (file)
@@ -140,6 +140,7 @@ nouveau-y += core/subdev/i2c/nv4e.o
 nouveau-y += core/subdev/i2c/nv50.o
 nouveau-y += core/subdev/i2c/nv94.o
 nouveau-y += core/subdev/i2c/nvd0.o
+nouveau-y += core/subdev/i2c/gf117.o
 nouveau-y += core/subdev/i2c/nve0.o
 nouveau-y += core/subdev/ibus/nvc0.o
 nouveau-y += core/subdev/ibus/nve0.o
index f199957..8d55ed6 100644 (file)
@@ -314,7 +314,7 @@ nvc0_identify(struct nouveau_device *device)
                device->cname = "GF117";
                device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
                device->oclass[NVDEV_SUBDEV_GPIO   ] =  nvd0_gpio_oclass;
-               device->oclass[NVDEV_SUBDEV_I2C    ] =  nvd0_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] =  gf117_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
index c41f656..9c38c5e 100644 (file)
@@ -99,8 +99,10 @@ _nouveau_disp_dtor(struct nouveau_object *object)
 
        nouveau_event_destroy(&disp->vblank);
 
-       list_for_each_entry_safe(outp, outt, &disp->outp, head) {
-               nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+       if (disp->outp.next) {
+               list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+                       nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+               }
        }
 
        nouveau_engine_destroy(&disp->base);
index 39562d4..5a5b59b 100644 (file)
@@ -241,7 +241,9 @@ dp_link_train_eq(struct dp_state *dp)
                dp_set_training_pattern(dp, 2);
 
        do {
-               if (dp_link_train_update(dp, dp->pc2, 400))
+               if ((tries &&
+                   dp_link_train_commit(dp, dp->pc2)) ||
+                   dp_link_train_update(dp, dp->pc2, 400))
                        break;
 
                eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
@@ -253,9 +255,6 @@ dp_link_train_eq(struct dp_state *dp)
                            !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
                                eq_done = false;
                }
-
-               if (dp_link_train_commit(dp, dp->pc2))
-                       break;
        } while (!eq_done && cr_done && ++tries <= 5);
 
        return eq_done ? 0 : -1;
index 1e85f36..2283c44 100644 (file)
@@ -1270,7 +1270,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
        i--;
 
        outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
-       if (!data)
+       if (!outp)
                return NULL;
 
        if (outp->info.location == 0) {
@@ -1516,11 +1516,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
                }
 
                switch ((ctrl & 0x000f0000) >> 16) {
-               case 6: datarate = pclk * 30 / 8; break;
-               case 5: datarate = pclk * 24 / 8; break;
+               case 6: datarate = pclk * 30; break;
+               case 5: datarate = pclk * 24; break;
                case 2:
                default:
-                       datarate = pclk * 18 / 8;
+                       datarate = pclk * 18;
                        break;
                }
 
index 48aa38a..fa30d81 100644 (file)
@@ -1159,11 +1159,11 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
        if (outp->info.type == DCB_OUTPUT_DP) {
                u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
                switch ((sync & 0x000003c0) >> 6) {
-               case 6: pclk = pclk * 30 / 8; break;
-               case 5: pclk = pclk * 24 / 8; break;
+               case 6: pclk = pclk * 30; break;
+               case 5: pclk = pclk * 24; break;
                case 2:
                default:
-                       pclk = pclk * 18 / 8;
+                       pclk = pclk * 18;
                        break;
                }
 
index 52c299c..eb2d778 100644 (file)
@@ -34,7 +34,7 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
        struct nvkm_output_dp *outp = (void *)base;
        bool retrain = true;
        u8 link[2], stat[3];
-       u32 rate;
+       u32 linkrate;
        int ret, i;
 
        /* check that the link is trained at a high enough rate */
@@ -44,8 +44,10 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
                goto done;
        }
 
-       rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
-       if (rate < ((datarate / 8) * 10)) {
+       linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
+       linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
+       datarate = (datarate + 9) / 10; /* -> decakilobits */
+       if (linkrate < datarate) {
                DBG("link not trained at sufficient rate\n");
                goto done;
        }
index e183277..7a1ebdf 100644 (file)
@@ -87,6 +87,7 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
                        struct nvkm_output_dp *outpdp = (void *)outp;
                        switch (data) {
                        case NV94_DISP_SOR_DP_PWR_STATE_OFF:
+                               nouveau_event_put(outpdp->irq);
                                ((struct nvkm_output_dp_impl *)nv_oclass(outp))
                                        ->lnk_pwr(outpdp, 0);
                                atomic_set(&outpdp->lt.done, 0);
index 2f7345f..7445f12 100644 (file)
@@ -54,7 +54,7 @@ mmio_list_base:
 #ifdef INCLUDE_CODE
 // reports an exception to the host
 //
-// In: $r15 error code (see nvc0.fuc)
+// In: $r15 error code (see os.h)
 //
 error:
        push $r14
index c8ddb8d..b4ad18b 100644 (file)
@@ -49,7 +49,7 @@ hub_mmio_list_next:
 #ifdef INCLUDE_CODE
 // reports an exception to the host
 //
-// In: $r15 error code (see nvc0.fuc)
+// In: $r15 error code (see os.h)
 //
 error:
        nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15)
@@ -343,13 +343,25 @@ ih:
        ih_no_ctxsw:
        and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
        bra e #ih_no_fwmthd
-               // none we handle, ack, and fall-through to unhandled
+               // none we handle; report to host and ack
+               nv_rd32($r15, NV_PGRAPH_TRAPPED_DATA_LO)
+               nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(4), 0, $r15)
+               nv_rd32($r15, NV_PGRAPH_TRAPPED_ADDR)
+               nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(3), 0, $r15)
+               extr $r14 $r15 16:18
+               shl b32 $r14 $r14 2
+               imm32($r15, NV_PGRAPH_FE_OBJECT_TABLE(0))
+               add b32 $r14 $r15
+               call(nv_rd32)
+               nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(2), 0, $r15)
+               mov $r15 E_BAD_FWMTHD
+               call(error)
                mov $r11 0x100
                nv_wr32(0x400144, $r11)
 
        // anything we didn't handle, bring it to the host's attention
        ih_no_fwmthd:
-       mov $r11 0x104 // FIFO | CHSW
+       mov $r11 0x504 // FIFO | CHSW | FWMTHD
        not b32 $r11
        and $r11 $r10 $r11
        bra e #ih_no_other
index 214dd16..5f953c5 100644 (file)
@@ -478,10 +478,10 @@ uint32_t gm107_grhub_code[] = {
        0x01040080,
        0xbd0001f6,
        0x01004104,
-       0x627e020f,
-       0x717e0006,
+       0xa87e020f,
+       0xb77e0006,
        0x100f0006,
-       0x0006b37e,
+       0x0006f97e,
        0x98000e98,
        0x207e010f,
        0x14950001,
@@ -523,8 +523,8 @@ uint32_t gm107_grhub_code[] = {
        0x800040b7,
        0xf40132b6,
        0x000fb41b,
-       0x0006b37e,
-       0x627e000f,
+       0x0006f97e,
+       0xa87e000f,
        0x00800006,
        0x01f60201,
        0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t gm107_grhub_code[] = {
        0x0009f602,
        0x32f404bd,
        0x0231f401,
-       0x0008367e,
+       0x00087c7e,
        0x99f094bd,
        0x17008007,
        0x0009f602,
@@ -563,7 +563,7 @@ uint32_t gm107_grhub_code[] = {
        0x37008006,
        0x0009f602,
        0x31f404bd,
-       0x08367e01,
+       0x087c7e01,
        0xf094bd00,
        0x00800699,
        0x09f60217,
@@ -572,7 +572,7 @@ uint32_t gm107_grhub_code[] = {
        0x20f92f0e,
        0x32f412b2,
        0x0232f401,
-       0x0008367e,
+       0x00087c7e,
        0x008020fc,
        0x02f602c0,
        0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t gm107_grhub_code[] = {
        0x23c8130e,
        0x0d0bf41f,
        0xf40131f4,
-       0x367e0232,
+       0x7c7e0232,
 /* 0x054e: chsw_done */
        0x01020008,
        0x02c30080,
@@ -593,7 +593,7 @@ uint32_t gm107_grhub_code[] = {
        0xb0ff2a0e,
        0x1bf401e4,
        0x7ef2b20c,
-       0xf40007d6,
+       0xf400081c,
 /* 0x057a: main_not_ctx_chan */
        0xe4b0400e,
        0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t gm107_grhub_code[] = {
        0x0009f602,
        0x32f404bd,
        0x0232f401,
-       0x0008367e,
+       0x00087c7e,
        0x99f094bd,
        0x17008007,
        0x0009f602,
@@ -642,238 +642,238 @@ uint32_t gm107_grhub_code[] = {
 /* 0x061a: ih_no_ctxsw */
        0xabe40000,
        0x0bf40400,
-       0x01004b10,
-       0x448ebfb2,
-       0x8f7e4001,
-/* 0x062e: ih_no_fwmthd */
-       0x044b0000,
-       0xffb0bd01,
-       0x0bf4b4ab,
-       0x0700800c,
-       0x000bf603,
-/* 0x0642: ih_no_other */
-       0x004004bd,
-       0x000af601,
-       0xf0fc04bd,
-       0xd0fce0fc,
-       0xa0fcb0fc,
-       0x80fc90fc,
-       0xfc0088fe,
-       0x0032f480,
-/* 0x0662: ctx_4170s */
-       0xf5f001f8,
-       0x8effb210,
-       0x7e404170,
-       0xf800008f,
-/* 0x0671: ctx_4170w */
-       0x41708e00,
+       0x07088e56,
        0x00657e40,
-       0xf0ffb200,
-       0x1bf410f4,
-/* 0x0683: ctx_redswitch */
-       0x4e00f8f3,
-       0xe5f00200,
-       0x20e5f040,
-       0x8010e5f0,
-       0xf6018500,
-       0x04bd000e,
-/* 0x069a: ctx_redswitch_delay */
-       0xf2b6080f,
-       0xfd1bf401,
-       0x0400e5f1,
-       0x0100e5f1,
-       0x01850080,
-       0xbd000ef6,
-/* 0x06b3: ctx_86c */
-       0x8000f804,
-       0xf6022300,
+       0x80ffb200,
+       0xf6020400,
        0x04bd000f,
-       0x148effb2,
-       0x8f7e408a,
-       0xffb20000,
-       0x41a88c8e,
+       0x4007048e,
+       0x0000657e,
+       0x0080ffb2,
+       0x0ff60203,
+       0xc704bd00,
+       0xee9450fe,
+       0x07008f02,
+       0x00efbb40,
+       0x0000657e,
+       0x02020080,
+       0xbd000ff6,
+       0x7e030f04,
+       0x4b0002f8,
+       0xbfb20100,
+       0x4001448e,
        0x00008f7e,
-/* 0x06d2: ctx_mem */
-       0x008000f8,
-       0x0ff60284,
-/* 0x06db: ctx_mem_wait */
-       0x8f04bd00,
-       0xcf028400,
-       0xfffd00ff,
-       0xf61bf405,
-/* 0x06ea: ctx_load */
-       0x94bd00f8,
-       0x800599f0,
-       0xf6023700,
-       0x04bd0009,
-       0xb87e0c0a,
-       0xf4bd0000,
-       0x02890080,
+/* 0x0674: ih_no_fwmthd */
+       0xbd05044b,
+       0xb4abffb0,
+       0x800c0bf4,
+       0xf6030700,
+       0x04bd000b,
+/* 0x0688: ih_no_other */
+       0xf6010040,
+       0x04bd000a,
+       0xe0fcf0fc,
+       0xb0fcd0fc,
+       0x90fca0fc,
+       0x88fe80fc,
+       0xf480fc00,
+       0x01f80032,
+/* 0x06a8: ctx_4170s */
+       0xb210f5f0,
+       0x41708eff,
+       0x008f7e40,
+/* 0x06b7: ctx_4170w */
+       0x8e00f800,
+       0x7e404170,
+       0xb2000065,
+       0x10f4f0ff,
+       0xf8f31bf4,
+/* 0x06c9: ctx_redswitch */
+       0x02004e00,
+       0xf040e5f0,
+       0xe5f020e5,
+       0x85008010,
+       0x000ef601,
+       0x080f04bd,
+/* 0x06e0: ctx_redswitch_delay */
+       0xf401f2b6,
+       0xe5f1fd1b,
+       0xe5f10400,
+       0x00800100,
+       0x0ef60185,
+       0xf804bd00,
+/* 0x06f9: ctx_86c */
+       0x23008000,
+       0x000ff602,
+       0xffb204bd,
+       0x408a148e,
+       0x00008f7e,
+       0x8c8effb2,
+       0x8f7e41a8,
+       0x00f80000,
+/* 0x0718: ctx_mem */
+       0x02840080,
        0xbd000ff6,
-       0xc1008004,
-       0x0002f602,
-       0x008004bd,
-       0x02f60283,
-       0x0f04bd00,
-       0x06d27e07,
-       0xc0008000,
-       0x0002f602,
-       0x0bfe04bd,
-       0x1f2af000,
-       0xb60424b6,
-       0x94bd0220,
-       0x800899f0,
-       0xf6023700,
-       0x04bd0009,
-       0x02810080,
-       0xbd0002f6,
-       0x0000d204,
-       0x25f08000,
-       0x88008002,
-       0x0002f602,
-       0x100104bd,
-       0xf0020042,
-       0x12fa0223,
-       0xbd03f805,
-       0x0899f094,
-       0x02170080,
-       0xbd0009f6,
-       0x81019804,
-       0x981814b6,
-       0x25b68002,
-       0x0512fd08,
-       0xbd1601b5,
-       0x0999f094,
-       0x02370080,
-       0xbd0009f6,
-       0x81008004,
-       0x0001f602,
-       0x010204bd,
-       0x02880080,
+/* 0x0721: ctx_mem_wait */
+       0x84008f04,
+       0x00ffcf02,
+       0xf405fffd,
+       0x00f8f61b,
+/* 0x0730: ctx_load */
+       0x99f094bd,
+       0x37008005,
+       0x0009f602,
+       0x0c0a04bd,
+       0x0000b87e,
+       0x0080f4bd,
+       0x0ff60289,
+       0x8004bd00,
+       0xf602c100,
+       0x04bd0002,
+       0x02830080,
        0xbd0002f6,
-       0x01004104,
-       0xfa0613f0,
-       0x03f80501,
+       0x7e070f04,
+       0x80000718,
+       0xf602c000,
+       0x04bd0002,
+       0xf0000bfe,
+       0x24b61f2a,
+       0x0220b604,
        0x99f094bd,
-       0x17008009,
+       0x37008008,
        0x0009f602,
-       0x94bd04bd,
-       0x800599f0,
+       0x008004bd,
+       0x02f60281,
+       0xd204bd00,
+       0x80000000,
+       0x800225f0,
+       0xf6028800,
+       0x04bd0002,
+       0x00421001,
+       0x0223f002,
+       0xf80512fa,
+       0xf094bd03,
+       0x00800899,
+       0x09f60217,
+       0x9804bd00,
+       0x14b68101,
+       0x80029818,
+       0xfd0825b6,
+       0x01b50512,
+       0xf094bd16,
+       0x00800999,
+       0x09f60237,
+       0x8004bd00,
+       0xf6028100,
+       0x04bd0001,
+       0x00800102,
+       0x02f60288,
+       0x4104bd00,
+       0x13f00100,
+       0x0501fa06,
+       0x94bd03f8,
+       0x800999f0,
        0xf6021700,
        0x04bd0009,
-/* 0x07d6: ctx_chan */
-       0xea7e00f8,
-       0x0c0a0006,
-       0x0000b87e,
-       0xd27e050f,
-       0x00f80006,
-/* 0x07e8: ctx_mmio_exec */
-       0x80410398,
+       0x99f094bd,
+       0x17008005,
+       0x0009f602,
+       0x00f804bd,
+/* 0x081c: ctx_chan */
+       0x0007307e,
+       0xb87e0c0a,
+       0x050f0000,
+       0x0007187e,
+/* 0x082e: ctx_mmio_exec */
+       0x039800f8,
+       0x81008041,
+       0x0003f602,
+       0x34bd04bd,
+/* 0x083c: ctx_mmio_loop */
+       0xf4ff34c4,
+       0x00450e1b,
+       0x0653f002,
+       0xf80535fa,
+/* 0x084d: ctx_mmio_pull */
+       0x804e9803,
+       0x7e814f98,
+       0xb600008f,
+       0x12b60830,
+       0xdf1bf401,
+/* 0x0860: ctx_mmio_done */
+       0x80160398,
        0xf6028100,
        0x04bd0003,
-/* 0x07f6: ctx_mmio_loop */
-       0x34c434bd,
-       0x0e1bf4ff,
-       0xf0020045,
-       0x35fa0653,
-/* 0x0807: ctx_mmio_pull */
-       0x9803f805,
-       0x4f98804e,
-       0x008f7e81,
-       0x0830b600,
-       0xf40112b6,
-/* 0x081a: ctx_mmio_done */
-       0x0398df1b,
-       0x81008016,
-       0x0003f602,
-       0x00b504bd,
-       0x01004140,
-       0xfa0613f0,
-       0x03f80601,
-/* 0x0836: ctx_xfer */
-       0x040e00f8,
-       0x03020080,
-       0xbd000ef6,
-/* 0x0841: ctx_xfer_idle */
-       0x00008e04,
-       0x00eecf03,
-       0x2000e4f1,
-       0xf4f51bf4,
-       0x02f40611,
-/* 0x0855: ctx_xfer_pre */
-       0x7e100f0c,
-       0xf40006b3,
-/* 0x085e: ctx_xfer_pre_load */
-       0x020f1b11,
-       0x0006627e,
-       0x0006717e,
-       0x0006837e,
-       0x627ef4bd,
-       0xea7e0006,
-/* 0x0876: ctx_xfer_exec */
-       0x01980006,
-       0x8024bd16,
-       0xf6010500,
-       0x04bd0002,
-       0x008e1fb2,
-       0x8f7e41a5,
-       0xfcf00000,
-       0x022cf001,
-       0xfd0124b6,
-       0xffb205f2,
-       0x41a5048e,
+       0x414000b5,
+       0x13f00100,
+       0x0601fa06,
+       0x00f803f8,
+/* 0x087c: ctx_xfer */
+       0x0080040e,
+       0x0ef60302,
+/* 0x0887: ctx_xfer_idle */
+       0x8e04bd00,
+       0xcf030000,
+       0xe4f100ee,
+       0x1bf42000,
+       0x0611f4f5,
+/* 0x089b: ctx_xfer_pre */
+       0x0f0c02f4,
+       0x06f97e10,
+       0x1b11f400,
+/* 0x08a4: ctx_xfer_pre_load */
+       0xa87e020f,
+       0xb77e0006,
+       0xc97e0006,
+       0xf4bd0006,
+       0x0006a87e,
+       0x0007307e,
+/* 0x08bc: ctx_xfer_exec */
+       0xbd160198,
+       0x05008024,
+       0x0002f601,
+       0x1fb204bd,
+       0x41a5008e,
        0x00008f7e,
-       0x0002167e,
-       0xfc8024bd,
-       0x02f60247,
-       0xf004bd00,
-       0x20b6012c,
-       0x4afc8003,
-       0x0002f602,
-       0xacf004bd,
-       0x06a5f001,
-       0x0c98000b,
-       0x010d9800,
-       0x3d7e000e,
-       0x080a0001,
-       0x0000ec7e,
-       0x00020a7e,
-       0x0a1201f4,
-       0x00b87e0c,
-       0x7e050f00,
-       0xf40006d2,
-/* 0x08f2: ctx_xfer_post */
-       0x020f2d02,
-       0x0006627e,
-       0xb37ef4bd,
-       0x277e0006,
-       0x717e0002,
+       0xf001fcf0,
+       0x24b6022c,
+       0x05f2fd01,
+       0x048effb2,
+       0x8f7e41a5,
+       0x167e0000,
+       0x24bd0002,
+       0x0247fc80,
+       0xbd0002f6,
+       0x012cf004,
+       0x800320b6,
+       0xf6024afc,
+       0x04bd0002,
+       0xf001acf0,
+       0x000b06a5,
+       0x98000c98,
+       0x000e010d,
+       0x00013d7e,
+       0xec7e080a,
+       0x0a7e0000,
+       0x01f40002,
+       0x7e0c0a12,
+       0x0f0000b8,
+       0x07187e05,
+       0x2d02f400,
+/* 0x0938: ctx_xfer_post */
+       0xa87e020f,
        0xf4bd0006,
-       0x0006627e,
-       0x981011f4,
-       0x11fd4001,
-       0x070bf405,
-       0x0007e87e,
-/* 0x091c: ctx_xfer_no_post_mmio */
-/* 0x091c: ctx_xfer_done */
-       0x000000f8,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x0006f97e,
+       0x0002277e,
+       0x0006b77e,
+       0xa87ef4bd,
+       0x11f40006,
+       0x40019810,
+       0xf40511fd,
+       0x2e7e070b,
+/* 0x0962: ctx_xfer_no_post_mmio */
+/* 0x0962: ctx_xfer_done */
+       0x00f80008,
        0x00000000,
        0x00000000,
        0x00000000,
index 64dfd75..e49b5a8 100644 (file)
@@ -478,10 +478,10 @@ uint32_t nv108_grhub_code[] = {
        0x01040080,
        0xbd0001f6,
        0x01004104,
-       0x627e020f,
-       0x717e0006,
+       0xa87e020f,
+       0xb77e0006,
        0x100f0006,
-       0x0006b37e,
+       0x0006f97e,
        0x98000e98,
        0x207e010f,
        0x14950001,
@@ -523,8 +523,8 @@ uint32_t nv108_grhub_code[] = {
        0x800040b7,
        0xf40132b6,
        0x000fb41b,
-       0x0006b37e,
-       0x627e000f,
+       0x0006f97e,
+       0xa87e000f,
        0x00800006,
        0x01f60201,
        0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t nv108_grhub_code[] = {
        0x0009f602,
        0x32f404bd,
        0x0231f401,
-       0x0008367e,
+       0x00087c7e,
        0x99f094bd,
        0x17008007,
        0x0009f602,
@@ -563,7 +563,7 @@ uint32_t nv108_grhub_code[] = {
        0x37008006,
        0x0009f602,
        0x31f404bd,
-       0x08367e01,
+       0x087c7e01,
        0xf094bd00,
        0x00800699,
        0x09f60217,
@@ -572,7 +572,7 @@ uint32_t nv108_grhub_code[] = {
        0x20f92f0e,
        0x32f412b2,
        0x0232f401,
-       0x0008367e,
+       0x00087c7e,
        0x008020fc,
        0x02f602c0,
        0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t nv108_grhub_code[] = {
        0x23c8130e,
        0x0d0bf41f,
        0xf40131f4,
-       0x367e0232,
+       0x7c7e0232,
 /* 0x054e: chsw_done */
        0x01020008,
        0x02c30080,
@@ -593,7 +593,7 @@ uint32_t nv108_grhub_code[] = {
        0xb0ff2a0e,
        0x1bf401e4,
        0x7ef2b20c,
-       0xf40007d6,
+       0xf400081c,
 /* 0x057a: main_not_ctx_chan */
        0xe4b0400e,
        0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t nv108_grhub_code[] = {
        0x0009f602,
        0x32f404bd,
        0x0232f401,
-       0x0008367e,
+       0x00087c7e,
        0x99f094bd,
        0x17008007,
        0x0009f602,
@@ -642,238 +642,238 @@ uint32_t nv108_grhub_code[] = {
 /* 0x061a: ih_no_ctxsw */
        0xabe40000,
        0x0bf40400,
-       0x01004b10,
-       0x448ebfb2,
-       0x8f7e4001,
-/* 0x062e: ih_no_fwmthd */
-       0x044b0000,
-       0xffb0bd01,
-       0x0bf4b4ab,
-       0x0700800c,
-       0x000bf603,
-/* 0x0642: ih_no_other */
-       0x004004bd,
-       0x000af601,
-       0xf0fc04bd,
-       0xd0fce0fc,
-       0xa0fcb0fc,
-       0x80fc90fc,
-       0xfc0088fe,
-       0x0032f480,
-/* 0x0662: ctx_4170s */
-       0xf5f001f8,
-       0x8effb210,
-       0x7e404170,
-       0xf800008f,
-/* 0x0671: ctx_4170w */
-       0x41708e00,
+       0x07088e56,
        0x00657e40,
-       0xf0ffb200,
-       0x1bf410f4,
-/* 0x0683: ctx_redswitch */
-       0x4e00f8f3,
-       0xe5f00200,
-       0x20e5f040,
-       0x8010e5f0,
-       0xf6018500,
-       0x04bd000e,
-/* 0x069a: ctx_redswitch_delay */
-       0xf2b6080f,
-       0xfd1bf401,
-       0x0400e5f1,
-       0x0100e5f1,
-       0x01850080,
-       0xbd000ef6,
-/* 0x06b3: ctx_86c */
-       0x8000f804,
-       0xf6022300,
+       0x80ffb200,
+       0xf6020400,
        0x04bd000f,
-       0x148effb2,
-       0x8f7e408a,
-       0xffb20000,
-       0x41a88c8e,
+       0x4007048e,
+       0x0000657e,
+       0x0080ffb2,
+       0x0ff60203,
+       0xc704bd00,
+       0xee9450fe,
+       0x07008f02,
+       0x00efbb40,
+       0x0000657e,
+       0x02020080,
+       0xbd000ff6,
+       0x7e030f04,
+       0x4b0002f8,
+       0xbfb20100,
+       0x4001448e,
        0x00008f7e,
-/* 0x06d2: ctx_mem */
-       0x008000f8,
-       0x0ff60284,
-/* 0x06db: ctx_mem_wait */
-       0x8f04bd00,
-       0xcf028400,
-       0xfffd00ff,
-       0xf61bf405,
-/* 0x06ea: ctx_load */
-       0x94bd00f8,
-       0x800599f0,
-       0xf6023700,
-       0x04bd0009,
-       0xb87e0c0a,
-       0xf4bd0000,
-       0x02890080,
+/* 0x0674: ih_no_fwmthd */
+       0xbd05044b,
+       0xb4abffb0,
+       0x800c0bf4,
+       0xf6030700,
+       0x04bd000b,
+/* 0x0688: ih_no_other */
+       0xf6010040,
+       0x04bd000a,
+       0xe0fcf0fc,
+       0xb0fcd0fc,
+       0x90fca0fc,
+       0x88fe80fc,
+       0xf480fc00,
+       0x01f80032,
+/* 0x06a8: ctx_4170s */
+       0xb210f5f0,
+       0x41708eff,
+       0x008f7e40,
+/* 0x06b7: ctx_4170w */
+       0x8e00f800,
+       0x7e404170,
+       0xb2000065,
+       0x10f4f0ff,
+       0xf8f31bf4,
+/* 0x06c9: ctx_redswitch */
+       0x02004e00,
+       0xf040e5f0,
+       0xe5f020e5,
+       0x85008010,
+       0x000ef601,
+       0x080f04bd,
+/* 0x06e0: ctx_redswitch_delay */
+       0xf401f2b6,
+       0xe5f1fd1b,
+       0xe5f10400,
+       0x00800100,
+       0x0ef60185,
+       0xf804bd00,
+/* 0x06f9: ctx_86c */
+       0x23008000,
+       0x000ff602,
+       0xffb204bd,
+       0x408a148e,
+       0x00008f7e,
+       0x8c8effb2,
+       0x8f7e41a8,
+       0x00f80000,
+/* 0x0718: ctx_mem */
+       0x02840080,
        0xbd000ff6,
-       0xc1008004,
-       0x0002f602,
-       0x008004bd,
-       0x02f60283,
-       0x0f04bd00,
-       0x06d27e07,
-       0xc0008000,
-       0x0002f602,
-       0x0bfe04bd,
-       0x1f2af000,
-       0xb60424b6,
-       0x94bd0220,
-       0x800899f0,
-       0xf6023700,
-       0x04bd0009,
-       0x02810080,
-       0xbd0002f6,
-       0x0000d204,
-       0x25f08000,
-       0x88008002,
-       0x0002f602,
-       0x100104bd,
-       0xf0020042,
-       0x12fa0223,
-       0xbd03f805,
-       0x0899f094,
-       0x02170080,
-       0xbd0009f6,
-       0x81019804,
-       0x981814b6,
-       0x25b68002,
-       0x0512fd08,
-       0xbd1601b5,
-       0x0999f094,
-       0x02370080,
-       0xbd0009f6,
-       0x81008004,
-       0x0001f602,
-       0x010204bd,
-       0x02880080,
+/* 0x0721: ctx_mem_wait */
+       0x84008f04,
+       0x00ffcf02,
+       0xf405fffd,
+       0x00f8f61b,
+/* 0x0730: ctx_load */
+       0x99f094bd,
+       0x37008005,
+       0x0009f602,
+       0x0c0a04bd,
+       0x0000b87e,
+       0x0080f4bd,
+       0x0ff60289,
+       0x8004bd00,
+       0xf602c100,
+       0x04bd0002,
+       0x02830080,
        0xbd0002f6,
-       0x01004104,
-       0xfa0613f0,
-       0x03f80501,
+       0x7e070f04,
+       0x80000718,
+       0xf602c000,
+       0x04bd0002,
+       0xf0000bfe,
+       0x24b61f2a,
+       0x0220b604,
        0x99f094bd,
-       0x17008009,
+       0x37008008,
        0x0009f602,
-       0x94bd04bd,
-       0x800599f0,
+       0x008004bd,
+       0x02f60281,
+       0xd204bd00,
+       0x80000000,
+       0x800225f0,
+       0xf6028800,
+       0x04bd0002,
+       0x00421001,
+       0x0223f002,
+       0xf80512fa,
+       0xf094bd03,
+       0x00800899,
+       0x09f60217,
+       0x9804bd00,
+       0x14b68101,
+       0x80029818,
+       0xfd0825b6,
+       0x01b50512,
+       0xf094bd16,
+       0x00800999,
+       0x09f60237,
+       0x8004bd00,
+       0xf6028100,
+       0x04bd0001,
+       0x00800102,
+       0x02f60288,
+       0x4104bd00,
+       0x13f00100,
+       0x0501fa06,
+       0x94bd03f8,
+       0x800999f0,
        0xf6021700,
        0x04bd0009,
-/* 0x07d6: ctx_chan */
-       0xea7e00f8,
-       0x0c0a0006,
-       0x0000b87e,
-       0xd27e050f,
-       0x00f80006,
-/* 0x07e8: ctx_mmio_exec */
-       0x80410398,
+       0x99f094bd,
+       0x17008005,
+       0x0009f602,
+       0x00f804bd,
+/* 0x081c: ctx_chan */
+       0x0007307e,
+       0xb87e0c0a,
+       0x050f0000,
+       0x0007187e,
+/* 0x082e: ctx_mmio_exec */
+       0x039800f8,
+       0x81008041,
+       0x0003f602,
+       0x34bd04bd,
+/* 0x083c: ctx_mmio_loop */
+       0xf4ff34c4,
+       0x00450e1b,
+       0x0653f002,
+       0xf80535fa,
+/* 0x084d: ctx_mmio_pull */
+       0x804e9803,
+       0x7e814f98,
+       0xb600008f,
+       0x12b60830,
+       0xdf1bf401,
+/* 0x0860: ctx_mmio_done */
+       0x80160398,
        0xf6028100,
        0x04bd0003,
-/* 0x07f6: ctx_mmio_loop */
-       0x34c434bd,
-       0x0e1bf4ff,
-       0xf0020045,
-       0x35fa0653,
-/* 0x0807: ctx_mmio_pull */
-       0x9803f805,
-       0x4f98804e,
-       0x008f7e81,
-       0x0830b600,
-       0xf40112b6,
-/* 0x081a: ctx_mmio_done */
-       0x0398df1b,
-       0x81008016,
-       0x0003f602,
-       0x00b504bd,
-       0x01004140,
-       0xfa0613f0,
-       0x03f80601,
-/* 0x0836: ctx_xfer */
-       0x040e00f8,
-       0x03020080,
-       0xbd000ef6,
-/* 0x0841: ctx_xfer_idle */
-       0x00008e04,
-       0x00eecf03,
-       0x2000e4f1,
-       0xf4f51bf4,
-       0x02f40611,
-/* 0x0855: ctx_xfer_pre */
-       0x7e100f0c,
-       0xf40006b3,
-/* 0x085e: ctx_xfer_pre_load */
-       0x020f1b11,
-       0x0006627e,
-       0x0006717e,
-       0x0006837e,
-       0x627ef4bd,
-       0xea7e0006,
-/* 0x0876: ctx_xfer_exec */
-       0x01980006,
-       0x8024bd16,
-       0xf6010500,
-       0x04bd0002,
-       0x008e1fb2,
-       0x8f7e41a5,
-       0xfcf00000,
-       0x022cf001,
-       0xfd0124b6,
-       0xffb205f2,
-       0x41a5048e,
+       0x414000b5,
+       0x13f00100,
+       0x0601fa06,
+       0x00f803f8,
+/* 0x087c: ctx_xfer */
+       0x0080040e,
+       0x0ef60302,
+/* 0x0887: ctx_xfer_idle */
+       0x8e04bd00,
+       0xcf030000,
+       0xe4f100ee,
+       0x1bf42000,
+       0x0611f4f5,
+/* 0x089b: ctx_xfer_pre */
+       0x0f0c02f4,
+       0x06f97e10,
+       0x1b11f400,
+/* 0x08a4: ctx_xfer_pre_load */
+       0xa87e020f,
+       0xb77e0006,
+       0xc97e0006,
+       0xf4bd0006,
+       0x0006a87e,
+       0x0007307e,
+/* 0x08bc: ctx_xfer_exec */
+       0xbd160198,
+       0x05008024,
+       0x0002f601,
+       0x1fb204bd,
+       0x41a5008e,
        0x00008f7e,
-       0x0002167e,
-       0xfc8024bd,
-       0x02f60247,
-       0xf004bd00,
-       0x20b6012c,
-       0x4afc8003,
-       0x0002f602,
-       0xacf004bd,
-       0x06a5f001,
-       0x0c98000b,
-       0x010d9800,
-       0x3d7e000e,
-       0x080a0001,
-       0x0000ec7e,
-       0x00020a7e,
-       0x0a1201f4,
-       0x00b87e0c,
-       0x7e050f00,
-       0xf40006d2,
-/* 0x08f2: ctx_xfer_post */
-       0x020f2d02,
-       0x0006627e,
-       0xb37ef4bd,
-       0x277e0006,
-       0x717e0002,
+       0xf001fcf0,
+       0x24b6022c,
+       0x05f2fd01,
+       0x048effb2,
+       0x8f7e41a5,
+       0x167e0000,
+       0x24bd0002,
+       0x0247fc80,
+       0xbd0002f6,
+       0x012cf004,
+       0x800320b6,
+       0xf6024afc,
+       0x04bd0002,
+       0xf001acf0,
+       0x000b06a5,
+       0x98000c98,
+       0x000e010d,
+       0x00013d7e,
+       0xec7e080a,
+       0x0a7e0000,
+       0x01f40002,
+       0x7e0c0a12,
+       0x0f0000b8,
+       0x07187e05,
+       0x2d02f400,
+/* 0x0938: ctx_xfer_post */
+       0xa87e020f,
        0xf4bd0006,
-       0x0006627e,
-       0x981011f4,
-       0x11fd4001,
-       0x070bf405,
-       0x0007e87e,
-/* 0x091c: ctx_xfer_no_post_mmio */
-/* 0x091c: ctx_xfer_done */
-       0x000000f8,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x0006f97e,
+       0x0002277e,
+       0x0006b77e,
+       0xa87ef4bd,
+       0x11f40006,
+       0x40019810,
+       0xf40511fd,
+       0x2e7e070b,
+/* 0x0962: ctx_xfer_no_post_mmio */
+/* 0x0962: ctx_xfer_done */
+       0x00f80008,
        0x00000000,
        0x00000000,
        0x00000000,
index f8f7b27..92dfe6a 100644
@@ -528,10 +528,10 @@ uint32_t nvc0_grhub_code[] = {
        0x0001d001,
        0x17f104bd,
        0xf7f00100,
-       0xb521f502,
-       0xc721f507,
-       0x10f7f007,
-       0x081421f5,
+       0x0d21f502,
+       0x1f21f508,
+       0x10f7f008,
+       0x086c21f5,
        0x98000e98,
        0x21f5010f,
        0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvc0_grhub_code[] = {
        0xb6800040,
        0x1bf40132,
        0x00f7f0be,
-       0x081421f5,
+       0x086c21f5,
        0xf500f7f0,
-       0xf107b521,
+       0xf1080d21,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvc0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x31f40132,
-       0xe821f502,
-       0xf094bd09,
+       0x4021f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvc0_grhub_code[] = {
        0x0203f00f,
        0xbd0009d0,
        0x0131f404,
-       0x09e821f5,
+       0x0a4021f5,
        0x99f094bd,
        0x0007f106,
        0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvc0_grhub_code[] = {
        0x12b920f9,
        0x0132f402,
        0xf50232f4,
-       0xfc09e821,
+       0xfc0a4021,
        0x0007f120,
        0x0203f0c0,
        0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvc0_grhub_code[] = {
        0xf41f23c8,
        0x31f40d0b,
        0x0232f401,
-       0x09e821f5,
+       0x0a4021f5,
 /* 0x063c: chsw_done */
        0xf10127f0,
        0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvc0_grhub_code[] = {
 /* 0x0660: main_not_ctx_switch */
        0xf401e4b0,
        0xf2b90d1b,
-       0x7821f502,
+       0xd021f502,
        0x460ef409,
 /* 0x0670: main_not_ctx_chan */
        0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvc0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x32f40132,
-       0xe821f502,
-       0xf094bd09,
+       0x4021f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvc0_grhub_code[] = {
 /* 0x072b: ih_no_ctxsw */
        0xe40421f4,
        0xf40400ab,
-       0xb7f1140b,
+       0xe7f16c0b,
+       0xe3f00708,
+       0x6821f440,
+       0xf102ffb9,
+       0xf0040007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf00704e7,
+       0x21f440e3,
+       0x02ffb968,
+       0x030007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0x9450fec7,
+       0xf7f102ee,
+       0xf3f00700,
+       0x00efbb40,
+       0xf16821f4,
+       0xf0020007,
+       0x0fd00203,
+       0xf004bd00,
+       0x21f503f7,
+       0xb7f1037e,
        0xbfb90100,
        0x44e7f102,
        0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
        0xf19d21f4,
-       0xbd0104b7,
+       0xbd0504b7,
        0xb4abffb0,
        0xf10f0bf4,
        0xf0070007,
        0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
        0xf104bd00,
        0xf0010007,
        0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvc0_grhub_code[] = {
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x077f: ctx_4160s */
+/* 0x07d7: ctx_4160s */
        0xf001f800,
        0xffb901f7,
        0x60e7f102,
        0x40e3f041,
-/* 0x078f: ctx_4160s_wait */
+/* 0x07e7: ctx_4160s_wait */
        0xf19d21f4,
        0xf04160e7,
        0x21f440e3,
        0x02ffb968,
        0xf404ffc8,
        0x00f8f00b,
-/* 0x07a4: ctx_4160c */
+/* 0x07fc: ctx_4160c */
        0xffb9f4bd,
        0x60e7f102,
        0x40e3f041,
        0xf89d21f4,
-/* 0x07b5: ctx_4170s */
+/* 0x080d: ctx_4170s */
        0x10f5f000,
        0xf102ffb9,
        0xf04170e7,
        0x21f440e3,
-/* 0x07c7: ctx_4170w */
+/* 0x081f: ctx_4170w */
        0xf100f89d,
        0xf04170e7,
        0x21f440e3,
        0x02ffb968,
        0xf410f4f0,
        0x00f8f01b,
-/* 0x07dc: ctx_redswitch */
+/* 0x0834: ctx_redswitch */
        0x0200e7f1,
        0xf040e5f0,
        0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvc0_grhub_code[] = {
        0x0103f085,
        0xbd000ed0,
        0x08f7f004,
-/* 0x07f8: ctx_redswitch_delay */
+/* 0x0850: ctx_redswitch_delay */
        0xf401f2b6,
        0xe5f1fd1b,
        0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvc0_grhub_code[] = {
        0x03f08500,
        0x000ed001,
        0x00f804bd,
-/* 0x0814: ctx_86c */
+/* 0x086c: ctx_86c */
        0x1b0007f1,
        0xd00203f0,
        0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvc0_grhub_code[] = {
        0xa86ce7f1,
        0xf441e3f0,
        0x00f89d21,
-/* 0x083c: ctx_mem */
+/* 0x0894: ctx_mem */
        0x840007f1,
        0xd00203f0,
        0x04bd000f,
-/* 0x0848: ctx_mem_wait */
+/* 0x08a0: ctx_mem_wait */
        0x8400f7f1,
        0xcf02f3f0,
        0xfffd00ff,
        0xf31bf405,
-/* 0x085a: ctx_load */
+/* 0x08b2: ctx_load */
        0x94bd00f8,
        0xf10599f0,
        0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvc0_grhub_code[] = {
        0x02d00203,
        0xf004bd00,
        0x21f507f7,
-       0x07f1083c,
+       0x07f10894,
        0x03f0c000,
        0x0002d002,
        0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvc0_grhub_code[] = {
        0x03f01700,
        0x0009d002,
        0x00f804bd,
-/* 0x0978: ctx_chan */
-       0x077f21f5,
-       0x085a21f5,
+/* 0x09d0: ctx_chan */
+       0x07d721f5,
+       0x08b221f5,
        0xf40ca7f0,
        0xf7f0d021,
-       0x3c21f505,
-       0xa421f508,
-/* 0x0993: ctx_mmio_exec */
+       0x9421f505,
+       0xfc21f508,
+/* 0x09eb: ctx_mmio_exec */
        0x9800f807,
        0x07f14103,
        0x03f08100,
        0x0003d002,
        0x34bd04bd,
-/* 0x09a4: ctx_mmio_loop */
+/* 0x09fc: ctx_mmio_loop */
        0xf4ff34c4,
        0x57f10f1b,
        0x53f00200,
        0x0535fa06,
-/* 0x09b6: ctx_mmio_pull */
+/* 0x0a0e: ctx_mmio_pull */
        0x4e9803f8,
        0x814f9880,
        0xb69d21f4,
        0x12b60830,
        0xdf1bf401,
-/* 0x09c8: ctx_mmio_done */
+/* 0x0a20: ctx_mmio_done */
        0xf1160398,
        0xf0810007,
        0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvc0_grhub_code[] = {
        0x13f00100,
        0x0601fa06,
        0x00f803f8,
-/* 0x09e8: ctx_xfer */
+/* 0x0a40: ctx_xfer */
        0xf104e7f0,
        0xf0020007,
        0x0ed00303,
-/* 0x09f7: ctx_xfer_idle */
+/* 0x0a4f: ctx_xfer_idle */
        0xf104bd00,
        0xf00000e7,
        0xeecf03e3,
        0x00e4f100,
        0xf21bf420,
        0xf40611f4,
-/* 0x0a0e: ctx_xfer_pre */
+/* 0x0a66: ctx_xfer_pre */
        0xf7f01102,
-       0x1421f510,
-       0x7f21f508,
+       0x6c21f510,
+       0xd721f508,
        0x1c11f407,
-/* 0x0a1c: ctx_xfer_pre_load */
+/* 0x0a74: ctx_xfer_pre_load */
        0xf502f7f0,
-       0xf507b521,
-       0xf507c721,
-       0xbd07dc21,
-       0xb521f5f4,
-       0x5a21f507,
-/* 0x0a35: ctx_xfer_exec */
+       0xf5080d21,
+       0xf5081f21,
+       0xbd083421,
+       0x0d21f5f4,
+       0xb221f508,
+/* 0x0a8d: ctx_xfer_exec */
        0x16019808,
        0x07f124bd,
        0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvc0_grhub_code[] = {
        0x1301f402,
        0xf40ca7f0,
        0xf7f0d021,
-       0x3c21f505,
+       0x9421f505,
        0x3202f408,
-/* 0x0ac4: ctx_xfer_post */
+/* 0x0b1c: ctx_xfer_post */
        0xf502f7f0,
-       0xbd07b521,
-       0x1421f5f4,
+       0xbd080d21,
+       0x6c21f5f4,
        0x7f21f508,
-       0xc721f502,
-       0xf5f4bd07,
-       0xf407b521,
+       0x1f21f502,
+       0xf5f4bd08,
+       0xf4080d21,
        0x01981011,
        0x0511fd40,
        0xf5070bf4,
-/* 0x0aef: ctx_xfer_no_post_mmio */
-       0xf5099321,
-/* 0x0af3: ctx_xfer_done */
-       0xf807a421,
+/* 0x0b47: ctx_xfer_no_post_mmio */
+       0xf509eb21,
+/* 0x0b4b: ctx_xfer_done */
+       0xf807fc21,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
index 624215a..62b0c76 100644
@@ -528,10 +528,10 @@ uint32_t nvd7_grhub_code[] = {
        0x0001d001,
        0x17f104bd,
        0xf7f00100,
-       0xb521f502,
-       0xc721f507,
-       0x10f7f007,
-       0x081421f5,
+       0x0d21f502,
+       0x1f21f508,
+       0x10f7f008,
+       0x086c21f5,
        0x98000e98,
        0x21f5010f,
        0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvd7_grhub_code[] = {
        0xb6800040,
        0x1bf40132,
        0x00f7f0be,
-       0x081421f5,
+       0x086c21f5,
        0xf500f7f0,
-       0xf107b521,
+       0xf1080d21,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvd7_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x31f40132,
-       0xe821f502,
-       0xf094bd09,
+       0x4021f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvd7_grhub_code[] = {
        0x0203f00f,
        0xbd0009d0,
        0x0131f404,
-       0x09e821f5,
+       0x0a4021f5,
        0x99f094bd,
        0x0007f106,
        0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvd7_grhub_code[] = {
        0x12b920f9,
        0x0132f402,
        0xf50232f4,
-       0xfc09e821,
+       0xfc0a4021,
        0x0007f120,
        0x0203f0c0,
        0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvd7_grhub_code[] = {
        0xf41f23c8,
        0x31f40d0b,
        0x0232f401,
-       0x09e821f5,
+       0x0a4021f5,
 /* 0x063c: chsw_done */
        0xf10127f0,
        0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvd7_grhub_code[] = {
 /* 0x0660: main_not_ctx_switch */
        0xf401e4b0,
        0xf2b90d1b,
-       0x7821f502,
+       0xd021f502,
        0x460ef409,
 /* 0x0670: main_not_ctx_chan */
        0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvd7_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x32f40132,
-       0xe821f502,
-       0xf094bd09,
+       0x4021f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvd7_grhub_code[] = {
 /* 0x072b: ih_no_ctxsw */
        0xe40421f4,
        0xf40400ab,
-       0xb7f1140b,
+       0xe7f16c0b,
+       0xe3f00708,
+       0x6821f440,
+       0xf102ffb9,
+       0xf0040007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf00704e7,
+       0x21f440e3,
+       0x02ffb968,
+       0x030007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0x9450fec7,
+       0xf7f102ee,
+       0xf3f00700,
+       0x00efbb40,
+       0xf16821f4,
+       0xf0020007,
+       0x0fd00203,
+       0xf004bd00,
+       0x21f503f7,
+       0xb7f1037e,
        0xbfb90100,
        0x44e7f102,
        0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
        0xf19d21f4,
-       0xbd0104b7,
+       0xbd0504b7,
        0xb4abffb0,
        0xf10f0bf4,
        0xf0070007,
        0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
        0xf104bd00,
        0xf0010007,
        0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvd7_grhub_code[] = {
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x077f: ctx_4160s */
+/* 0x07d7: ctx_4160s */
        0xf001f800,
        0xffb901f7,
        0x60e7f102,
        0x40e3f041,
-/* 0x078f: ctx_4160s_wait */
+/* 0x07e7: ctx_4160s_wait */
        0xf19d21f4,
        0xf04160e7,
        0x21f440e3,
        0x02ffb968,
        0xf404ffc8,
        0x00f8f00b,
-/* 0x07a4: ctx_4160c */
+/* 0x07fc: ctx_4160c */
        0xffb9f4bd,
        0x60e7f102,
        0x40e3f041,
        0xf89d21f4,
-/* 0x07b5: ctx_4170s */
+/* 0x080d: ctx_4170s */
        0x10f5f000,
        0xf102ffb9,
        0xf04170e7,
        0x21f440e3,
-/* 0x07c7: ctx_4170w */
+/* 0x081f: ctx_4170w */
        0xf100f89d,
        0xf04170e7,
        0x21f440e3,
        0x02ffb968,
        0xf410f4f0,
        0x00f8f01b,
-/* 0x07dc: ctx_redswitch */
+/* 0x0834: ctx_redswitch */
        0x0200e7f1,
        0xf040e5f0,
        0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvd7_grhub_code[] = {
        0x0103f085,
        0xbd000ed0,
        0x08f7f004,
-/* 0x07f8: ctx_redswitch_delay */
+/* 0x0850: ctx_redswitch_delay */
        0xf401f2b6,
        0xe5f1fd1b,
        0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvd7_grhub_code[] = {
        0x03f08500,
        0x000ed001,
        0x00f804bd,
-/* 0x0814: ctx_86c */
+/* 0x086c: ctx_86c */
        0x1b0007f1,
        0xd00203f0,
        0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvd7_grhub_code[] = {
        0xa86ce7f1,
        0xf441e3f0,
        0x00f89d21,
-/* 0x083c: ctx_mem */
+/* 0x0894: ctx_mem */
        0x840007f1,
        0xd00203f0,
        0x04bd000f,
-/* 0x0848: ctx_mem_wait */
+/* 0x08a0: ctx_mem_wait */
        0x8400f7f1,
        0xcf02f3f0,
        0xfffd00ff,
        0xf31bf405,
-/* 0x085a: ctx_load */
+/* 0x08b2: ctx_load */
        0x94bd00f8,
        0xf10599f0,
        0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvd7_grhub_code[] = {
        0x02d00203,
        0xf004bd00,
        0x21f507f7,
-       0x07f1083c,
+       0x07f10894,
        0x03f0c000,
        0x0002d002,
        0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvd7_grhub_code[] = {
        0x03f01700,
        0x0009d002,
        0x00f804bd,
-/* 0x0978: ctx_chan */
-       0x077f21f5,
-       0x085a21f5,
+/* 0x09d0: ctx_chan */
+       0x07d721f5,
+       0x08b221f5,
        0xf40ca7f0,
        0xf7f0d021,
-       0x3c21f505,
-       0xa421f508,
-/* 0x0993: ctx_mmio_exec */
+       0x9421f505,
+       0xfc21f508,
+/* 0x09eb: ctx_mmio_exec */
        0x9800f807,
        0x07f14103,
        0x03f08100,
        0x0003d002,
        0x34bd04bd,
-/* 0x09a4: ctx_mmio_loop */
+/* 0x09fc: ctx_mmio_loop */
        0xf4ff34c4,
        0x57f10f1b,
        0x53f00200,
        0x0535fa06,
-/* 0x09b6: ctx_mmio_pull */
+/* 0x0a0e: ctx_mmio_pull */
        0x4e9803f8,
        0x814f9880,
        0xb69d21f4,
        0x12b60830,
        0xdf1bf401,
-/* 0x09c8: ctx_mmio_done */
+/* 0x0a20: ctx_mmio_done */
        0xf1160398,
        0xf0810007,
        0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvd7_grhub_code[] = {
        0x13f00100,
        0x0601fa06,
        0x00f803f8,
-/* 0x09e8: ctx_xfer */
+/* 0x0a40: ctx_xfer */
        0xf104e7f0,
        0xf0020007,
        0x0ed00303,
-/* 0x09f7: ctx_xfer_idle */
+/* 0x0a4f: ctx_xfer_idle */
        0xf104bd00,
        0xf00000e7,
        0xeecf03e3,
        0x00e4f100,
        0xf21bf420,
        0xf40611f4,
-/* 0x0a0e: ctx_xfer_pre */
+/* 0x0a66: ctx_xfer_pre */
        0xf7f01102,
-       0x1421f510,
-       0x7f21f508,
+       0x6c21f510,
+       0xd721f508,
        0x1c11f407,
-/* 0x0a1c: ctx_xfer_pre_load */
+/* 0x0a74: ctx_xfer_pre_load */
        0xf502f7f0,
-       0xf507b521,
-       0xf507c721,
-       0xbd07dc21,
-       0xb521f5f4,
-       0x5a21f507,
-/* 0x0a35: ctx_xfer_exec */
+       0xf5080d21,
+       0xf5081f21,
+       0xbd083421,
+       0x0d21f5f4,
+       0xb221f508,
+/* 0x0a8d: ctx_xfer_exec */
        0x16019808,
        0x07f124bd,
        0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvd7_grhub_code[] = {
        0x1301f402,
        0xf40ca7f0,
        0xf7f0d021,
-       0x3c21f505,
+       0x9421f505,
        0x3202f408,
-/* 0x0ac4: ctx_xfer_post */
+/* 0x0b1c: ctx_xfer_post */
        0xf502f7f0,
-       0xbd07b521,
-       0x1421f5f4,
+       0xbd080d21,
+       0x6c21f5f4,
        0x7f21f508,
-       0xc721f502,
-       0xf5f4bd07,
-       0xf407b521,
+       0x1f21f502,
+       0xf5f4bd08,
+       0xf4080d21,
        0x01981011,
        0x0511fd40,
        0xf5070bf4,
-/* 0x0aef: ctx_xfer_no_post_mmio */
-       0xf5099321,
-/* 0x0af3: ctx_xfer_done */
-       0xf807a421,
+/* 0x0b47: ctx_xfer_no_post_mmio */
+       0xf509eb21,
+/* 0x0b4b: ctx_xfer_done */
+       0xf807fc21,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
index 6547b3d..51c3797 100644
@@ -528,10 +528,10 @@ uint32_t nve0_grhub_code[] = {
        0x0001d001,
        0x17f104bd,
        0xf7f00100,
-       0x7f21f502,
-       0x9121f507,
+       0xd721f502,
+       0xe921f507,
        0x10f7f007,
-       0x07de21f5,
+       0x083621f5,
        0x98000e98,
        0x21f5010f,
        0x14950150,
@@ -574,9 +574,9 @@ uint32_t nve0_grhub_code[] = {
        0xb6800040,
        0x1bf40132,
        0x00f7f0be,
-       0x07de21f5,
+       0x083621f5,
        0xf500f7f0,
-       0xf1077f21,
+       0xf107d721,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nve0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x31f40132,
-       0xaa21f502,
-       0xf094bd09,
+       0x0221f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nve0_grhub_code[] = {
        0x0203f00f,
        0xbd0009d0,
        0x0131f404,
-       0x09aa21f5,
+       0x0a0221f5,
        0x99f094bd,
        0x0007f106,
        0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nve0_grhub_code[] = {
        0x12b920f9,
        0x0132f402,
        0xf50232f4,
-       0xfc09aa21,
+       0xfc0a0221,
        0x0007f120,
        0x0203f0c0,
        0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nve0_grhub_code[] = {
        0xf41f23c8,
        0x31f40d0b,
        0x0232f401,
-       0x09aa21f5,
+       0x0a0221f5,
 /* 0x063c: chsw_done */
        0xf10127f0,
        0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nve0_grhub_code[] = {
 /* 0x0660: main_not_ctx_switch */
        0xf401e4b0,
        0xf2b90d1b,
-       0x4221f502,
+       0x9a21f502,
        0x460ef409,
 /* 0x0670: main_not_ctx_chan */
        0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nve0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x32f40132,
-       0xaa21f502,
-       0xf094bd09,
+       0x0221f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nve0_grhub_code[] = {
 /* 0x072b: ih_no_ctxsw */
        0xe40421f4,
        0xf40400ab,
-       0xb7f1140b,
+       0xe7f16c0b,
+       0xe3f00708,
+       0x6821f440,
+       0xf102ffb9,
+       0xf0040007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf00704e7,
+       0x21f440e3,
+       0x02ffb968,
+       0x030007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0x9450fec7,
+       0xf7f102ee,
+       0xf3f00700,
+       0x00efbb40,
+       0xf16821f4,
+       0xf0020007,
+       0x0fd00203,
+       0xf004bd00,
+       0x21f503f7,
+       0xb7f1037e,
        0xbfb90100,
        0x44e7f102,
        0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
        0xf19d21f4,
-       0xbd0104b7,
+       0xbd0504b7,
        0xb4abffb0,
        0xf10f0bf4,
        0xf0070007,
        0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
        0xf104bd00,
        0xf0010007,
        0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nve0_grhub_code[] = {
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x077f: ctx_4170s */
+/* 0x07d7: ctx_4170s */
        0xf001f800,
        0xffb910f5,
        0x70e7f102,
        0x40e3f041,
        0xf89d21f4,
-/* 0x0791: ctx_4170w */
+/* 0x07e9: ctx_4170w */
        0x70e7f100,
        0x40e3f041,
        0xb96821f4,
        0xf4f002ff,
        0xf01bf410,
-/* 0x07a6: ctx_redswitch */
+/* 0x07fe: ctx_redswitch */
        0xe7f100f8,
        0xe5f00200,
        0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nve0_grhub_code[] = {
        0xf0850007,
        0x0ed00103,
        0xf004bd00,
-/* 0x07c2: ctx_redswitch_delay */
+/* 0x081a: ctx_redswitch_delay */
        0xf2b608f7,
        0xfd1bf401,
        0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nve0_grhub_code[] = {
        0x850007f1,
        0xd00103f0,
        0x04bd000e,
-/* 0x07de: ctx_86c */
+/* 0x0836: ctx_86c */
        0x07f100f8,
        0x03f01b00,
        0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nve0_grhub_code[] = {
        0xe7f102ff,
        0xe3f0a86c,
        0x9d21f441,
-/* 0x0806: ctx_mem */
+/* 0x085e: ctx_mem */
        0x07f100f8,
        0x03f08400,
        0x000fd002,
-/* 0x0812: ctx_mem_wait */
+/* 0x086a: ctx_mem_wait */
        0xf7f104bd,
        0xf3f08400,
        0x00ffcf02,
        0xf405fffd,
        0x00f8f31b,
-/* 0x0824: ctx_load */
+/* 0x087c: ctx_load */
        0x99f094bd,
        0x0007f105,
        0x0203f00f,
@@ -797,7 +819,7 @@ uint32_t nve0_grhub_code[] = {
        0x0203f083,
        0xbd0002d0,
        0x07f7f004,
-       0x080621f5,
+       0x085e21f5,
        0xc00007f1,
        0xd00203f0,
        0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nve0_grhub_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x0942: ctx_chan */
+/* 0x099a: ctx_chan */
        0x21f500f8,
-       0xa7f00824,
+       0xa7f0087c,
        0xd021f40c,
        0xf505f7f0,
-       0xf8080621,
-/* 0x0955: ctx_mmio_exec */
+       0xf8085e21,
+/* 0x09ad: ctx_mmio_exec */
        0x41039800,
        0x810007f1,
        0xd00203f0,
        0x04bd0003,
-/* 0x0966: ctx_mmio_loop */
+/* 0x09be: ctx_mmio_loop */
        0x34c434bd,
        0x0f1bf4ff,
        0x020057f1,
        0xfa0653f0,
        0x03f80535,
-/* 0x0978: ctx_mmio_pull */
+/* 0x09d0: ctx_mmio_pull */
        0x98804e98,
        0x21f4814f,
        0x0830b69d,
        0xf40112b6,
-/* 0x098a: ctx_mmio_done */
+/* 0x09e2: ctx_mmio_done */
        0x0398df1b,
        0x0007f116,
        0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nve0_grhub_code[] = {
        0x010017f1,
        0xfa0613f0,
        0x03f80601,
-/* 0x09aa: ctx_xfer */
+/* 0x0a02: ctx_xfer */
        0xe7f000f8,
        0x0007f104,
        0x0303f002,
        0xbd000ed0,
-/* 0x09b9: ctx_xfer_idle */
+/* 0x0a11: ctx_xfer_idle */
        0x00e7f104,
        0x03e3f000,
        0xf100eecf,
        0xf42000e4,
        0x11f4f21b,
        0x0d02f406,
-/* 0x09d0: ctx_xfer_pre */
+/* 0x0a28: ctx_xfer_pre */
        0xf510f7f0,
-       0xf407de21,
-/* 0x09da: ctx_xfer_pre_load */
+       0xf4083621,
+/* 0x0a32: ctx_xfer_pre_load */
        0xf7f01c11,
-       0x7f21f502,
-       0x9121f507,
-       0xa621f507,
+       0xd721f502,
+       0xe921f507,
+       0xfe21f507,
        0xf5f4bd07,
-       0xf5077f21,
-/* 0x09f3: ctx_xfer_exec */
-       0x98082421,
+       0xf507d721,
+/* 0x0a4b: ctx_xfer_exec */
+       0x98087c21,
        0x24bd1601,
        0x050007f1,
        0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nve0_grhub_code[] = {
        0xa7f01301,
        0xd021f40c,
        0xf505f7f0,
-       0xf4080621,
-/* 0x0a82: ctx_xfer_post */
+       0xf4085e21,
+/* 0x0ada: ctx_xfer_post */
        0xf7f02e02,
-       0x7f21f502,
+       0xd721f502,
        0xf5f4bd07,
-       0xf507de21,
+       0xf5083621,
        0xf5027f21,
-       0xbd079121,
-       0x7f21f5f4,
+       0xbd07e921,
+       0xd721f5f4,
        0x1011f407,
        0xfd400198,
        0x0bf40511,
-       0x5521f507,
-/* 0x0aad: ctx_xfer_no_post_mmio */
-/* 0x0aad: ctx_xfer_done */
+       0xad21f507,
+/* 0x0b05: ctx_xfer_no_post_mmio */
+/* 0x0b05: ctx_xfer_done */
        0x0000f809,
        0x00000000,
        0x00000000,
@@ -977,4 +999,46 @@ uint32_t nve0_grhub_code[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
 };
index a5aee5a..a0af4b7 100644
@@ -528,10 +528,10 @@ uint32_t nvf0_grhub_code[] = {
        0x0001d001,
        0x17f104bd,
        0xf7f00100,
-       0x7f21f502,
-       0x9121f507,
+       0xd721f502,
+       0xe921f507,
        0x10f7f007,
-       0x07de21f5,
+       0x083621f5,
        0x98000e98,
        0x21f5010f,
        0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvf0_grhub_code[] = {
        0xb6800040,
        0x1bf40132,
        0x00f7f0be,
-       0x07de21f5,
+       0x083621f5,
        0xf500f7f0,
-       0xf1077f21,
+       0xf107d721,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvf0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x31f40132,
-       0xaa21f502,
-       0xf094bd09,
+       0x0221f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvf0_grhub_code[] = {
        0x0203f037,
        0xbd0009d0,
        0x0131f404,
-       0x09aa21f5,
+       0x0a0221f5,
        0x99f094bd,
        0x0007f106,
        0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvf0_grhub_code[] = {
        0x12b920f9,
        0x0132f402,
        0xf50232f4,
-       0xfc09aa21,
+       0xfc0a0221,
        0x0007f120,
        0x0203f0c0,
        0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvf0_grhub_code[] = {
        0xf41f23c8,
        0x31f40d0b,
        0x0232f401,
-       0x09aa21f5,
+       0x0a0221f5,
 /* 0x063c: chsw_done */
        0xf10127f0,
        0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvf0_grhub_code[] = {
 /* 0x0660: main_not_ctx_switch */
        0xf401e4b0,
        0xf2b90d1b,
-       0x4221f502,
+       0x9a21f502,
        0x460ef409,
 /* 0x0670: main_not_ctx_chan */
        0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvf0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x32f40132,
-       0xaa21f502,
-       0xf094bd09,
+       0x0221f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvf0_grhub_code[] = {
 /* 0x072b: ih_no_ctxsw */
        0xe40421f4,
        0xf40400ab,
-       0xb7f1140b,
+       0xe7f16c0b,
+       0xe3f00708,
+       0x6821f440,
+       0xf102ffb9,
+       0xf0040007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf00704e7,
+       0x21f440e3,
+       0x02ffb968,
+       0x030007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0x9450fec7,
+       0xf7f102ee,
+       0xf3f00700,
+       0x00efbb40,
+       0xf16821f4,
+       0xf0020007,
+       0x0fd00203,
+       0xf004bd00,
+       0x21f503f7,
+       0xb7f1037e,
        0xbfb90100,
        0x44e7f102,
        0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
        0xf19d21f4,
-       0xbd0104b7,
+       0xbd0504b7,
        0xb4abffb0,
        0xf10f0bf4,
        0xf0070007,
        0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
        0xf104bd00,
        0xf0010007,
        0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nvf0_grhub_code[] = {
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x077f: ctx_4170s */
+/* 0x07d7: ctx_4170s */
        0xf001f800,
        0xffb910f5,
        0x70e7f102,
        0x40e3f041,
        0xf89d21f4,
-/* 0x0791: ctx_4170w */
+/* 0x07e9: ctx_4170w */
        0x70e7f100,
        0x40e3f041,
        0xb96821f4,
        0xf4f002ff,
        0xf01bf410,
-/* 0x07a6: ctx_redswitch */
+/* 0x07fe: ctx_redswitch */
        0xe7f100f8,
        0xe5f00200,
        0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nvf0_grhub_code[] = {
        0xf0850007,
        0x0ed00103,
        0xf004bd00,
-/* 0x07c2: ctx_redswitch_delay */
+/* 0x081a: ctx_redswitch_delay */
        0xf2b608f7,
        0xfd1bf401,
        0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nvf0_grhub_code[] = {
        0x850007f1,
        0xd00103f0,
        0x04bd000e,
-/* 0x07de: ctx_86c */
+/* 0x0836: ctx_86c */
        0x07f100f8,
        0x03f02300,
        0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nvf0_grhub_code[] = {
        0xe7f102ff,
        0xe3f0a88c,
        0x9d21f441,
-/* 0x0806: ctx_mem */
+/* 0x085e: ctx_mem */
        0x07f100f8,
        0x03f08400,
        0x000fd002,
-/* 0x0812: ctx_mem_wait */
+/* 0x086a: ctx_mem_wait */
        0xf7f104bd,
        0xf3f08400,
        0x00ffcf02,
        0xf405fffd,
        0x00f8f31b,
-/* 0x0824: ctx_load */
+/* 0x087c: ctx_load */
        0x99f094bd,
        0x0007f105,
        0x0203f037,
@@ -797,7 +819,7 @@ uint32_t nvf0_grhub_code[] = {
        0x0203f083,
        0xbd0002d0,
        0x07f7f004,
-       0x080621f5,
+       0x085e21f5,
        0xc00007f1,
        0xd00203f0,
        0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nvf0_grhub_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x0942: ctx_chan */
+/* 0x099a: ctx_chan */
        0x21f500f8,
-       0xa7f00824,
+       0xa7f0087c,
        0xd021f40c,
        0xf505f7f0,
-       0xf8080621,
-/* 0x0955: ctx_mmio_exec */
+       0xf8085e21,
+/* 0x09ad: ctx_mmio_exec */
        0x41039800,
        0x810007f1,
        0xd00203f0,
        0x04bd0003,
-/* 0x0966: ctx_mmio_loop */
+/* 0x09be: ctx_mmio_loop */
        0x34c434bd,
        0x0f1bf4ff,
        0x020057f1,
        0xfa0653f0,
        0x03f80535,
-/* 0x0978: ctx_mmio_pull */
+/* 0x09d0: ctx_mmio_pull */
        0x98804e98,
        0x21f4814f,
        0x0830b69d,
        0xf40112b6,
-/* 0x098a: ctx_mmio_done */
+/* 0x09e2: ctx_mmio_done */
        0x0398df1b,
        0x0007f116,
        0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nvf0_grhub_code[] = {
        0x010017f1,
        0xfa0613f0,
        0x03f80601,
-/* 0x09aa: ctx_xfer */
+/* 0x0a02: ctx_xfer */
        0xe7f000f8,
        0x0007f104,
        0x0303f002,
        0xbd000ed0,
-/* 0x09b9: ctx_xfer_idle */
+/* 0x0a11: ctx_xfer_idle */
        0x00e7f104,
        0x03e3f000,
        0xf100eecf,
        0xf42000e4,
        0x11f4f21b,
        0x0d02f406,
-/* 0x09d0: ctx_xfer_pre */
+/* 0x0a28: ctx_xfer_pre */
        0xf510f7f0,
-       0xf407de21,
-/* 0x09da: ctx_xfer_pre_load */
+       0xf4083621,
+/* 0x0a32: ctx_xfer_pre_load */
        0xf7f01c11,
-       0x7f21f502,
-       0x9121f507,
-       0xa621f507,
+       0xd721f502,
+       0xe921f507,
+       0xfe21f507,
        0xf5f4bd07,
-       0xf5077f21,
-/* 0x09f3: ctx_xfer_exec */
-       0x98082421,
+       0xf507d721,
+/* 0x0a4b: ctx_xfer_exec */
+       0x98087c21,
        0x24bd1601,
        0x050007f1,
        0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nvf0_grhub_code[] = {
        0xa7f01301,
        0xd021f40c,
        0xf505f7f0,
-       0xf4080621,
-/* 0x0a82: ctx_xfer_post */
+       0xf4085e21,
+/* 0x0ada: ctx_xfer_post */
        0xf7f02e02,
-       0x7f21f502,
+       0xd721f502,
        0xf5f4bd07,
-       0xf507de21,
+       0xf5083621,
        0xf5027f21,
-       0xbd079121,
-       0x7f21f5f4,
+       0xbd07e921,
+       0xd721f5f4,
        0x1011f407,
        0xfd400198,
        0x0bf40511,
-       0x5521f507,
-/* 0x0aad: ctx_xfer_no_post_mmio */
-/* 0x0aad: ctx_xfer_done */
+       0xad21f507,
+/* 0x0b05: ctx_xfer_no_post_mmio */
+/* 0x0b05: ctx_xfer_done */
        0x0000f809,
        0x00000000,
        0x00000000,
@@ -977,4 +999,46 @@ uint32_t nvf0_grhub_code[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
 };
index a47d49d..2a0b0f8 100644
 #define GK110 0xf0
 #define GK208 0x108
 
+#define NV_PGRAPH_TRAPPED_ADDR                                         0x400704
+#define NV_PGRAPH_TRAPPED_DATA_LO                                      0x400708
+#define NV_PGRAPH_TRAPPED_DATA_HI                                      0x40070c
+
+#define NV_PGRAPH_FE_OBJECT_TABLE(n)                        ((n) * 4 + 0x400700)
+
 #define NV_PGRAPH_FECS_INTR_ACK                                        0x409004
 #define NV_PGRAPH_FECS_INTR                                            0x409008
 #define NV_PGRAPH_FECS_INTR_FWMTHD                                   0x00000400
index fd1d380..1718ae4 100644
@@ -3,5 +3,6 @@
 
 #define E_BAD_COMMAND  0x00000001
 #define E_CMD_OVERFLOW 0x00000002
+#define E_BAD_FWMTHD   0x00000003
 
 #endif
index 1a2d564..20665c2 100644
@@ -976,7 +976,6 @@ nv50_graph_init(struct nouveau_object *object)
                break;
        case 0xa0:
        default:
-               nv_wr32(priv, 0x402cc0, 0x00000000);
                if (nv_device(priv)->chipset == 0xa0 ||
                    nv_device(priv)->chipset == 0xaa ||
                    nv_device(priv)->chipset == 0xac) {
@@ -991,10 +990,10 @@ nv50_graph_init(struct nouveau_object *object)
 
        /* zero out zcull regions */
        for (i = 0; i < 8; i++) {
-               nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
-               nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
-               nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
-               nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
+               nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
+               nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
+               nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
+               nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
        }
        return 0;
 }
index bf7bdb1..aa08389 100644
@@ -789,17 +789,40 @@ nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
 static void
 nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
 {
-       u32 ustat = nv_rd32(priv, 0x409c18);
+       u32 stat = nv_rd32(priv, 0x409c18);
 
-       if (ustat & 0x00000001)
-               nv_error(priv, "CTXCTL ucode error\n");
-       if (ustat & 0x00080000)
-               nv_error(priv, "CTXCTL watchdog timeout\n");
-       if (ustat & ~0x00080001)
-               nv_error(priv, "CTXCTL 0x%08x\n", ustat);
+       if (stat & 0x00000001) {
+               u32 code = nv_rd32(priv, 0x409814);
+               if (code == E_BAD_FWMTHD) {
+                       u32 class = nv_rd32(priv, 0x409808);
+                       u32  addr = nv_rd32(priv, 0x40980c);
+                       u32  subc = (addr & 0x00070000) >> 16;
+                       u32  mthd = (addr & 0x00003ffc);
+                       u32  data = nv_rd32(priv, 0x409810);
+
+                       nv_error(priv, "FECS MTHD subc %d class 0x%04x "
+                                      "mthd 0x%04x data 0x%08x\n",
+                                subc, class, mthd, data);
 
-       nvc0_graph_ctxctl_debug(priv);
-       nv_wr32(priv, 0x409c20, ustat);
+                       nv_wr32(priv, 0x409c20, 0x00000001);
+                       stat &= ~0x00000001;
+               } else {
+                       nv_error(priv, "FECS ucode error %d\n", code);
+               }
+       }
+
+       if (stat & 0x00080000) {
+               nv_error(priv, "FECS watchdog timeout\n");
+               nvc0_graph_ctxctl_debug(priv);
+               nv_wr32(priv, 0x409c20, 0x00080000);
+               stat &= ~0x00080000;
+       }
+
+       if (stat) {
+               nv_error(priv, "FECS 0x%08x\n", stat);
+               nvc0_graph_ctxctl_debug(priv);
+               nv_wr32(priv, 0x409c20, stat);
+       }
 }
 
 static void
index 75203a9..ffc2891 100644
@@ -38,6 +38,8 @@
 #include <engine/fifo.h>
 #include <engine/graph.h>
 
+#include "fuc/os.h"
+
 #define GPC_MAX 32
 #define TPC_MAX (GPC_MAX * 8)
 
index db1b39d..825f7bb 100644
@@ -84,6 +84,7 @@ extern struct nouveau_oclass *nv4e_i2c_oclass;
 extern struct nouveau_oclass *nv50_i2c_oclass;
 extern struct nouveau_oclass *nv94_i2c_oclass;
 extern struct nouveau_oclass *nvd0_i2c_oclass;
+extern struct nouveau_oclass *gf117_i2c_oclass;
 extern struct nouveau_oclass *nve0_i2c_oclass;
 
 static inline int
index 4ac1aa3..0e62a32 100644
@@ -307,7 +307,6 @@ calc_clk(struct nve0_clock_priv *priv,
                info->dsrc = src0;
                if (div0) {
                        info->ddiv |= 0x80000000;
-                       info->ddiv |= div0 << 8;
                        info->ddiv |= div0;
                }
                if (div1D) {
@@ -352,7 +351,7 @@ nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
 {
        struct nve0_clock_info *info = &priv->eng[clk];
        if (!info->ssel) {
-               nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+               nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
                nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
        }
 }
@@ -389,7 +388,10 @@ static void
 nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
 {
        struct nve0_clock_info *info = &priv->eng[clk];
-       nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+       if (info->ssel)
+               nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+       else
+               nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
 }
 
 static void
index 0f57fcf..2af9cfd 100644
@@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2)
        };
 }
 
-static inline struct ramfuc_reg
+static noinline struct ramfuc_reg
 ramfuc_reg(u32 addr)
 {
        return ramfuc_reg2(addr, addr);
@@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
 
 #define ram_init(s,p)       ramfuc_init(&(s)->base, (p))
 #define ram_exec(s,e)       ramfuc_exec(&(s)->base, (e))
-#define ram_have(s,r)       ((s)->r_##r.addr != 0x000000)
+#define ram_have(s,r)       ((s)->r_##r.addr[0] != 0x000000)
 #define ram_rd32(s,r)       ramfuc_rd32(&(s)->base, &(s)->r_##r)
 #define ram_wr32(s,r,d)     ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
 #define ram_nuke(s,r)       ramfuc_nuke(&(s)->base, &(s)->r_##r)
index 84c7efb..c5b46e3 100644
@@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc)
        /* (re)program mempll, if required */
        if (ram->mode == 2) {
                ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+               ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
                ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
                ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
                ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
@@ -262,8 +263,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
        struct nve0_ram *ram = (void *)pfb->ram;
        struct nve0_ramfuc *fuc = &ram->fuc;
        struct nouveau_ram_data *next = ram->base.next;
-       int vc = !(next->bios.ramcfg_11_02_08);
-       int mv = !(next->bios.ramcfg_11_02_04);
+       int vc = !next->bios.ramcfg_11_02_08;
+       int mv = !next->bios.ramcfg_11_02_04;
        u32 mask, data;
 
        ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -370,8 +371,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                }
        }
 
-       if ( (next->bios.ramcfg_11_02_40) ||
-            (next->bios.ramcfg_11_07_10)) {
+       if (next->bios.ramcfg_11_02_40 ||
+           next->bios.ramcfg_11_07_10) {
                ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
                ram_nsec(fuc, 20000);
        }
@@ -417,7 +418,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                ram_mask(fuc, 0x10f694, 0xff00ff00, data);
        }
 
-       if (ram->mode == 2 && (next->bios.ramcfg_11_08_10))
+       if (ram->mode == 2 && next->bios.ramcfg_11_08_10)
                data = 0x00000080;
        else
                data = 0x00000000;
@@ -425,13 +426,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x00070000;
        data = 0x00000000;
-       if (!(next->bios.ramcfg_11_02_80))
+       if (!next->bios.ramcfg_11_02_80)
                data |= 0x03000000;
-       if (!(next->bios.ramcfg_11_02_40))
+       if (!next->bios.ramcfg_11_02_40)
                data |= 0x00002000;
-       if (!(next->bios.ramcfg_11_07_10))
+       if (!next->bios.ramcfg_11_07_10)
                data |= 0x00004000;
-       if (!(next->bios.ramcfg_11_07_08))
+       if (!next->bios.ramcfg_11_07_08)
                data |= 0x00000003;
        else
                data |= 0x74000000;
@@ -486,7 +487,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        data = mask = 0x00000000;
        if (NOTE00(ramcfg_02_03 != 0)) {
-               data |= (next->bios.ramcfg_11_02_03) << 8;
+               data |= next->bios.ramcfg_11_02_03 << 8;
                mask |= 0x00000300;
        }
        if (NOTE00(ramcfg_01_10)) {
@@ -498,7 +499,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        data = mask = 0x00000000;
        if (NOTE00(timing_30_07 != 0)) {
-               data |= (next->bios.timing_20_30_07) << 28;
+               data |= next->bios.timing_20_30_07 << 28;
                mask |= 0x70000000;
        }
        if (NOTE00(ramcfg_01_01)) {
@@ -510,7 +511,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        data = mask = 0x00000000;
        if (NOTE00(timing_30_07 != 0)) {
-               data |= (next->bios.timing_20_30_07) << 28;
+               data |= next->bios.timing_20_30_07 << 28;
                mask |= 0x70000000;
        }
        if (NOTE00(ramcfg_01_02)) {
@@ -522,16 +523,16 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x33f00000;
        data = 0x00000000;
-       if (!(next->bios.ramcfg_11_01_04))
+       if (!next->bios.ramcfg_11_01_04)
                data |= 0x20200000;
-       if (!(next->bios.ramcfg_11_07_80))
+       if (!next->bios.ramcfg_11_07_80)
                data |= 0x12800000;
        /*XXX: see note above about there probably being some condition
         *     for the 10f824 stuff that uses ramcfg 3...
         */
-       if ( (next->bios.ramcfg_11_03_f0)) {
+       if (next->bios.ramcfg_11_03_f0) {
                if (next->bios.rammap_11_08_0c) {
-                       if (!(next->bios.ramcfg_11_07_80))
+                       if (!next->bios.ramcfg_11_07_80)
                                mask |= 0x00000020;
                        else
                                data |= 0x00000020;
@@ -563,7 +564,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
        }
 
-       data = (next->bios.timing_20_30_07) << 8;
+       data = next->bios.timing_20_30_07 << 8;
        if (next->bios.ramcfg_11_01_01)
                data |= 0x80000000;
        ram_mask(fuc, 0x100778, 0x00000700, data);
@@ -588,7 +589,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
        ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
        ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
 
-       if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) {
+       if (next->bios.ramcfg_11_08_10 && (ram->mode == 2) /*XXX*/) {
                u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
                nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
                ram_nsec(fuc, 1000);
@@ -621,8 +622,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
        data  = ram_rd32(fuc, 0x10f978);
        data &= ~0x00046144;
        data |=  0x0000000b;
-       if (!(next->bios.ramcfg_11_07_08)) {
-               if (!(next->bios.ramcfg_11_07_04))
+       if (!next->bios.ramcfg_11_07_08) {
+               if (!next->bios.ramcfg_11_07_04)
                        data |= 0x0000200c;
                else
                        data |= 0x00000000;
@@ -636,11 +637,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                ram_wr32(fuc, 0x10f830, data);
        }
 
-       if (!(next->bios.ramcfg_11_07_08)) {
+       if (!next->bios.ramcfg_11_07_08) {
                data = 0x88020000;
-               if ( (next->bios.ramcfg_11_07_04))
+               if ( next->bios.ramcfg_11_07_04)
                        data |= 0x10000000;
-               if (!(next->bios.rammap_11_08_10))
+               if (!next->bios.rammap_11_08_10)
                        data |= 0x00080000;
        } else {
                data = 0xa40e0000;
@@ -689,8 +690,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
        const u32 runk0 = ram->fN1 << 16;
        const u32 runk1 = ram->fN1;
        struct nouveau_ram_data *next = ram->base.next;
-       int vc = !(next->bios.ramcfg_11_02_08);
-       int mv = !(next->bios.ramcfg_11_02_04);
+       int vc = !next->bios.ramcfg_11_02_08;
+       int mv = !next->bios.ramcfg_11_02_04;
        u32 mask, data;
 
        ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -705,7 +706,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
        }
 
        ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
-       if ((next->bios.ramcfg_11_03_f0))
+       if (next->bios.ramcfg_11_03_f0)
                ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
 
        ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -761,7 +762,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
        data  = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
-       data |= (next->bios.ramcfg_11_03_30) << 12;
+       data |= next->bios.ramcfg_11_03_30 << 16;
        ram_wr32(fuc, 0x1373ec, data);
        ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
        ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -793,8 +794,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
                }
        }
 
-       if ( (next->bios.ramcfg_11_02_40) ||
-            (next->bios.ramcfg_11_07_10)) {
+       if (next->bios.ramcfg_11_02_40 ||
+           next->bios.ramcfg_11_07_10) {
                ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
                ram_nsec(fuc, 20000);
        }
@@ -810,13 +811,13 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x00010000;
        data = 0x00000000;
-       if (!(next->bios.ramcfg_11_02_80))
+       if (!next->bios.ramcfg_11_02_80)
                data |= 0x03000000;
-       if (!(next->bios.ramcfg_11_02_40))
+       if (!next->bios.ramcfg_11_02_40)
                data |= 0x00002000;
-       if (!(next->bios.ramcfg_11_07_10))
+       if (!next->bios.ramcfg_11_07_10)
                data |= 0x00004000;
-       if (!(next->bios.ramcfg_11_07_08))
+       if (!next->bios.ramcfg_11_07_08)
                data |= 0x00000003;
        else
                data |= 0x14000000;
@@ -844,16 +845,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x33f00000;
        data = 0x00000000;
-       if (!(next->bios.ramcfg_11_01_04))
+       if (!next->bios.ramcfg_11_01_04)
                data |= 0x20200000;
-       if (!(next->bios.ramcfg_11_07_80))
+       if (!next->bios.ramcfg_11_07_80)
                data |= 0x12800000;
        /*XXX: see note above about there probably being some condition
         *     for the 10f824 stuff that uses ramcfg 3...
         */
-       if ( (next->bios.ramcfg_11_03_f0)) {
+       if (next->bios.ramcfg_11_03_f0) {
                if (next->bios.rammap_11_08_0c) {
-                       if (!(next->bios.ramcfg_11_07_80))
+                       if (!next->bios.ramcfg_11_07_80)
                                mask |= 0x00000020;
                        else
                                data |= 0x00000020;
@@ -876,7 +877,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
                data = next->bios.timing_20_2c_1fc0;
        ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
 
-       ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8);
+       ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
 
        ram_wr32(fuc, 0x10f090, 0x4000007f);
        ram_nsec(fuc, 1000);
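
Most of the hunks above only drop redundant parentheses, but two are real fixes: ramcfg_11_03_30 is now shifted by 16 so it lands under the 0x00030000 mask cleared on the previous line, and timing_20_30_f8 gains a << 16 to match the 0x001f0000 mask passed to ram_mask(). A minimal standalone C sketch of that mask/shift rule, with illustrative names rather than nouveau's:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative helper, not nouveau's API: merge a field into a
     * register value.  The shift must place the field's bits inside
     * the mask, or the field is silently dropped. */
    static uint32_t apply_field(uint32_t reg, uint32_t mask,
                                uint32_t field, int shift)
    {
            return (reg & ~mask) | ((field << shift) & mask);
    }

    int main(void)
    {
            /* 0x001f0000 holds a 5-bit field at bit 16: shifting by 16
             * works; the old unshifted value never reached the mask. */
            printf("0x%08x\n", apply_field(0, 0x001f0000, 0x0a, 16));
            printf("0x%08x\n", apply_field(0, 0x001f0000, 0x0a, 0));
            return 0;
    }
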
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
new file mode 100644 (file)
index 0000000..fa891c3
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+gf117_i2c_oclass = &(struct nouveau_i2c_impl) {
+       .base.handle = NV_SUBDEV(I2C, 0xd7),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_i2c_ctor,
+               .dtor = _nouveau_i2c_dtor,
+               .init = _nouveau_i2c_init,
+               .fini = _nouveau_i2c_fini,
+       },
+       .sclass = nvd0_i2c_sclass,
+       .pad_x = &nv04_i2c_pad_oclass,
+       .pad_s = &nv04_i2c_pad_oclass,
+}.base;
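
The new file follows nouveau's usual class-descriptor idiom: a file-scope compound literal builds the derived implementation struct in place and exports a pointer to its embedded base member. A standalone sketch of that C99 pattern, with stand-in types rather than nouveau's:

    #include <stdio.h>

    /* Stand-in types for the sketch; the real code builds a
     * struct nouveau_i2c_impl and exports &literal.base the same way. */
    struct base_class { const char *name; };
    struct impl_class { struct base_class base; int pad_count; };

    /* File-scope compound literal: the object has static storage, so
     * taking the address of its embedded base member is a constant. */
    struct base_class *example_oclass = &(struct impl_class) {
            .base      = { .name = "gf117-i2c" },
            .pad_count = 4,
    }.base;

    int main(void)
    {
            printf("%s\n", example_oclass->name);
            return 0;
    }
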
index 7120124..ebef970 100644 (file)
@@ -94,6 +94,23 @@ nve0_ibus_intr(struct nouveau_subdev *subdev)
        }
 }
 
+static int
+nve0_ibus_init(struct nouveau_object *object)
+{
+       struct nve0_ibus_priv *priv = (void *)object;
+       int ret = nouveau_ibus_init(&priv->base);
+       if (ret == 0) {
+               nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
+               nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
+               nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
+               nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
+               nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
+               nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
+               nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
+       }
+       return ret;
+}
+
 static int
 nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
               struct nouveau_oclass *oclass, void *data, u32 size,
@@ -117,7 +134,7 @@ nve0_ibus_oclass = {
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_ibus_ctor,
                .dtor = _nouveau_ibus_dtor,
-               .init = _nouveau_ibus_init,
+               .init = nve0_ibus_init,
                .fini = _nouveau_ibus_fini,
        },
 };
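
The new nve0_ibus_init() keeps the usual hook shape: run the base-class init, apply the chip-specific register setup only when it succeeded, and propagate the return code either way. A tiny runnable sketch of that shape, with stand-in functions:

    #include <stdio.h>

    /* Stand-ins for nouveau_ibus_init() and the nv_mask() writes. */
    static int  base_init(void)      { return 0; }
    static void poke_registers(void) { puts("chip-specific setup"); }

    /* Same shape as nve0_ibus_init(): base init first, extra setup
     * only on success, return code propagated either way. */
    static int child_init(void)
    {
            int ret = base_init();
            if (ret == 0)
                    poke_registers();
            return ret;
    }

    int main(void) { return child_init(); }
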
index 2284ecb..c2bb616 100644 (file)
@@ -83,7 +83,7 @@ host_send:
                // increment GET
                add b32 $r1 0x1
                and $r14 $r1 #fifo_qmaskf
-               nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
+               nv_iowr(NV_PPWR_FIFO_GET(0), $r14)
                bra #host_send
        host_send_done:
        ret
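
The fuc fix writes back $r14, the incremented GET pointer after masking with #fifo_qmaskf, where the old code wrote the unmasked $r1 and could step past the ring; the four *_pwr_code hunks below are the reassembled microcode images picking up the same one-word change. The masked-advance pattern in standalone C, with an illustrative mask:

    #include <stdint.h>
    #include <assert.h>

    #define QMASK 0x7u      /* illustrative queue mask (ring of 8 slots) */

    /* The GET pointer must be advanced *and* wrapped by the queue mask
     * before being written back; the old microcode wrote the unmasked
     * increment and could run past the ring. */
    static uint32_t advance_get(uint32_t get)
    {
            return (get + 1) & QMASK;
    }

    int main(void)
    {
            assert(advance_get(6) == 7);
            assert(advance_get(7) == 0);    /* wraps instead of overflowing */
            return 0;
    }
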
index 4bd43a9..39a5dc1 100644 (file)
@@ -1018,7 +1018,7 @@ uint32_t nv108_pwr_code[] = {
        0xb600023f,
        0x1ec40110,
        0x04b0400f,
-       0xbd0001f6,
+       0xbd000ef6,
        0xc70ef404,
 /* 0x0328: host_send_done */
 /* 0x032a: host_recv */
index 5a73fa6..254205c 100644 (file)
@@ -1124,7 +1124,7 @@ uint32_t nva3_pwr_code[] = {
        0x0f1ec401,
        0x04b007f1,
        0xd00604b6,
-       0x04bd0001,
+       0x04bd000e,
 /* 0x03cb: host_send_done */
        0xf8ba0ef4,
 /* 0x03cd: host_recv */
index 4dba00d..7ac8740 100644 (file)
@@ -1124,7 +1124,7 @@ uint32_t nvc0_pwr_code[] = {
        0x0f1ec401,
        0x04b007f1,
        0xd00604b6,
-       0x04bd0001,
+       0x04bd000e,
 /* 0x03cb: host_send_done */
        0xf8ba0ef4,
 /* 0x03cd: host_recv */
index 5e24c6b..cd9ff1a 100644 (file)
@@ -1033,7 +1033,7 @@ uint32_t nvd0_pwr_code[] = {
        0xb6026b21,
        0x1ec40110,
        0xb007f10f,
-       0x0001d004,
+       0x000ed004,
        0x0ef404bd,
 /* 0x0365: host_send_done */
 /* 0x0367: host_recv */
index cfde9eb..6212537 100644 (file)
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
        nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
                                             NOUVEAU_THERM_THRS_SHUTDOWN);
 
+       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
        /* schedule the next poll in one second */
        if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
-               ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
-
-       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+               ptimer->alarm(ptimer, 1000000000ULL, alarm);
 }
 
 void
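
The reordering here releases alarm_program_lock before re-arming the one-second poll, so ptimer->alarm() is no longer called with the sensor lock held, and spells the nanosecond delay as 1000000000ULL. A pthread sketch of the drop-the-lock-then-rearm shape, with stand-in names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t sensor_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool temp_valid = true;  /* stand-in for therm->temp_get() >= 0 */

    /* Stand-in for ptimer->alarm(); the real call may take locks of
     * its own, which is why it must not run under sensor_lock. */
    static void rearm_poll(void) { puts("poll re-armed for +1s"); }

    /* Shape of the callback after the fix: locked work, unlock, then
     * re-arm outside the lock. */
    static void alarm_callback(void)
    {
            pthread_mutex_lock(&sensor_lock);
            /* ... threshold hysteresis processing under the lock ... */
            pthread_mutex_unlock(&sensor_lock);

            if (temp_valid)
                    rearm_poll();
    }

    int main(void) { alarm_callback(); return 0; }
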
index 26b5647..47ad742 100644 (file)
@@ -736,6 +736,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                  fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
                  new_bo->bo.offset };
 
+       /* Keep vblanks on during flip, for the target crtc of this flip */
+       drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+
        /* Emit a page flip */
        if (nv_device(drm->device)->card_type >= NV_50) {
                ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
@@ -779,6 +782,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        return 0;
 
 fail_unreserve:
+       drm_vblank_put(dev, nouveau_crtc(crtc)->index);
        ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
        mutex_unlock(&chan->cli->mutex);
@@ -817,6 +821,9 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
                drm_send_vblank_event(dev, crtcid, s->event);
        }
 
+       /* Give up ownership of vblank for page-flipped crtc */
+       drm_vblank_put(dev, s->crtc);
+
        list_del(&s->head);
        if (ps)
                *ps = *s;
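
The flip path now holds a vblank reference for the target CRTC from just before the flip is emitted until it completes, dropping it on the error path as well, so the interrupt stays enabled exactly while a flip is outstanding. A standalone sketch of the balanced get/put discipline, with an illustrative counter in place of the DRM vblank machinery:

    #include <stdio.h>

    /* Illustrative counter standing in for the DRM vblank machinery:
     * drm_vblank_get()/drm_vblank_put() keep the vblank interrupt
     * enabled while any reference is held. */
    static int vblank_refs;

    static int  vblank_get(void) { vblank_refs++; return 0; }
    static void vblank_put(void) { vblank_refs--; }

    /* Same discipline as the hunks above: take the reference before
     * emitting the flip, drop it on the error path, and drop it again
     * in the completion handler, so the count always balances. */
    static int page_flip(int fail_emit)
    {
            vblank_get();
            if (fail_emit) {
                    vblank_put();           /* fail_unreserve path */
                    return -1;
            }
            return 0;   /* the finish-page-flip handler does the final put */
    }

    int main(void)
    {
            page_flip(1);
            printf("refs after failed flip: %d\n", vblank_refs);    /* 0 */
            return 0;
    }
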
index ddd8375..5425ffe 100644 (file)
@@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev)
        ret = nouveau_do_resume(drm_dev);
        if (ret)
                return ret;
-       if (drm_dev->mode_config.num_crtc)
-               nouveau_fbcon_set_suspend(drm_dev, 0);
 
-       nouveau_fbcon_zfill_all(drm_dev);
-       if (drm_dev->mode_config.num_crtc)
+       if (drm_dev->mode_config.num_crtc) {
                nouveau_display_resume(drm_dev);
+               nouveau_fbcon_set_suspend(drm_dev, 0);
+       }
+
        return 0;
 }
 
@@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev)
        ret = nouveau_do_resume(drm_dev);
        if (ret)
                return ret;
-       if (drm_dev->mode_config.num_crtc)
-               nouveau_fbcon_set_suspend(drm_dev, 0);
-       nouveau_fbcon_zfill_all(drm_dev);
-       if (drm_dev->mode_config.num_crtc)
+
+       if (drm_dev->mode_config.num_crtc) {
                nouveau_display_resume(drm_dev);
+               nouveau_fbcon_set_suspend(drm_dev, 0);
+       }
+
        return 0;
 }
 
index 64a42cf..191665e 100644 (file)
@@ -531,17 +531,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
                if (state == 1)
                        nouveau_fbcon_save_disable_accel(dev);
                fb_set_suspend(drm->fbcon->helper.fbdev, state);
-               if (state == 0)
+               if (state == 0) {
                        nouveau_fbcon_restore_accel(dev);
+                       nouveau_fbcon_zfill(dev, drm->fbcon);
+               }
                console_unlock();
        }
 }
-
-void
-nouveau_fbcon_zfill_all(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
-               nouveau_fbcon_zfill(dev, drm->fbcon);
-       }
-}
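
Taken together, the two files above reorder resume so the display is brought back before the fbdev console is woken, and fold the zfill into nouveau_fbcon_set_suspend(dev, 0), which lets nouveau_fbcon_zfill_all() go away. A small sketch of the ordering, with stand-in functions:

    #include <stdio.h>

    /* Stand-ins for nouveau_display_resume() and
     * nouveau_fbcon_set_suspend(dev, 0). */
    static void display_resume(void)  { puts("modeset restored"); }
    static void fbcon_unsuspend(void) { puts("fbcon resumed and zfilled"); }

    /* Shape of the reordered resume/thaw paths: bring the display back
     * first, then wake the fbdev console, which now also re-fills the
     * framebuffer, rather than waking the console against a dead
     * display. */
    static int pm_resume(int have_crtcs)
    {
            if (have_crtcs) {
                    display_resume();
                    fbcon_unsuspend();
            }
            return 0;
    }

    int main(void) { return pm_resume(1); }
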
index fdfc0c9..fcff797 100644 (file)
@@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
 int nouveau_fbcon_init(struct drm_device *dev);
 void nouveau_fbcon_fini(struct drm_device *dev);
 void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
-void nouveau_fbcon_zfill_all(struct drm_device *dev);
 void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
 void nouveau_fbcon_restore_accel(struct drm_device *dev);
 
index afdf607..4c534b7 100644 (file)
@@ -1741,7 +1741,8 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
                }
        }
 
-       mthd  = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
+       mthd  = (ffs(nv_encoder->dcb->heads) - 1) << 3;
+       mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
        mthd |= nv_encoder->or;
 
        if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
index 34d6a85..0bf1e20 100644 (file)
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
 
        pending = xchg(&qdev->ram_header->int_pending, 0);
 
+       if (!pending)
+               return IRQ_NONE;
+
        atomic_inc(&qdev->irq_received);
 
        if (pending & QXL_INTERRUPT_DISPLAY) {
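
Returning IRQ_NONE when no interrupt bit is pending is the standard contract for handlers on potentially shared lines: it lets the core offer the interrupt to other handlers and detect runaway lines, instead of the driver claiming every spurious firing. A standalone sketch with a stand-in irqreturn_t:

    #include <stdint.h>
    #include <stdio.h>

    typedef enum { IRQ_NONE, IRQ_HANDLED } irqreturn_t;  /* stand-in */

    static uint32_t int_pending;  /* stand-in for ram_header->int_pending */

    /* A handler that finds no work must say IRQ_NONE so the core can
     * offer a shared line to other handlers and spot stuck interrupts. */
    static irqreturn_t irq_handler(void)
    {
            uint32_t pending = int_pending;

            int_pending = 0;        /* the driver does this with xchg() */
            if (!pending)
                    return IRQ_NONE;

            /* ... dispatch the pending interrupt bits ... */
            return IRQ_HANDLED;
    }

    int main(void)
    {
            printf("%d\n", irq_handler());  /* 0 == IRQ_NONE, nothing pending */
            return 0;
    }
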
index 26c12a3..30d242b 100644 (file)
@@ -1052,7 +1052,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
        int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
 
        /* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
-       if (ASIC_IS_DCE5(rdev) && !ASIC_IS_DCE8(rdev) &&
+       if (ASIC_IS_DCE5(rdev) &&
            (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
            (radeon_crtc->bpc > 8))
                clock = radeon_crtc->adjusted_clock;
@@ -1136,6 +1136,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
        u32 tmp, viewport_w, viewport_h;
        int r;
+       bool bypass_lut = false;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -1174,33 +1175,73 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
        radeon_bo_unreserve(rbo);
 
-       switch (target_fb->bits_per_pixel) {
-       case 8:
+       switch (target_fb->pixel_format) {
+       case DRM_FORMAT_C8:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
                break;
-       case 15:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_ARGB4444:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_BGRA5551:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
                break;
-       case 16:
+       case DRM_FORMAT_RGB565:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
 #ifdef __BIG_ENDIAN
                fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
 #endif
                break;
-       case 24:
-       case 32:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
 #ifdef __BIG_ENDIAN
                fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
 #endif
                break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_BGRA1010102:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
        default:
-               DRM_ERROR("Unsupported screen depth %d\n",
-                         target_fb->bits_per_pixel);
+               DRM_ERROR("Unsupported screen format %s\n",
+                         drm_get_format_name(target_fb->pixel_format));
                return -EINVAL;
        }
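
Switching on target_fb->pixel_format instead of bits_per_pixel is what makes the new 4444/1555/5551/2101010 cases expressible at all: several of them share a bit depth, so bpp alone cannot select the hardware format code. A standalone sketch using the real DRM fourcc encodings:

    #include <stdint.h>
    #include <stdio.h>

    /* Real little-endian fourcc encodings from drm_fourcc.h. */
    #define fourcc(a, b, c, d) \
            ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
             ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
    #define DRM_FORMAT_RGB565   fourcc('R', 'G', '1', '6')
    #define DRM_FORMAT_ARGB1555 fourcc('A', 'R', '1', '5')

    /* Both formats are 16 bpp yet need different hardware format
     * codes, so bits_per_pixel alone cannot pick the right one. */
    static const char *hw_format(uint32_t fmt)
    {
            switch (fmt) {
            case DRM_FORMAT_RGB565:   return "16BPP_RGB565";
            case DRM_FORMAT_ARGB1555: return "16BPP_ARGB1555";
            default:                  return "unsupported";
            }
    }

    int main(void)
    {
            printf("%s\n", hw_format(DRM_FORMAT_RGB565));
            return 0;
    }
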
 
@@ -1329,6 +1370,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
        WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
+       /*
+        * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT
+        * for > 8 bpc scanout to avoid truncating fb indices to their 8 MSBs, to
+        * retain the full precision throughout the pipeline.
+        */
+       WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset,
+                (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+                ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+       if (bypass_lut)
+               DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
        WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
        WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
        WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
@@ -1361,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
        WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-       /* set pageflip to happen anywhere in vblank interval */
-       WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+       /* set pageflip to happen only at start of vblank interval (front porch) */
+       WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
@@ -1396,6 +1449,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
        u32 tmp, viewport_w, viewport_h;
        int r;
+       bool bypass_lut = false;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -1433,18 +1487,30 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
        radeon_bo_unreserve(rbo);
 
-       switch (target_fb->bits_per_pixel) {
-       case 8:
+       switch (target_fb->pixel_format) {
+       case DRM_FORMAT_C8:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
                    AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
                break;
-       case 15:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_ARGB4444:
+               fb_format =
+                   AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+                   AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
+#ifdef __BIG_ENDIAN
+               fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+               break;
+       case DRM_FORMAT_XRGB1555:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
                    AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+#ifdef __BIG_ENDIAN
+               fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
                break;
-       case 16:
+       case DRM_FORMAT_RGB565:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
                    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
@@ -1452,8 +1518,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
                fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
 #endif
                break;
-       case 24:
-       case 32:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
                    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
@@ -1461,9 +1527,20 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
                fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
 #endif
                break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               fb_format =
+                   AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+                   AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
+#ifdef __BIG_ENDIAN
+               fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+               /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
        default:
-               DRM_ERROR("Unsupported screen depth %d\n",
-                         target_fb->bits_per_pixel);
+               DRM_ERROR("Unsupported screen format %s\n",
+                         drm_get_format_name(target_fb->pixel_format));
                return -EINVAL;
        }
 
@@ -1502,6 +1579,13 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        if (rdev->family >= CHIP_R600)
                WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
+       /* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
+       WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
+                (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN);
+
+       if (bypass_lut)
+               DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
        WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
        WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
        WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
@@ -1530,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
        WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-       /* set pageflip to happen anywhere in vblank interval */
-       WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+       /* set pageflip to happen only at start of vblank interval (front porch) */
+       WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
index c5b1f2d..b1e11f8 100644 (file)
@@ -127,7 +127,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
        /* flags not zero */
        if (args.v1.ucReplyStatus == 2) {
                DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
-               r = -EBUSY;
+               r = -EIO;
                goto done;
        }
 
@@ -403,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
        struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
        u8 msg[DP_DPCD_SIZE];
-       int ret, i;
+       int ret;
+
+       char dpcd_hex_dump[DP_DPCD_SIZE * 3];
 
        ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
                               DP_DPCD_SIZE);
        if (ret > 0) {
                memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
-               DRM_DEBUG_KMS("DPCD: ");
-               for (i = 0; i < DP_DPCD_SIZE; i++)
-                       DRM_DEBUG_KMS("%02x ", msg[i]);
-               DRM_DEBUG_KMS("\n");
+
+               hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
+                                  32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+               DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
                radeon_dp_probe_oui(radeon_connector);
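
The DPCD dump now formats all DP_DPCD_SIZE bytes into one buffer via the kernel's hex_dump_to_buffer() and emits a single DRM_DEBUG_KMS() line, where the old loop issued one debug call per byte and scattered the dump across prefixed log lines. A userspace approximation of the format-then-print-once idea (the 15-byte size is an assumption matching DP_DPCD_SIZE here):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Format the whole buffer into a string first, then emit one
     * message; the kernel driver uses hex_dump_to_buffer() for the
     * formatting step. */
    static void dump_dpcd(const uint8_t *dpcd, size_t len)
    {
            char buf[15 * 3 + 1];   /* "xx " per byte, plus NUL */
            size_t i, n = 0;

            for (i = 0; i < len && n + 3 < sizeof(buf); i++)
                    n += snprintf(buf + n, sizeof(buf) - n, "%02x ", dpcd[i]);

            printf("DPCD: %s\n", buf);
    }

    int main(void)
    {
            const uint8_t dpcd[15] = { 0x11, 0x0a, 0x84 };

            dump_dpcd(dpcd, sizeof(dpcd));
            return 0;
    }
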
 
index 2b29084..7d68203 100644 (file)
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        struct backlight_properties props;
        struct radeon_backlight_privdata *pdata;
        struct radeon_encoder_atom_dig *dig;
-       u8 backlight_level;
        char bl_name[16];
 
        /* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 
        pdata->encoder = radeon_encoder;
 
-       backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
        dig = radeon_encoder->enc_priv;
        dig->bl_dev = bd;
 
        bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+       /* Set a reasonable default here if the level is 0, otherwise
+        * fbdev will attempt to turn the backlight on after console
+        * unblanking and will try to restore 0, which turns the backlight
+        * off again.
+        */
+       if (bd->props.brightness == 0)
+               bd->props.brightness = RADEON_MAX_BL_LEVEL;
        bd->props.power = FB_BLANK_UNBLANK;
        backlight_update_status(bd);
 
index 10dae41..584090a 100644 (file)
@@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
        tmp &= ~GLOBAL_PWRMGT_EN;
        WREG32_SMC(GENERAL_PWRMGT, tmp);
 
-       tmp = RREG32(SCLK_PWRMGT_CNTL);
+       tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
        tmp &= ~DYNAMIC_PM_EN;
        WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
 
index dcd4518..c0ea661 100644 (file)
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
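
When the ring reports an overflow, the status bit is part of the wptr value read back, so it has to be cleared before the value is used as a ring index; the same one-line fix recurs in the evergreen and r600 handlers below. A standalone sketch (the bit position and mask here are illustrative):

    #include <stdint.h>
    #include <assert.h>

    #define RB_OVERFLOW  (1u << 0)  /* assumed bit position, illustrative */
    #define PTR_MASK     0xffffu    /* illustrative ring pointer mask */

    /* The overflow flag rides inside the wptr register value and must
     * be stripped before the value can index the ring. */
    static uint32_t get_ih_wptr(uint32_t wptr)
    {
            if (wptr & RB_OVERFLOW) {
                    /* ... log and clear the overflow condition ... */
                    wptr &= ~RB_OVERFLOW;
            }
            return wptr & PTR_MASK;
    }

    int main(void)
    {
            assert(get_ih_wptr(0x10 | RB_OVERFLOW) == 0x10);
            return 0;
    }
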
@@ -7676,14 +7678,16 @@ restart_ih:
                        addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
                        status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
                        mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       if (addr == 0x0 && status == 0x0)
+                               break;
                        dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                                addr);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                                status);
                        cik_vm_decode_fault(rdev, status, addr, mc_client);
-                       /* reset addr and status */
-                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
                        break;
                case 167: /* VCE */
                        DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
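
The fault handler now acknowledges VM_CONTEXT1_CNTL2 first and bails out when both the address and status read back zero, which keeps an already-consumed fault (one fault can raise several interrupts) from being decoded and logged again; the evergreen handler below gets the same treatment. A standalone sketch of the ack-early, skip-if-empty shape:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fault_addr, fault_status;       /* stand-in registers */

    static void ack_fault(void) { fault_addr = fault_status = 0; }

    /* Ack first, then skip the decode when both values read back zero:
     * a duplicate interrupt for an already-handled fault stays silent. */
    static void handle_vm_fault(void)
    {
            uint32_t addr = fault_addr, status = fault_status;

            ack_fault();                    /* reset addr and status early */
            if (addr == 0 && status == 0)
                    return;                 /* nothing new to report */

            printf("GPU fault at 0x%08x, status 0x%08x\n", addr, status);
    }

    int main(void)
    {
            fault_addr = 0x1000;
            fault_status = 0xc;
            handle_vm_fault();      /* logs the fault */
            handle_vm_fault();      /* registers now zero: silent */
            return 0;
    }
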
index ae88660..0c6e1b5 100644 (file)
 #define                EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
 #define                EOP_TCL1_ACTION_EN                      (1 << 16)
 #define                EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
+#define                EOP_TCL2_VOLATILE                       (1 << 24)
 #define                EOP_CACHE_POLICY(x)                     ((x) << 25)
                 /* 0 - LRU
                 * 1 - Stream
                 * 2 - Bypass
                 */
-#define                EOP_TCL2_VOLATILE                       (1 << 27)
 #define                DATA_SEL(x)                             ((x) << 29)
                 /* 0 - discard
                 * 1 - send low 32bit data
index 5a9a5f4..47d31e9 100644 (file)
@@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
 
                table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
                table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
-                       cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+                       cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
        }
 
        return 0;
index e2f6052..15e4f28 100644 (file)
@@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
        0x8c1c, 0xffffffff, 0x00001010,
        0x28350, 0xffffffff, 0x00000000,
        0xa008, 0xffffffff, 0x00010000,
-       0x5cc, 0xffffffff, 0x00000001,
+       0x5c4, 0xffffffff, 0x00000001,
        0x9508, 0xffffffff, 0x00000002,
        0x913c, 0x0000000f, 0x0000000a
 };
@@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] =
        0x8c1c, 0xffffffff, 0x00001010,
        0x28350, 0xffffffff, 0x00000000,
        0xa008, 0xffffffff, 0x00010000,
-       0x5cc, 0xffffffff, 0x00000001,
+       0x5c4, 0xffffffff, 0x00000001,
        0x9508, 0xffffffff, 0x00000002
 };
 
@@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
 static const u32 supersumo_golden_registers[] =
 {
        0x5eb4, 0xffffffff, 0x00000002,
-       0x5cc, 0xffffffff, 0x00000001,
+       0x5c4, 0xffffffff, 0x00000001,
        0x7030, 0xffffffff, 0x00000011,
        0x7c30, 0xffffffff, 0x00000011,
        0x6104, 0x01000300, 0x00000000,
@@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] =
 static const u32 wrestler_golden_registers[] =
 {
        0x5eb4, 0xffffffff, 0x00000002,
-       0x5cc, 0xffffffff, 0x00000001,
+       0x5c4, 0xffffffff, 0x00000001,
        0x7030, 0xffffffff, 0x00000011,
        0x7c30, 0xffffffff, 0x00000011,
        0x6104, 0x01000300, 0x00000000,
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        for (i = 0; i < rdev->num_crtc; i++) {
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if ((tmp & 0x3) != 0) {
-                               tmp &= ~0x3;
+                       if ((tmp & 0x7) != 3) {
+                               tmp &= ~0x7;
+                               tmp |= 0x3;
                                WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -5066,14 +5068,16 @@ restart_ih:
                case 147:
                        addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
                        status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       if (addr == 0x0 && status == 0x0)
+                               break;
                        dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                                addr);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                                status);
                        cayman_vm_decode_fault(rdev, status, addr);
-                       /* reset addr and status */
-                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
                        break;
                case 176: /* CP_INT in ring buffer */
                case 177: /* CP_INT in IB1 */
index a0f63ff..23bff59 100644 (file)
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
 #       define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
 #       define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL         0x6808
+#       define EVERGREEN_LUT_10BIT_BYPASS_EN            (1 << 8)
 #define EVERGREEN_GRPH_SWAP_CONTROL                     0x680c
 #       define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
 #       define EVERGREEN_GRPH_ENDIAN_NONE               0
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
 #define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 #define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
 #define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
index 3f6e817..9ef8c38 100644 (file)
@@ -2726,7 +2726,7 @@ int kv_dpm_init(struct radeon_device *rdev)
        pi->caps_sclk_ds = true;
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
-       pi->bapm_enable = false;
+       pi->bapm_enable = true;
        pi->voltage_drop_t = 0;
        pi->caps_sclk_throttle_low_notification = false;
        pi->caps_fps = false; /* true? */
index 004c931..01fc488 100644 (file)
@@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
 
                table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
                table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
-                       cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+                       cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
        }
 }
 
index 1dd0d32..136b7bc 100644 (file)
  * block and vice versa.  This applies to GRPH, CUR, etc.
  */
 #define AVIVO_D1GRPH_LUT_SEL                                    0x6108
+#       define AVIVO_LUT_10BIT_BYPASS_EN                        (1 << 8)
 #define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
 #define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6914
 #define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6114
index c66952d..3c69f58 100644 (file)
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 4b0bbf8..60c47f8 100644 (file)
@@ -102,6 +102,7 @@ extern int radeon_runtime_pm;
 extern int radeon_hard_reset;
 extern int radeon_vm_size;
 extern int radeon_vm_block_size;
+extern int radeon_deep_color;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -448,6 +449,7 @@ struct radeon_bo_va {
 
        /* protected by vm mutex */
        struct list_head                vm_list;
+       struct list_head                vm_status;
 
        /* constant after initialization */
        struct radeon_vm                *vm;
@@ -683,10 +685,9 @@ struct radeon_flip_work {
        struct work_struct              unpin_work;
        struct radeon_device            *rdev;
        int                             crtc_id;
-       struct drm_framebuffer          *fb;
+       uint64_t                        base;
        struct drm_pending_vblank_event *event;
        struct radeon_bo                *old_rbo;
-       struct radeon_bo                *new_rbo;
        struct radeon_fence             *fence;
 };
 
@@ -749,10 +750,6 @@ union radeon_irq_stat_regs {
        struct cik_irq_stat_regs cik;
 };
 
-#define RADEON_MAX_HPD_PINS 7
-#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 7
-
 struct radeon_irq {
        bool                            installed;
        spinlock_t                      lock;
@@ -871,6 +868,9 @@ struct radeon_vm {
        struct list_head                va;
        unsigned                        id;
 
+       /* BOs freed, but not yet updated in the PT */
+       struct list_head                freed;
+
        /* contains the page directory */
        struct radeon_bo                *page_directory;
        uint64_t                        pd_gpu_addr;
@@ -879,6 +879,8 @@ struct radeon_vm {
        /* array of page tables, one for each page directory entry */
        struct radeon_vm_pt             *page_tables;
 
+       struct radeon_bo_va             *ib_bo_va;
+
        struct mutex                    mutex;
        /* last fence for cs using this vm */
        struct radeon_fence             *fence;
@@ -2836,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
 int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                    struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo);
@@ -2851,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t offset,
                          uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va);
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
index 3084481..173f378 100644 (file)
@@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                        rdev->clock.default_dispclk =
                                le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
                        if (rdev->clock.default_dispclk == 0) {
-                               if (ASIC_IS_DCE5(rdev))
+                               if (ASIC_IS_DCE6(rdev))
+                                       rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+                               else if (ASIC_IS_DCE5(rdev))
                                        rdev->clock.default_dispclk = 54000; /* 540 Mhz */
                                else
                                        rdev->clock.default_dispclk = 60000; /* 600 Mhz */
                        }
+                       /* set a reasonable default for DP */
+                       if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
+                               DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+                                        rdev->clock.default_dispclk / 100);
+                               rdev->clock.default_dispclk = 60000;
+                       }
                        rdev->clock.dp_extclk =
                                le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
                        rdev->clock.current_dispclk = rdev->clock.default_dispclk;
index 933c5c3..4483119 100644 (file)
@@ -199,6 +199,9 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
                }
        }
 
+       if ((radeon_deep_color == 0) && (bpc > 8))
+               bpc = 8;
+
        DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
                          connector->name, connector->display_info.bpc, bpc);
 
@@ -1288,17 +1291,15 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
                    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
                    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
                        return MODE_OK;
-               else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
-                       if (ASIC_IS_DCE6(rdev)) {
-                               /* HDMI 1.3+ supports max clock of 340 Mhz */
-                               if (mode->clock > 340000)
-                                       return MODE_CLOCK_HIGH;
-                               else
-                                       return MODE_OK;
-                       } else
+               else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                       /* HDMI 1.3+ supports max clock of 340 Mhz */
+                       if (mode->clock > 340000)
                                return MODE_CLOCK_HIGH;
-               } else
+                       else
+                               return MODE_OK;
+               } else {
                        return MODE_CLOCK_HIGH;
+               }
        }
 
        /* check against the max pixel clock */
@@ -1549,6 +1550,8 @@ out:
 static int radeon_dp_mode_valid(struct drm_connector *connector,
                                  struct drm_display_mode *mode)
 {
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 
@@ -1579,14 +1582,23 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
                                        return MODE_PANEL;
                        }
                }
-               return MODE_OK;
        } else {
                if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-                   (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+                   (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
                        return radeon_dp_mode_valid_helper(connector, mode);
-               else
-                       return MODE_OK;
+               } else {
+                       if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                               /* HDMI 1.3+ supports max clock of 340 Mhz */
+                               if (mode->clock > 340000)
+                                       return MODE_CLOCK_HIGH;
+                       } else {
+                               if (mode->clock > 165000)
+                                       return MODE_CLOCK_HIGH;
+                       }
+               }
        }
+
+       return MODE_OK;
 }
 
 static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
index 71a1434..ae763f6 100644 (file)
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
 {
        struct radeon_device *rdev = p->rdev;
+       struct radeon_bo_va *bo_va;
        int i, r;
 
        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;
 
-       r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+       r = radeon_vm_clear_freed(rdev, vm);
+       if (r)
+               return r;
+
+       if (vm->ib_bo_va == NULL) {
+               DRM_ERROR("Tmp BO not in VM!\n");
+               return -EINVAL;
+       }
+
+       r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                        continue;
 
                bo = p->relocs[i].robj;
-               r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+               bo_va = radeon_vm_bo_find(vm, bo);
+               if (bo_va == NULL) {
+                       dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+                       return -EINVAL;
+               }
+
+               r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
        }
index 03686fa..697add2 100644 (file)
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
        if (!radeon_check_pot_argument(radeon_vm_size)) {
                dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
-       if (radeon_vm_size < 4) {
-               dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+       if (radeon_vm_size < 1) {
+               dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /*
         * Max GPUVM size for Cayman, SI and CI are 40 bits.
         */
-       if (radeon_vm_size > 1024*1024) {
-               dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+       if (radeon_vm_size > 1024) {
+               dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (radeon_vm_block_size < 9) {
-               dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+               dev_warn(rdev->dev, "VM page table size (%d) too small\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
 
        if (radeon_vm_block_size > 24 ||
-           radeon_vm_size < (1ull << radeon_vm_block_size)) {
-               dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+           (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+               dev_warn(rdev->dev, "VM page table size (%d) too large\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
        /* Adjust VM size here.
         * Max GPUVM size for cayman+ is 40 bits.
         */
-       rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+       rdev->vm_manager.max_pfn = radeon_vm_size << 18;
 
        /* Set asic functions */
        r = radeon_asic_init(rdev);
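
radeon_vm_size is now expressed in gigabytes, so the pfn computation shifts by 18 rather than 8: max_pfn counts 4 KB pages, and 1 GB / 4 KB = 2^30 / 2^12 = 2^18 pages per gigabyte (the old megabyte-based value needed 2^20 / 2^12 = 2^8). A two-line check:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int vm_size_gb = 4;     /* new default, in GB */
            uint64_t max_pfn = (uint64_t)vm_size_gb << 18;

            /* 4 GB of address space at 4 KB per page = 1048576 pages. */
            printf("%d GB -> %llu pages\n", vm_size_gb,
                   (unsigned long long)max_pfn);
            return 0;
    }
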
index 5ed6170..bf25061 100644 (file)
@@ -66,7 +66,8 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
                             (radeon_crtc->lut_b[i] << 0));
        }
 
-       WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+       /* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
+       WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
 }
 
 static void dce4_crtc_load_lut(struct drm_crtc *crtc)
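
WREG32_P(reg, val, mask) in radeon is a read-modify-write where mask names the bits to preserve, so passing ~1 updates only bit 0; that is what keeps the new 10-bit-bypass bit in LUT_SEL intact while the CRTC id is selected. A standalone sketch of the macro's semantics against a fake register file:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t regs[1];        /* fake one-register file */

    static uint32_t rreg(int r)             { return regs[r]; }
    static void     wreg(int r, uint32_t v) { regs[r] = v; }

    /* Same semantics as radeon's WREG32_P(reg, val, mask): bits set in
     * mask are preserved from the current value, the rest come from
     * val, so a mask of ~1 touches only bit 0. */
    static void wreg_p(int r, uint32_t val, uint32_t mask)
    {
            uint32_t tmp = rreg(r);

            tmp &= mask;
            tmp |= val & ~mask;
            wreg(r, tmp);
    }

    int main(void)
    {
            regs[0] = 0x100;        /* bit 8: the 10-bit-bypass enable */
            wreg_p(0, 1, ~1u);      /* select CRTC 1 via bit 0 only */
            assert(regs[0] == 0x101);       /* bypass bit survived */
            return 0;
    }
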
@@ -284,7 +285,6 @@ static void radeon_unpin_work_func(struct work_struct *__work)
 void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
-       struct radeon_flip_work *work;
        unsigned long flags;
        u32 update_pending;
        int vpos, hpos;
@@ -294,8 +294,11 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
                return;
 
        spin_lock_irqsave(&rdev->ddev->event_lock, flags);
-       work = radeon_crtc->flip_work;
-       if (work == NULL) {
+       if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+                                "RADEON_FLIP_SUBMITTED(%d)\n",
+                                radeon_crtc->flip_status,
+                                RADEON_FLIP_SUBMITTED);
                spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
                return;
        }
@@ -343,12 +346,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 
        spin_lock_irqsave(&rdev->ddev->event_lock, flags);
        work = radeon_crtc->flip_work;
-       if (work == NULL) {
+       if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+                                "RADEON_FLIP_SUBMITTED(%d)\n",
+                                radeon_crtc->flip_status,
+                                RADEON_FLIP_SUBMITTED);
                spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
                return;
        }
 
        /* Pageflip completed. Clean up. */
+       radeon_crtc->flip_status = RADEON_FLIP_NONE;
        radeon_crtc->flip_work = NULL;
 
        /* wakeup userspace */
@@ -357,8 +365,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 
        spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
-       radeon_fence_unref(&work->fence);
-       radeon_irq_kms_pflip_irq_get(rdev, work->crtc_id);
+       drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+       radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
        queue_work(radeon_crtc->flip_queue, &work->unpin_work);
 }
 
@@ -377,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
        struct drm_crtc *crtc = &radeon_crtc->base;
-       struct drm_framebuffer *fb = work->fb;
-
-       uint32_t tiling_flags, pitch_pixels;
-       uint64_t base;
-
        unsigned long flags;
        int r;
 
         down_read(&rdev->exclusive_lock);
-       while (work->fence) {
+       if (work->fence) {
                r = radeon_fence_wait(work->fence, false);
                if (r == -EDEADLK) {
                        up_read(&rdev->exclusive_lock);
                        r = radeon_gpu_reset(rdev);
                        down_read(&rdev->exclusive_lock);
                }
+               if (r)
+                       DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
 
-               if (r) {
-                       DRM_ERROR("failed to wait on page flip fence (%d)!\n",
-                                 r);
-                       goto cleanup;
-               } else
-                       radeon_fence_unref(&work->fence);
+               /* We continue with the page flip even if we failed to wait on
+                * the fence, otherwise the DRM core and userspace will be
+                * confused about which BO the CRTC is scanning out
+                */
+
+               radeon_fence_unref(&work->fence);
        }
 
+       /* We borrow the event spin lock for protecting flip_status */
+       spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+       /* set the proper interrupt */
+       radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+       /* do the flip (mmio) */
+       radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+       radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+       up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_pending_vblank_event *event,
+                                uint32_t page_flip_flags)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_framebuffer *old_radeon_fb;
+       struct radeon_framebuffer *new_radeon_fb;
+       struct drm_gem_object *obj;
+       struct radeon_flip_work *work;
+       struct radeon_bo *new_rbo;
+       uint32_t tiling_flags, pitch_pixels;
+       uint64_t base;
+       unsigned long flags;
+       int r;
+
+       work = kzalloc(sizeof *work, GFP_KERNEL);
+       if (work == NULL)
+               return -ENOMEM;
+
+       INIT_WORK(&work->flip_work, radeon_flip_work_func);
+       INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+       work->rdev = rdev;
+       work->crtc_id = radeon_crtc->crtc_id;
+       work->event = event;
+
+       /* schedule unpin of the old buffer */
+       old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+       obj = old_radeon_fb->obj;
+
+       /* take a reference to the old object */
+       drm_gem_object_reference(obj);
+       work->old_rbo = gem_to_radeon_bo(obj);
+
+       new_radeon_fb = to_radeon_framebuffer(fb);
+       obj = new_radeon_fb->obj;
+       new_rbo = gem_to_radeon_bo(obj);
+
+       spin_lock(&new_rbo->tbo.bdev->fence_lock);
+       if (new_rbo->tbo.sync_obj)
+               work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+       spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
        /* pin the new buffer */
-       DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
-                        work->old_rbo, work->new_rbo);
+       DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+                        work->old_rbo, new_rbo);
 
-       r = radeon_bo_reserve(work->new_rbo, false);
+       r = radeon_bo_reserve(new_rbo, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("failed to reserve new rbo buffer before flip\n");
                goto cleanup;
        }
        /* Only 27 bit offset for legacy CRTC */
-       r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+       r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
                                     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
        if (unlikely(r != 0)) {
-               radeon_bo_unreserve(work->new_rbo);
+               radeon_bo_unreserve(new_rbo);
                r = -EINVAL;
                DRM_ERROR("failed to pin new rbo buffer before flip\n");
                goto cleanup;
        }
-       radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
-       radeon_bo_unreserve(work->new_rbo);
+       radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+       radeon_bo_unreserve(new_rbo);
 
        if (!ASIC_IS_AVIVO(rdev)) {
                /* crtc offset is from display base addr not FB location */
@@ -458,82 +523,24 @@ static void radeon_flip_work_func(struct work_struct *__work)
                }
                base &= ~7;
        }
+       work->base = base;
 
-       /* We borrow the event spin lock for protecting flip_work */
-       spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
-       /* set the proper interrupt */
-       radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
-
-       /* do the flip (mmio) */
-       radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
-
-       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-       up_read(&rdev->exclusive_lock);
-
-       return;
-
-cleanup:
-       drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-       radeon_fence_unref(&work->fence);
-       kfree(work);
-       up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-                                struct drm_framebuffer *fb,
-                                struct drm_pending_vblank_event *event,
-                                uint32_t page_flip_flags)
-{
-       struct drm_device *dev = crtc->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       struct radeon_framebuffer *old_radeon_fb;
-       struct radeon_framebuffer *new_radeon_fb;
-       struct drm_gem_object *obj;
-       struct radeon_flip_work *work;
-       unsigned long flags;
-
-       work = kzalloc(sizeof *work, GFP_KERNEL);
-       if (work == NULL)
-               return -ENOMEM;
-
-       INIT_WORK(&work->flip_work, radeon_flip_work_func);
-       INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
-       work->rdev = rdev;
-       work->crtc_id = radeon_crtc->crtc_id;
-       work->fb = fb;
-       work->event = event;
-
-       /* schedule unpin of the old buffer */
-       old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-       obj = old_radeon_fb->obj;
-
-       /* take a reference to the old object */
-       drm_gem_object_reference(obj);
-       work->old_rbo = gem_to_radeon_bo(obj);
-
-       new_radeon_fb = to_radeon_framebuffer(fb);
-       obj = new_radeon_fb->obj;
-       work->new_rbo = gem_to_radeon_bo(obj);
-
-       spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
-       if (work->new_rbo->tbo.sync_obj)
-               work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
-       spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
+       r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
+       if (r) {
+               DRM_ERROR("failed to get vblank before flip\n");
+               goto pflip_cleanup;
+       }
 
        /* We borrow the event spin lock for protecting flip_work */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-       if (radeon_crtc->flip_work) {
+       if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-               drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-               radeon_fence_unref(&work->fence);
-               kfree(work);
-               return -EBUSY;
+               r = -EBUSY;
+               goto vblank_cleanup;
        }
+       radeon_crtc->flip_status = RADEON_FLIP_PENDING;
        radeon_crtc->flip_work = work;
 
        /* update crtc fb */
@@ -542,8 +549,27 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
        queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
        return 0;
+
+vblank_cleanup:
+       drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
+
+pflip_cleanup:
+       if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
+               DRM_ERROR("failed to reserve new rbo in error path\n");
+               goto cleanup;
+       }
+       if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
+               DRM_ERROR("failed to unpin new rbo in error path\n");
+       }
+       radeon_bo_unreserve(new_rbo);
+
+cleanup:
+       drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+       radeon_fence_unref(&work->fence);
+       kfree(work);
+
+       return r;
 }
 
 static int
@@ -803,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
        struct radeon_device *rdev = dev->dev_private;
        int ret = 0;
 
+       /* don't leak the edid if we already fetched it in detect() */
+       if (radeon_connector->edid)
+               goto got_edid;
+
        /* on hw with routers, select right port */
        if (radeon_connector->router.ddc_valid)
                radeon_router_select_ddc_port(radeon_connector);
@@ -841,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
                        radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
        }
        if (radeon_connector->edid) {
+got_edid:
                drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
                ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
                drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
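
/*
 * A minimal sketch of the got_edid guard added above. my_connector,
 * fetch_edid() and add_modes_from_edid() are hypothetical stand-ins for
 * the radeon/drm helpers: when detect() has already cached an EDID, the
 * DDC fetch is skipped so the earlier copy is not leaked.
 */
struct my_connector { void *edid; };
void *fetch_edid(struct my_connector *c);          /* hypothetical DDC read */
int add_modes_from_edid(struct my_connector *c);   /* hypothetical consumer */

static int ddc_get_modes(struct my_connector *c)
{
	if (c->edid)            /* already fetched in detect() */
		goto got_edid;

	c->edid = fetch_edid(c);
	if (!c->edid)
		return 0;
got_edid:
	return add_modes_from_edid(c);
}
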
index 6e30174..e9e3610 100644 (file)
@@ -173,8 +173,9 @@ int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
 int radeon_vm_block_size = 9;
+int radeon_deep_color = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -242,12 +243,15 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
 MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
 module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
 module_param_named(vm_size, radeon_vm_size, int, 0444);
 
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
 module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
 
+MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
+module_param_named(deep_color, radeon_deep_color, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
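
/*
 * With the change above, radeon_vm_size is now expressed in gigabytes
 * rather than megabytes (default 4 == 4GB). A standalone illustration of
 * the byte counts the old and new defaults describe; the shift-based
 * conversion is illustrative, not the driver's exact code:
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int vm_size = 4;                                /* new unit: GB */
	uint64_t new_bytes = (uint64_t)vm_size << 30;
	uint64_t old_bytes = (uint64_t)4096 << 20;      /* old unit: MB */

	/* both defaults describe the same 4GB address space */
	printf("%llu %llu\n", (unsigned long long)new_bytes,
	       (unsigned long long)old_bytes);
	return 0;
}
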
index 35d9318..d25ae6a 100644 (file)
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN) {
                struct radeon_fpriv *fpriv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm;
                int r;
 
                fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               r = radeon_vm_init(rdev, &fpriv->vm);
+               vm = &fpriv->vm;
+               r = radeon_vm_init(rdev, vm);
                if (r) {
                        kfree(fpriv);
                        return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
 
                        /* map the ib pool buffer read only into
                         * virtual address space */
-                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                                rdev->ring_tmp_bo.bo);
-                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                       vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+                                                       rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+                                                 RADEON_VA_IB_OFFSET,
                                                  RADEON_VM_PAGE_READABLE |
                                                  RADEON_VM_PAGE_SNOOPED);
 
                        radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
                struct radeon_fpriv *fpriv = file_priv->driver_priv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm = &fpriv->vm;
                int r;
 
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (!r) {
-                               bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                         rdev->ring_tmp_bo.bo);
-                               if (bo_va)
-                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               if (vm->ib_bo_va)
+                                       radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
                                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        }
                }
 
-               radeon_vm_fini(rdev, &fpriv->vm);
+               radeon_vm_fini(rdev, vm);
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
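
/*
 * The open/close hunks above cache the IB pool mapping in vm->ib_bo_va at
 * open time instead of re-finding it with radeon_vm_bo_find() on close.
 * A minimal sketch of the cached-pointer shape; my_vm, my_bo_va and
 * vm_bo_rmv() are hypothetical stand-ins:
 */
struct my_bo_va;
void vm_bo_rmv(struct my_bo_va *bo_va);    /* hypothetical teardown helper */

struct my_vm {
	struct my_bo_va *ib_bo_va;         /* filled in once, at open */
};

static void vm_close(struct my_vm *vm)
{
	/* no list walk and no "mapping not found" case to handle */
	if (vm->ib_bo_va)
		vm_bo_rmv(vm->ib_bo_va);
}
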
index ad0e4b8..0592ddb 100644 (file)
@@ -46,6 +46,10 @@ struct radeon_device;
 #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
 #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
 
+#define RADEON_MAX_HPD_PINS 7
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
+
 enum radeon_rmx_type {
        RMX_OFF,
        RMX_FULL,
@@ -233,8 +237,8 @@ struct radeon_mode_info {
        struct card_info *atom_card_info;
        enum radeon_connector_table connector_table;
        bool mode_config_initialized;
-       struct radeon_crtc *crtcs[6];
-       struct radeon_afmt *afmt[7];
+       struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
+       struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
        /* DVI-I properties */
        struct drm_property *coherent_mode_property;
        /* DAC enable load detect */
@@ -302,6 +306,12 @@ struct radeon_atom_ss {
        uint16_t amount;
 };
 
+enum radeon_flip_status {
+       RADEON_FLIP_NONE,
+       RADEON_FLIP_PENDING,
+       RADEON_FLIP_SUBMITTED
+};
+
 struct radeon_crtc {
        struct drm_crtc base;
        int crtc_id;
@@ -327,6 +337,7 @@ struct radeon_crtc {
        /* page flipping */
        struct workqueue_struct *flip_queue;
        struct radeon_flip_work *flip_work;
+       enum radeon_flip_status flip_status;
        /* pll sharing */
        struct radeon_atom_ss ss;
        bool ss_enabled;
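
/*
 * A sketch of the state machine introduced by flip_status above. The
 * NONE -> PENDING step is visible in the page-flip ioctl hunk earlier;
 * the remaining transitions are assumed to live in the flip worker and
 * pflip interrupt paths, which these hunks do not show:
 *
 *   RADEON_FLIP_NONE      --ioctl queues flip work-->  RADEON_FLIP_PENDING
 *   RADEON_FLIP_PENDING   --worker programs the hw-->  RADEON_FLIP_SUBMITTED
 *   RADEON_FLIP_SUBMITTED --flip completion irq----->  RADEON_FLIP_NONE
 *
 * Rejecting a second flip while one is in flight then becomes a single
 * status check under event_lock:
 *
 *   if (radeon_crtc->flip_status != RADEON_FLIP_NONE)
 *           return -EBUSY;
 *   radeon_crtc->flip_status = RADEON_FLIP_PENDING;
 */
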
index 12c663e..e447e39 100644 (file)
@@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
                        rdev->pm.dpm.ac_power = true;
                else
                        rdev->pm.dpm.ac_power = false;
-               if (rdev->asic->dpm.enable_bapm)
-                       radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+               if (rdev->family == CHIP_ARUBA) {
+                       if (rdev->asic->dpm.enable_bapm)
+                               radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+               }
                mutex_unlock(&rdev->pm.mutex);
         } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (rdev->pm.profile == PM_PROFILE_AUTO) {
index 899d912..725d366 100644 (file)
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->vm_list);
+       INIT_LIST_HEAD(&bo_va->vm_status);
 
        mutex_lock(&vm->mutex);
        list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                head = &tmp->vm_list;
        }
 
+       if (bo_va->soffset) {
+               /* add a clone of the bo_va to clear the old address */
+               tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+               if (!tmp) {
+                       mutex_unlock(&vm->mutex);
+                       return -ENOMEM;
+               }
+               tmp->soffset = bo_va->soffset;
+               tmp->eoffset = bo_va->eoffset;
+               tmp->vm = vm;
+               list_add(&tmp->vm_status, &vm->freed);
+       }
+
        bo_va->soffset = soffset;
        bo_va->eoffset = eoffset;
        bo_va->flags = flags;
@@ -495,7 +509,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                mutex_unlock(&vm->mutex);
 
                r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
-                                    RADEON_GPU_PAGE_SIZE, false, 
+                                    RADEON_GPU_PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
                if (r)
                        return r;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
  * Objects have to be reserved and the mutex must be locked!
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem)
 {
+       struct radeon_vm *vm = bo_va->vm;
        struct radeon_ib ib;
-       struct radeon_bo_va *bo_va;
        unsigned nptes, ndw;
        uint64_t addr;
        int r;
 
-       bo_va = radeon_vm_bo_find(vm, bo);
-       if (bo_va == NULL) {
-               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-               return -EINVAL;
-       }
 
        if (!bo_va->soffset) {
                dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
-                       bo, vm);
+                       bo_va->bo, vm);
                return -EINVAL;
        }
 
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 
        trace_radeon_vm_bo_update(bo_va);
 
-       nptes = radeon_bo_ngpu_pages(bo);
+       nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
 
        /* padding, etc. */
        ndw = 64;
@@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        return 0;
 }
 
+/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and the mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       int r;
+
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+               list_del(&bo_va->vm_status);
+               r = radeon_vm_bo_update(rdev, bo_va, NULL);
+               kfree(bo_va);
+               if (r)
+                       return r;
+       }
+       return 0;
+}
+
 /**
  * radeon_vm_bo_rmv - remove a bo from a specific vm
  *
@@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
  * @bo_va: requested bo_va
  *
  * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
  *
  * Objects have to be reserved!
  */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va)
 {
-       int r = 0;
+       struct radeon_vm *vm = bo_va->vm;
 
-       mutex_lock(&bo_va->vm->mutex);
-       if (bo_va->soffset)
-               r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+       list_del(&bo_va->bo_list);
 
+       mutex_lock(&vm->mutex);
        list_del(&bo_va->vm_list);
-       mutex_unlock(&bo_va->vm->mutex);
-       list_del(&bo_va->bo_list);
 
-       kfree(bo_va);
-       return r;
+       if (bo_va->soffset) {
+               bo_va->bo = NULL;
+               list_add(&bo_va->vm_status, &vm->freed);
+       } else {
+               kfree(bo_va);
+       }
+
+       mutex_unlock(&vm->mutex);
 }
 
 /**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        int r;
 
        vm->id = 0;
+       vm->ib_bo_va = NULL;
        vm->fence = NULL;
        vm->last_flush = NULL;
        vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->va);
+       INIT_LIST_HEAD(&vm->freed);
 
        pd_size = radeon_vm_directory_size(rdev);
        pd_entries = radeon_vm_num_pdes(rdev);
@@ -992,7 +1030,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
                return -ENOMEM;
        }
 
-       r = radeon_bo_create(rdev, pd_size, align, false,
+       r = radeon_bo_create(rdev, pd_size, align, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL,
                             &vm->page_directory);
        if (r)
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                        kfree(bo_va);
                }
        }
-
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+               kfree(bo_va);
 
        for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
                radeon_bo_unref(&vm->page_tables[i].bo);
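
/*
 * The vm changes above defer page-table cleanup: radeon_vm_bo_rmv() no
 * longer clears PTEs itself but parks the mapping on vm->freed, and
 * radeon_vm_clear_freed() later replays each entry with a NULL mem to
 * clear it. A standalone sketch of that deferred-free list pattern;
 * the types and clear_mapping() are hypothetical:
 */
#include <stdlib.h>

struct mapping { struct mapping *next; };
struct vm { struct mapping *freed; };

void clear_mapping(struct mapping *m);     /* hypothetical PTE clear */

static void vm_rmv(struct vm *vm, struct mapping *m)
{
	m->next = vm->freed;               /* defer: just park it */
	vm->freed = m;
}

static void vm_clear_freed(struct vm *vm)
{
	struct mapping *m, *next;

	for (m = vm->freed; m; m = next) {
		next = m->next;
		clear_mapping(m);
		free(m);
	}
	vm->freed = NULL;
}
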
index 237dd29..3e21e86 100644 (file)
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
        for (i = 0; i < rdev->num_crtc; i++) {
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if ((tmp & 0x3) != 0) {
-                               tmp &= ~0x3;
+                       if ((tmp & 0x7) != 3) {
+                               tmp &= ~0x7;
+                               tmp |= 0x3;
                                WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
index da041a4..3c76e1d 100644 (file)
@@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
        pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
                                                       ASIC_INTERNAL_MEMORY_SS, 0);
 
-       /* disable ss, causes hangs on some cayman boards */
-       if (rdev->family == CHIP_CAYMAN) {
-               pi->sclk_ss = false;
-               pi->mclk_ss = false;
-       }
-
        if (pi->sclk_ss || pi->mclk_ss)
                pi->dynamic_ss = true;
        else
index 730cee2..9e854fd 100644 (file)
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -6376,14 +6377,16 @@ restart_ih:
                case 147:
                        addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
                        status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       if (addr == 0x0 && status == 0x0)
+                               break;
                        dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                                addr);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                                status);
                        si_vm_decode_fault(rdev, status, addr);
-                       /* reset addr and status */
-                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
                        break;
                case 176: /* RINGID0 CP_INT */
                        radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
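
/*
 * The si_get_ih_wptr() hunk above also clears the overflow flag out of
 * the write pointer value itself after acknowledging the overflow, so a
 * stale status bit cannot corrupt the ring offset. A standalone
 * illustration; the bit position is a placeholder, since the real
 * register layout is not shown in this hunk:
 */
#include <stdint.h>

#define RB_OVERFLOW (1u << 31)             /* placeholder bit position */

static uint32_t sanitize_wptr(uint32_t wptr, uint32_t ptr_mask)
{
	if (wptr & RB_OVERFLOW)
		wptr &= ~RB_OVERFLOW;      /* drop the status bit... */
	return wptr & ptr_mask;            /* ...then wrap to ring size */
}
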
index 2a2822c..32e50be 100644 (file)
@@ -1874,7 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       pi->enable_bapm = false;
+       /* There are stability issues reported with bapm enabled
+        * when switching between AC and battery power.  At the
+        * same time, some MSI boards hang if it is not enabled
+        * while dpm is enabled.  Just enable it for MSI boards
+        * for now.
+        */
+       if (rdev->pdev->subsystem_vendor == 0x1462)
+               pi->enable_bapm = true;
+       else
+               pi->enable_bapm = false;
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
index a89ad93..b031b48 100644 (file)
@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
-               vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
        }
 
index 800c8b6..5e79c6a 100644 (file)
@@ -810,7 +810,7 @@ config HID_ZYDACRON
 
 config HID_SENSOR_HUB
        tristate "HID Sensors framework support"
-       depends on HID
+       depends on HID && HAS_IOMEM
        select MFD_CORE
        default n
        ---help---
index 6d00bb9..48b66bb 100644 (file)
 
 #define USB_VENDOR_ID_ETURBOTOUCH      0x22b9
 #define USB_DEVICE_ID_ETURBOTOUCH      0x0006
+#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968
 
 #define USB_VENDOR_ID_EZKEY            0x0518
 #define USB_DEVICE_ID_BTC_8193         0x0002
 
 #define USB_VENDOR_ID_PENMOUNT         0x14e1
 #define USB_DEVICE_ID_PENMOUNT_PCI     0x3500
+#define USB_DEVICE_ID_PENMOUNT_1610    0x1610
+#define USB_DEVICE_ID_PENMOUNT_1640    0x1640
 
 #define USB_VENDOR_ID_PETALYNX         0x18b1
 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE   0x0037
index 2451c7e..578bbe6 100644 (file)
@@ -428,6 +428,7 @@ static int rmi_raw_event(struct hid_device *hdev,
        return 0;
 }
 
+#ifdef CONFIG_PM
 static int rmi_post_reset(struct hid_device *hdev)
 {
        return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
@@ -437,6 +438,7 @@ static int rmi_post_resume(struct hid_device *hdev)
 {
        return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
 }
+#endif /* CONFIG_PM */
 
 #define RMI4_MAX_PAGE 0xff
 #define RMI4_PAGE_SIZE 0x0100
index a8d5c8f..e244e44 100644 (file)
@@ -159,17 +159,18 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
 {
        struct hid_sensor_hub_callbacks_list *callback;
        struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
+       unsigned long flags;
 
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list)
                if (callback->usage_id == usage_id &&
                                                callback->hsdev == hsdev) {
-                       spin_unlock(&pdata->dyn_callback_lock);
+                       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
                        return -EINVAL;
                }
        callback = kzalloc(sizeof(*callback), GFP_ATOMIC);
        if (!callback) {
-               spin_unlock(&pdata->dyn_callback_lock);
+               spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
                return -ENOMEM;
        }
        callback->hsdev = hsdev;
@@ -177,7 +178,7 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
        callback->usage_id = usage_id;
        callback->priv = NULL;
        list_add_tail(&callback->list, &pdata->dyn_callback_list);
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return 0;
 }
@@ -188,8 +189,9 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
 {
        struct hid_sensor_hub_callbacks_list *callback;
        struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
+       unsigned long flags;
 
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list)
                if (callback->usage_id == usage_id &&
                                                callback->hsdev == hsdev) {
@@ -197,7 +199,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
                        kfree(callback);
                        break;
                }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return 0;
 }
@@ -378,15 +380,16 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
 {
        struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
        struct hid_sensor_hub_callbacks_list *callback;
+       unsigned long flags;
 
        hid_dbg(hdev, " sensor_hub_suspend\n");
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
                if (callback->usage_callback->suspend)
                        callback->usage_callback->suspend(
                                        callback->hsdev, callback->priv);
        }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return 0;
 }
@@ -395,15 +398,16 @@ static int sensor_hub_resume(struct hid_device *hdev)
 {
        struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
        struct hid_sensor_hub_callbacks_list *callback;
+       unsigned long flags;
 
        hid_dbg(hdev, " sensor_hub_resume\n");
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
                if (callback->usage_callback->resume)
                        callback->usage_callback->resume(
                                        callback->hsdev, callback->priv);
        }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return 0;
 }
@@ -632,6 +636,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
                        if (name == NULL) {
                                hid_err(hdev, "Failed MFD device name\n");
                                ret = -ENOMEM;
+                               kfree(hsdev);
                                goto err_no_mem;
                        }
                        sd->hid_sensor_hub_client_devs[
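
/*
 * The sensor-hub hunks above switch dyn_callback_lock from spin_lock()
 * to spin_lock_irqsave(). The rule being applied: a lock that can also
 * be taken from IRQ context must be taken with local interrupts disabled
 * in process context, or an interrupt arriving on the same CPU while the
 * lock is held deadlocks. Kernel-style fragment of the pattern, with
 * pdata as in the patch above:
 *
 *   unsigned long flags;
 *
 *   spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
 *   // walk or modify pdata->dyn_callback_list
 *   spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 */
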
index 59badc1..31e6727 100644 (file)
@@ -49,6 +49,7 @@ static const struct hid_blacklist {
 
        { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
@@ -76,6 +77,8 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
index e84f452..ae22e3c 100644 (file)
@@ -339,9 +339,13 @@ static void process_chn_event(u32 relid)
                 */
 
                do {
-                       hv_begin_read(&channel->inbound);
+                       if (read_state)
+                               hv_begin_read(&channel->inbound);
                        channel->onchannel_callback(arg);
-                       bytes_to_read = hv_end_read(&channel->inbound);
+                       if (read_state)
+                               bytes_to_read = hv_end_read(&channel->inbound);
+                       else
+                               bytes_to_read = 0;
                } while (read_state && (bytes_to_read != 0));
        } else {
                pr_err("no channel callback for relid - %u\n", relid);
index eaaa3d8..23b2ce2 100644 (file)
@@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
                /*
                 * Send the information to the user-level daemon.
                 */
-               fcopy_send_data();
                schedule_delayed_work(&fcopy_work, 5*HZ);
+               fcopy_send_data();
                return;
        }
        icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
index ea85253..521c146 100644 (file)
@@ -127,6 +127,17 @@ kvp_work_func(struct work_struct *dummy)
        kvp_respond_to_host(NULL, HV_E_FAIL);
 }
 
+static void poll_channel(struct vmbus_channel *channel)
+{
+       if (channel->target_cpu != smp_processor_id())
+               smp_call_function_single(channel->target_cpu,
+                                        hv_kvp_onchannelcallback,
+                                        channel, true);
+       else
+               hv_kvp_onchannelcallback(channel);
+}
+
+
 static int kvp_handle_handshake(struct hv_kvp_msg *msg)
 {
        int ret = 1;
@@ -155,7 +166,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
                kvp_register(dm_reg_value);
                kvp_transaction.active = false;
                if (kvp_transaction.kvp_context)
-                       hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
+                       poll_channel(kvp_transaction.kvp_context);
        }
        return ret;
 }
@@ -568,7 +579,7 @@ response_done:
 
        vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
                                VM_PKT_DATA_INBAND, 0);
-
+       poll_channel(channel);
 }
 
 /*
@@ -603,7 +614,7 @@ void hv_kvp_onchannelcallback(void *context)
                return;
        }
 
-       vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+       vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
                         &requestid);
 
        if (recvlen > 0) {
index dd76180..3b9c9ef 100644 (file)
@@ -319,7 +319,7 @@ static int util_probe(struct hv_device *dev,
                (struct hv_util_service *)dev_id->driver_data;
        int ret;
 
-       srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+       srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
        if (!srv->recv_buffer)
                return -ENOMEM;
        if (srv->util_init) {
index 08531a1..02d3d85 100644 (file)
@@ -1052,7 +1052,7 @@ config SENSORS_PC87427
          will be called pc87427.
 
 config SENSORS_NTC_THERMISTOR
-       tristate "NTC thermistor support"
+       tristate "NTC thermistor support from Murata"
        depends on !OF || IIO=n || IIO
        help
          This driver supports NTC thermistors sensor reading and its
@@ -1060,7 +1060,8 @@ config SENSORS_NTC_THERMISTOR
          send notifications about the temperature.
 
          Currently, this driver supports
-         NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333.
+         NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333
+         from Murata.
 
          This driver can also be built as a module.  If so, the module
          will be called ntc-thermistor.
@@ -1176,6 +1177,7 @@ config SENSORS_DME1737
 config SENSORS_EMC1403
        tristate "SMSC EMC1403/23 thermal sensor"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for the SMSC EMC1403/23
          temperature monitoring chip.
index 5ffd81f..0625e50 100644 (file)
@@ -239,50 +239,50 @@ static ssize_t adc128_show_alarm(struct device *dev,
        return sprintf(buf, "%u\n", !!(alarms & mask));
 }
 
-static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO,
+                           adc128_show_in, NULL, 0, 0);
 static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 0, 1);
 static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 0, 2);
 
-static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO,
+                           adc128_show_in, NULL, 1, 0);
 static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 1, 1);
 static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 1, 2);
 
-static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO,
+                           adc128_show_in, NULL, 2, 0);
 static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 2, 1);
 static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 2, 2);
 
-static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO,
+                           adc128_show_in, NULL, 3, 0);
 static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 3, 1);
 static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 3, 2);
 
-static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO,
+                           adc128_show_in, NULL, 4, 0);
 static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 4, 1);
 static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 4, 2);
 
-static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO,
+                           adc128_show_in, NULL, 5, 0);
 static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 5, 1);
 static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 5, 2);
 
-static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO,
+                           adc128_show_in, NULL, 6, 0);
 static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 6, 1);
 static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
index 3eb4281..d74241b 100644 (file)
@@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev,
        struct adm1021_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
        long temp;
-       int err;
+       int reg_val, err;
 
        err = kstrtol(buf, 10, &temp);
        if (err)
@@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev,
        temp /= 1000;
 
        mutex_lock(&data->update_lock);
-       data->temp_max[index] = clamp_val(temp, -128, 127);
+       reg_val = clamp_val(temp, -128, 127);
+       data->temp_max[index] = reg_val * 1000;
        if (!read_only)
                i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
-                                         data->temp_max[index]);
+                                         reg_val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev,
        struct adm1021_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
        long temp;
-       int err;
+       int reg_val, err;
 
        err = kstrtol(buf, 10, &temp);
        if (err)
@@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev,
        temp /= 1000;
 
        mutex_lock(&data->update_lock);
-       data->temp_min[index] = clamp_val(temp, -128, 127);
+       reg_val = clamp_val(temp, -128, 127);
+       data->temp_min[index] = reg_val * 1000;
        if (!read_only)
                i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
-                                         data->temp_min[index]);
+                                         reg_val);
        mutex_unlock(&data->update_lock);
 
        return count;
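
/*
 * The adm1021 hunks above fix a unit mismatch: the chip register takes
 * whole degrees while the cached temp_max/temp_min values are in
 * millidegrees, so the clamped register value is written as-is and stored
 * back into the cache scaled by 1000. A standalone sketch of the
 * conversion:
 */
#include <stdio.h>

static long clamp_long(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	long temp = 150000;                     /* millidegrees from sysfs */
	long reg_val = clamp_long(temp / 1000, -128, 127); /* degrees */
	long cached = reg_val * 1000;           /* millidegrees for cache */

	printf("reg=%ld cache=%ld\n", reg_val, cached); /* reg=127 cache=127000 */
	return 0;
}
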
index 78339e8..2804571 100644 (file)
@@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev,
        /* Update the value */
        reg = (reg & 0x3F) | (val << 6);
 
+       /* Update the cache */
+       data->fan_div[attr->index] = reg;
+
        /* Write value */
        i2c_smbus_write_byte_data(client,
                                  ADM1029_REG_FAN_DIV[attr->index], reg);
index a8a540c..51c1a5a 100644 (file)
@@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
+       val = clamp_val(val, 0, 127000);
        mutex_lock(&data->update_lock);
        data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
        adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
@@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
+       val = clamp_val(val, 0, 127000);
        mutex_lock(&data->update_lock);
        data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
                                                  data->pwm[nr]);
@@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, 127000);
        mutex_lock(&data->update_lock);
        data->temp_min[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, 127000);
        mutex_lock(&data->update_lock);
        data->temp_max[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, 127000);
        mutex_lock(&data->update_lock);
        data->temp_crit[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
index 0f4dea5..9ee3913 100644 (file)
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->pwm_tmin[attr->index] = temp;
index eea8172..9f2be3d 100644 (file)
@@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
        get_temp_alarm, NULL, IDX_TEMP1_MAX);
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
        get_temp_alarm, NULL, IDX_TEMP1_CRIT);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
        get_temp, NULL, IDX_TEMP2_INPUT);
 static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
        set_temp, IDX_TEMP2_MIN);
index afd3104..d14ab3c 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
                                      struct device_attribute *devattr,
                                      char *buf)
 {
-       return sprintf(buf, "da9052-hwmon\n");
+       return sprintf(buf, "da9052\n");
 }
 
 static ssize_t show_label(struct device *dev,
index 73b3865..35eb773 100644 (file)
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
                                      struct device_attribute *devattr,
                                      char *buf)
 {
-       return sprintf(buf, "da9055-hwmon\n");
+       return sprintf(buf, "da9055\n");
 }
 
 static ssize_t show_label(struct device *dev,
index fd892dd..78002de 100644 (file)
@@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
        if (result < 0)
                return result;
 
-       val = DIV_ROUND_CLOSEST(val, 1000);
-       if ((val < -63) || (val > 127))
-               return -EINVAL;
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
 
        mutex_lock(&data->update_lock);
        data->temp_min[nr] = val;
@@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
        if (result < 0)
                return result;
 
-       val = DIV_ROUND_CLOSEST(val, 1000);
-       if ((val < -63) || (val > 127))
-               return -EINVAL;
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
 
        mutex_lock(&data->update_lock);
        data->temp_max[nr] = val;
@@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        struct i2c_client *client = to_i2c_client(dev);
-       long rpm_target;
+       unsigned long rpm_target;
 
-       int result = kstrtol(buf, 10, &rpm_target);
+       int result = kstrtoul(buf, 10, &rpm_target);
        if (result < 0)
                return result;
 
        /* Datasheet states 16384 as maximum RPM target (table 3.2) */
-       if ((rpm_target < 0) || (rpm_target > 16384))
-               return -EINVAL;
+       rpm_target = clamp_val(rpm_target, 0, 16384);
 
        mutex_lock(&data->update_lock);
 
index ba35e4d..2566c43 100644 (file)
@@ -538,7 +538,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
 
        /* Make this driver part of hwmon class. */
        fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
-                                               "gpio-fan", fan_data,
+                                               "gpio_fan", fan_data,
                                                gpio_fan_groups);
        if (IS_ERR(fan_data->hwmon_dev))
                return PTR_ERR(fan_data->hwmon_dev);
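
/*
 * The da9052, da9055 and gpio_fan renames above all drop '-' from the
 * hwmon name attribute: libsensors uses '-' as a separator when parsing
 * chip names, so hwmon class device names must avoid it. A standalone
 * check in the same spirit (illustrative only):
 */
#include <stdbool.h>
#include <string.h>

static bool hwmon_name_ok(const char *name)
{
	/* '-' would collide with libsensors' name parsing */
	return strchr(name, '-') == NULL;
}
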
index e76feb8..ae66f42 100644 (file)
@@ -163,6 +163,18 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 }
 
 static const struct of_device_id ntc_match[] = {
+       { .compatible = "murata,ncp15wb473",
+               .data = &ntc_thermistor_id[0] },
+       { .compatible = "murata,ncp18wb473",
+               .data = &ntc_thermistor_id[1] },
+       { .compatible = "murata,ncp21wb473",
+               .data = &ntc_thermistor_id[2] },
+       { .compatible = "murata,ncp03wb473",
+               .data = &ntc_thermistor_id[3] },
+       { .compatible = "murata,ncp15wl333",
+               .data = &ntc_thermistor_id[4] },
+
+       /* Usage of vendor name "ntc" is deprecated */
        { .compatible = "ntc,ncp15wb473",
                .data = &ntc_thermistor_id[0] },
        { .compatible = "ntc,ncp18wb473",
@@ -500,7 +512,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
        }
 
        dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
-                                                               pdev->name);
+                                                               pdev_id->name);
 
        return 0;
 err_after_sysfs:
@@ -534,7 +546,7 @@ static struct platform_driver ntc_thermistor_driver = {
 
 module_platform_driver(ntc_thermistor_driver);
 
-MODULE_DESCRIPTION("NTC Thermistor Driver");
+MODULE_DESCRIPTION("NTC Thermistor Driver from Murata");
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:ntc-thermistor");
index efee4c5..34b9a60 100644 (file)
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-       return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
+       return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
+       if (val > 255)
+               return -EINVAL;
 
        data->vrm = val;
        return count;
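
/*
 * TEMP_TO_REG() above now clamps in millidegrees *before* dividing by
 * 1000. The old order scaled first and then clamped to +/-128000, a range
 * that cannot fit the s8 return type. A standalone before/after
 * comparison (SCALE()'s rounding is elided):
 */
#include <stdio.h>

static long clamp_long(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	long val = 200000;      /* 200 degrees C in millidegrees */

	signed char broken = clamp_long(val / 1000, -128000, 127000);
	signed char fixed  = clamp_long(val, -128000, 127000) / 1000;

	/* broken truncates 200 into an s8; fixed yields 127 */
	printf("broken=%d fixed=%d\n", broken, fixed);
	return 0;
}
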
index 6ed76ce..32487c1 100644 (file)
@@ -249,7 +249,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
        int nr = to_sensor_dev_attr(attr)->index; \
        struct w83l786ng_data *data = w83l786ng_update_device(dev); \
        return sprintf(buf, "%d\n", \
-               FAN_FROM_REG(data->fan[nr], DIV_FROM_REG(data->fan_div[nr]))); \
+               FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
 }
 
 show_fan_reg(fan);
index 620d100..9f7d585 100644 (file)
@@ -676,6 +676,16 @@ config I2C_RIIC
          This driver can also be built as a module.  If so, the module
          will be called i2c-riic.
 
+config I2C_RK3X
+       tristate "Rockchip RK3xxx I2C adapter"
+       depends on OF
+       help
+         Say Y here to include support for the I2C adapter in Rockchip RK3xxx
+         SoCs.
+
+         This driver can also be built as a module. If so, the module will
+         be called i2c-rk3x.
+
 config HAVE_S3C2410_I2C
        bool
        help
@@ -764,6 +774,19 @@ config I2C_STU300
          This driver can also be built as a module. If so, the module
          will be called i2c-stu300.
 
+config I2C_SUN6I_P2WI
+       tristate "Allwinner sun6i internal P2WI controller"
+       depends on RESET_CONTROLLER
+       depends on MACH_SUN6I || COMPILE_TEST
+       help
+         If you say yes to this option, support will be included for the
+         P2WI (Push/Pull 2 Wire Interface) controller embedded in some sunxi
+         SoCs.
+         The P2WI looks like an SMBus controller (which supports only byte
+         accesses), except that it only supports one slave device.
+         This interface is used to connect to specific PMIC devices (like the
+         AXP221).
+
 config I2C_TEGRA
        tristate "NVIDIA Tegra internal I2C controller"
        depends on ARCH_TEGRA
index 298692c..dd9a7f8 100644 (file)
@@ -66,6 +66,7 @@ obj-$(CONFIG_I2C_PXA)         += i2c-pxa.o
 obj-$(CONFIG_I2C_PXA_PCI)      += i2c-pxa-pci.o
 obj-$(CONFIG_I2C_QUP)          += i2c-qup.o
 obj-$(CONFIG_I2C_RIIC)         += i2c-riic.o
+obj-$(CONFIG_I2C_RK3X)         += i2c-rk3x.o
 obj-$(CONFIG_I2C_S3C2410)      += i2c-s3c2410.o
 obj-$(CONFIG_I2C_S6000)                += i2c-s6000.o
 obj-$(CONFIG_I2C_SH7760)       += i2c-sh7760.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_I2C_SIMTEC)      += i2c-simtec.o
 obj-$(CONFIG_I2C_SIRF)         += i2c-sirf.o
 obj-$(CONFIG_I2C_ST)           += i2c-st.o
 obj-$(CONFIG_I2C_STU300)       += i2c-stu300.o
+obj-$(CONFIG_I2C_SUN6I_P2WI)   += i2c-sun6i-p2wi.o
 obj-$(CONFIG_I2C_TEGRA)                += i2c-tegra.o
 obj-$(CONFIG_I2C_VERSATILE)    += i2c-versatile.o
 obj-$(CONFIG_I2C_WMT)          += i2c-wmt.o
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
new file mode 100644 (file)
index 0000000..a979150
--- /dev/null
@@ -0,0 +1,763 @@
+/*
+ * Driver for I2C adapter in Rockchip RK3xxx SoC
+ *
+ * Max Schwarz <max.schwarz@online.de>
+ * based on the patches by Rockchip Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+
+/* Register Map */
+#define REG_CON        0x00 /* control register */
+#define REG_CLKDIV     0x04 /* clock divisor register */
+#define REG_MRXADDR    0x08 /* slave address for REGISTER_TX */
+#define REG_MRXRADDR   0x0c /* slave register address for REGISTER_TX */
+#define REG_MTXCNT     0x10 /* number of bytes to be transmitted */
+#define REG_MRXCNT     0x14 /* number of bytes to be received */
+#define REG_IEN        0x18 /* interrupt enable */
+#define REG_IPD        0x1c /* interrupt pending */
+#define REG_FCNT       0x20 /* finished count */
+
+/* Data buffer offsets */
+#define TXBUFFER_BASE 0x100
+#define RXBUFFER_BASE 0x200
+
+/* REG_CON bits */
+#define REG_CON_EN        BIT(0)
+enum {
+       REG_CON_MOD_TX = 0,      /* transmit data */
+       REG_CON_MOD_REGISTER_TX, /* select register and restart */
+       REG_CON_MOD_RX,          /* receive data */
+       REG_CON_MOD_REGISTER_RX, /* broken: transmits read addr AND writes
+                                 * register addr */
+};
+#define REG_CON_MOD(mod)  ((mod) << 1)
+#define REG_CON_MOD_MASK  (BIT(1) | BIT(2))
+#define REG_CON_START     BIT(3)
+#define REG_CON_STOP      BIT(4)
+#define REG_CON_LASTACK   BIT(5) /* 1: send NACK after last received byte */
+#define REG_CON_ACTACK    BIT(6) /* 1: stop if NACK is received */
+
+/* REG_MRXADDR bits */
+#define REG_MRXADDR_VALID(x) BIT(24 + (x)) /* [x*8+7:x*8] of MRX[R]ADDR valid */
+
+/* REG_IEN/REG_IPD bits */
+#define REG_INT_BTF       BIT(0) /* a byte was transmitted */
+#define REG_INT_BRF       BIT(1) /* a byte was received */
+#define REG_INT_MBTF      BIT(2) /* master data transmit finished */
+#define REG_INT_MBRF      BIT(3) /* master data receive finished */
+#define REG_INT_START     BIT(4) /* START condition generated */
+#define REG_INT_STOP      BIT(5) /* STOP condition generated */
+#define REG_INT_NAKRCV    BIT(6) /* NACK received */
+#define REG_INT_ALL       0x7f
+
+/* Constants */
+#define WAIT_TIMEOUT      200 /* ms */
+#define DEFAULT_SCL_RATE  (100 * 1000) /* Hz */
+
+enum rk3x_i2c_state {
+       STATE_IDLE,
+       STATE_START,
+       STATE_READ,
+       STATE_WRITE,
+       STATE_STOP
+};
+
+/**
+ * @grf_offset: offset inside the grf regmap for setting the i2c type
+ */
+struct rk3x_i2c_soc_data {
+       int grf_offset;
+};
+
+struct rk3x_i2c {
+       struct i2c_adapter adap;
+       struct device *dev;
+       struct rk3x_i2c_soc_data *soc_data;
+
+       /* Hardware resources */
+       void __iomem *regs;
+       struct clk *clk;
+
+       /* Settings */
+       unsigned int scl_frequency;
+
+       /* Synchronization & notification */
+       spinlock_t lock;
+       wait_queue_head_t wait;
+       bool busy;
+
+       /* Current message */
+       struct i2c_msg *msg;
+       u8 addr;
+       unsigned int mode;
+       bool is_last_msg;
+
+       /* I2C state machine */
+       enum rk3x_i2c_state state;
+       unsigned int processed; /* sent/received bytes */
+       int error;
+};
+
+static inline void i2c_writel(struct rk3x_i2c *i2c, u32 value,
+                             unsigned int offset)
+{
+       writel(value, i2c->regs + offset);
+}
+
+static inline u32 i2c_readl(struct rk3x_i2c *i2c, unsigned int offset)
+{
+       return readl(i2c->regs + offset);
+}
+
+/* Reset all interrupt pending bits */
+static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
+{
+       i2c_writel(i2c, REG_INT_ALL, REG_IPD);
+}
+
+/**
+ * Generate a START condition, which triggers a REG_INT_START interrupt.
+ */
+static void rk3x_i2c_start(struct rk3x_i2c *i2c)
+{
+       u32 val;
+
+       rk3x_i2c_clean_ipd(i2c);
+       i2c_writel(i2c, REG_INT_START, REG_IEN);
+
+       /* enable adapter with correct mode, send START condition */
+       val = REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
+
+       /* if we want to react to NACK, set ACTACK bit */
+       if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
+               val |= REG_CON_ACTACK;
+
+       i2c_writel(i2c, val, REG_CON);
+}
+
+/**
+ * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
+ *
+ * @error: Error code to return in rk3x_i2c_xfer
+ */
+static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
+{
+       unsigned int ctrl;
+
+       i2c->processed = 0;
+       i2c->msg = NULL;
+       i2c->error = error;
+
+       if (i2c->is_last_msg) {
+               /* Enable stop interrupt */
+               i2c_writel(i2c, REG_INT_STOP, REG_IEN);
+
+               i2c->state = STATE_STOP;
+
+               ctrl = i2c_readl(i2c, REG_CON);
+               ctrl |= REG_CON_STOP;
+               i2c_writel(i2c, ctrl, REG_CON);
+       } else {
+               /* Signal rk3x_i2c_xfer to start the next message. */
+               i2c->busy = false;
+               i2c->state = STATE_IDLE;
+
+               /*
+                * The HW is actually not capable of REPEATED START. But we can
+                * get the intended effect by resetting its internal state
+                * and issuing an ordinary START.
+                */
+               i2c_writel(i2c, 0, REG_CON);
+
+               /* signal that we are finished with the current msg */
+               wake_up(&i2c->wait);
+       }
+}
+
+/**
+ * Setup a read according to i2c->msg
+ */
+static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
+{
+       unsigned int len = i2c->msg->len - i2c->processed;
+       u32 con;
+
+       con = i2c_readl(i2c, REG_CON);
+
+       /*
+        * The hw can read up to 32 bytes at a time. If we need more than one
+        * chunk, send an ACK after the last byte of the current chunk.
+        */
+       if (unlikely(len > 32)) {
+               len = 32;
+               con &= ~REG_CON_LASTACK;
+       } else {
+               con |= REG_CON_LASTACK;
+       }
+
+       /* make sure we are in plain RX mode if we read a second chunk */
+       if (i2c->processed != 0) {
+               con &= ~REG_CON_MOD_MASK;
+               con |= REG_CON_MOD(REG_CON_MOD_RX);
+       }
+
+       i2c_writel(i2c, con, REG_CON);
+       i2c_writel(i2c, len, REG_MRXCNT);
+}
+
+/**
+ * Fill the transmit buffer with data from i2c->msg
+ */
+static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
+{
+       unsigned int i, j;
+       u32 cnt = 0;
+       u32 val;
+       u8 byte;
+
+       for (i = 0; i < 8; ++i) {
+               val = 0;
+               for (j = 0; j < 4; ++j) {
+                       if (i2c->processed == i2c->msg->len)
+                               break;
+
+                       if (i2c->processed == 0 && cnt == 0)
+                               byte = (i2c->addr & 0x7f) << 1;
+                       else
+                               byte = i2c->msg->buf[i2c->processed++];
+
+                       val |= byte << (j * 8);
+                       cnt++;
+               }
+
+               i2c_writel(i2c, val, TXBUFFER_BASE + 4 * i);
+
+               if (i2c->processed == i2c->msg->len)
+                       break;
+       }
+
+       i2c_writel(i2c, cnt, REG_MTXCNT);
+}
+
+
+/* IRQ handlers for individual states */
+
+static void rk3x_i2c_handle_start(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+       if (!(ipd & REG_INT_START)) {
+               rk3x_i2c_stop(i2c, -EIO);
+               dev_warn(i2c->dev, "unexpected irq in START: 0x%x\n", ipd);
+               rk3x_i2c_clean_ipd(i2c);
+               return;
+       }
+
+       /* ack interrupt */
+       i2c_writel(i2c, REG_INT_START, REG_IPD);
+
+       /* disable start bit */
+       i2c_writel(i2c, i2c_readl(i2c, REG_CON) & ~REG_CON_START, REG_CON);
+
+       /* enable appropriate interrupts and transition */
+       if (i2c->mode == REG_CON_MOD_TX) {
+               i2c_writel(i2c, REG_INT_MBTF | REG_INT_NAKRCV, REG_IEN);
+               i2c->state = STATE_WRITE;
+               rk3x_i2c_fill_transmit_buf(i2c);
+       } else {
+               /* in any other case, we are going to be reading. */
+               i2c_writel(i2c, REG_INT_MBRF | REG_INT_NAKRCV, REG_IEN);
+               i2c->state = STATE_READ;
+               rk3x_i2c_prepare_read(i2c);
+       }
+}
+
+static void rk3x_i2c_handle_write(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+       if (!(ipd & REG_INT_MBTF)) {
+               rk3x_i2c_stop(i2c, -EIO);
+               dev_err(i2c->dev, "unexpected irq in WRITE: 0x%x\n", ipd);
+               rk3x_i2c_clean_ipd(i2c);
+               return;
+       }
+
+       /* ack interrupt */
+       i2c_writel(i2c, REG_INT_MBTF, REG_IPD);
+
+       /* are we finished? */
+       if (i2c->processed == i2c->msg->len)
+               rk3x_i2c_stop(i2c, i2c->error);
+       else
+               rk3x_i2c_fill_transmit_buf(i2c);
+}
+
+static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+       unsigned int i;
+       unsigned int len = i2c->msg->len - i2c->processed;
+       u32 uninitialized_var(val);
+       u8 byte;
+
+       /* we only care for MBRF here. */
+       if (!(ipd & REG_INT_MBRF))
+               return;
+
+       /* ack interrupt */
+       i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+
+       /* read the data from receive buffer */
+       for (i = 0; i < len; ++i) {
+               if (i % 4 == 0)
+                       val = i2c_readl(i2c, RXBUFFER_BASE + (i / 4) * 4);
+
+               byte = (val >> ((i % 4) * 8)) & 0xff;
+               i2c->msg->buf[i2c->processed++] = byte;
+       }
+
+       /* are we finished? */
+       if (i2c->processed == i2c->msg->len)
+               rk3x_i2c_stop(i2c, i2c->error);
+       else
+               rk3x_i2c_prepare_read(i2c);
+}
+
+static void rk3x_i2c_handle_stop(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+       unsigned int con;
+
+       if (!(ipd & REG_INT_STOP)) {
+               rk3x_i2c_stop(i2c, -EIO);
+               dev_err(i2c->dev, "unexpected irq in STOP: 0x%x\n", ipd);
+               rk3x_i2c_clean_ipd(i2c);
+               return;
+       }
+
+       /* ack interrupt */
+       i2c_writel(i2c, REG_INT_STOP, REG_IPD);
+
+       /* disable STOP bit */
+       con = i2c_readl(i2c, REG_CON);
+       con &= ~REG_CON_STOP;
+       i2c_writel(i2c, con, REG_CON);
+
+       i2c->busy = false;
+       i2c->state = STATE_IDLE;
+
+       /* signal rk3x_i2c_xfer that we are finished */
+       wake_up(&i2c->wait);
+}
+
+static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
+{
+       struct rk3x_i2c *i2c = dev_id;
+       unsigned int ipd;
+
+       spin_lock(&i2c->lock);
+
+       ipd = i2c_readl(i2c, REG_IPD);
+       if (i2c->state == STATE_IDLE) {
+               dev_warn(i2c->dev, "irq in STATE_IDLE, ipd = 0x%x\n", ipd);
+               rk3x_i2c_clean_ipd(i2c);
+               goto out;
+       }
+
+       dev_dbg(i2c->dev, "IRQ: state %d, ipd: %x\n", i2c->state, ipd);
+
+       /* Clean interrupt bits we don't care about */
+       ipd &= ~(REG_INT_BRF | REG_INT_BTF);
+
+       if (ipd & REG_INT_NAKRCV) {
+               /*
+                * We got a NACK in the last operation. Depending on whether
+                * IGNORE_NAK is set, we have to stop the operation and report
+                * an error.
+                */
+               i2c_writel(i2c, REG_INT_NAKRCV, REG_IPD);
+
+               ipd &= ~REG_INT_NAKRCV;
+
+               if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
+                       rk3x_i2c_stop(i2c, -ENXIO);
+       }
+
+       /* is there anything left to handle? */
+       if (unlikely(ipd == 0))
+               goto out;
+
+       switch (i2c->state) {
+       case STATE_START:
+               rk3x_i2c_handle_start(i2c, ipd);
+               break;
+       case STATE_WRITE:
+               rk3x_i2c_handle_write(i2c, ipd);
+               break;
+       case STATE_READ:
+               rk3x_i2c_handle_read(i2c, ipd);
+               break;
+       case STATE_STOP:
+               rk3x_i2c_handle_stop(i2c, ipd);
+               break;
+       case STATE_IDLE:
+               break;
+       }
+
+out:
+       spin_unlock(&i2c->lock);
+       return IRQ_HANDLED;
+}
+
+static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate)
+{
+       unsigned long i2c_rate = clk_get_rate(i2c->clk);
+       unsigned int div;
+
+       /* SCL rate = (clk rate) / (8 * DIV) */
+       div = DIV_ROUND_UP(i2c_rate, scl_rate * 8);
+
+       /*
+        * The lower and upper halves of the CLKDIV register hold the lengths
+        * of the SCL low and high periods.
+        */
+       div = DIV_ROUND_UP(div, 2);
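+
+       /*
+        * Worked example (illustrative numbers, not from this patch): with a
+        * 100 MHz input clock and 100 kHz SCL, DIV_ROUND_UP(100000000, 800000)
+        * gives 125 and DIV_ROUND_UP(125, 2) gives 63, so both halves are 63
+        * and the resulting rate is 100 MHz / (8 * 126) ~= 99.2 kHz. Rounding
+        * up keeps us at or below the requested rate.
+        */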
+
+       i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV);
+}
+
+/**
+ * rk3x_i2c_setup - set up I2C registers for an operation specified by msgs, num
+ * @i2c: target controller data
+ * @msgs: I2C msgs to process
+ * @num: number of msgs
+ *
+ * Must be called with i2c->lock held.
+ *
+ * Return: number of I2C msgs processed or negative in case of error
+ */
+static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
+{
+       u32 addr = (msgs[0].addr & 0x7f) << 1;
+       int ret = 0;
+
+       /*
+        * The I2C adapter can issue a small (len < 4) write packet before
+        * reading. This speeds up SMBus-style register reads.
+        * The MRXADDR/MRXRADDR hold the slave address and the slave register
+        * address in this case.
+        */
+
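+       /*
+        * Illustration (hypothetical values): an SMBus-style read of register
+        * 0x12 from slave 0x50 arrives as
+        *     msgs[] = { { .addr = 0x50, .len = 1, .buf = { 0x12 } },
+        *                { .addr = 0x50, .flags = I2C_M_RD, .len = N } }
+        * and is folded into a single REGISTER_TX operation below.
+        */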
+       if (num >= 2 && msgs[0].len < 4 &&
+           !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) {
+               u32 reg_addr = 0;
+               int i;
+
+               dev_dbg(i2c->dev, "Combined write/read from addr 0x%x\n",
+                       addr >> 1);
+
+               /* Fill MRXRADDR with the register address(es) */
+               for (i = 0; i < msgs[0].len; ++i) {
+                       reg_addr |= msgs[0].buf[i] << (i * 8);
+                       reg_addr |= REG_MRXADDR_VALID(i);
+               }
+
+               /* msgs[0] is handled by hw. */
+               i2c->msg = &msgs[1];
+
+               i2c->mode = REG_CON_MOD_REGISTER_TX;
+
+               i2c_writel(i2c, addr | REG_MRXADDR_VALID(0), REG_MRXADDR);
+               i2c_writel(i2c, reg_addr, REG_MRXRADDR);
+
+               ret = 2;
+       } else {
+               /*
+                * We'll have to do it the boring way and process the msgs
+                * one-by-one.
+                */
+
+               if (msgs[0].flags & I2C_M_RD) {
+                       addr |= 1; /* set read bit */
+
+                       /*
+                        * We have to transmit the slave addr first. Use
+                        * MOD_REGISTER_TX for that purpose.
+                        */
+                       i2c->mode = REG_CON_MOD_REGISTER_TX;
+                       i2c_writel(i2c, addr | REG_MRXADDR_VALID(0),
+                                  REG_MRXADDR);
+                       i2c_writel(i2c, 0, REG_MRXRADDR);
+               } else {
+                       i2c->mode = REG_CON_MOD_TX;
+               }
+
+               i2c->msg = &msgs[0];
+
+               ret = 1;
+       }
+
+       i2c->addr = msgs[0].addr;
+       i2c->busy = true;
+       i2c->state = STATE_START;
+       i2c->processed = 0;
+       i2c->error = 0;
+
+       rk3x_i2c_clean_ipd(i2c);
+
+       return ret;
+}
+
+static int rk3x_i2c_xfer(struct i2c_adapter *adap,
+                        struct i2c_msg *msgs, int num)
+{
+       struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data;
+       unsigned long timeout, flags;
+       int ret = 0;
+       int i;
+
+       spin_lock_irqsave(&i2c->lock, flags);
+
+       clk_enable(i2c->clk);
+
+       /* The clock rate might have changed, so setup the divider again */
+       rk3x_i2c_set_scl_rate(i2c, i2c->scl_frequency);
+
+       i2c->is_last_msg = false;
+
+       /*
+        * Process msgs. We can handle more than one message at once (see
+        * rk3x_i2c_setup()).
+        */
+       for (i = 0; i < num; i += ret) {
+               ret = rk3x_i2c_setup(i2c, msgs + i, num - i);
+
+               if (ret < 0) {
+                       dev_err(i2c->dev, "rk3x_i2c_setup() failed\n");
+                       break;
+               }
+
+               if (i + ret >= num)
+                       i2c->is_last_msg = true;
+
+               spin_unlock_irqrestore(&i2c->lock, flags);
+
+               rk3x_i2c_start(i2c);
+
+               timeout = wait_event_timeout(i2c->wait, !i2c->busy,
+                                            msecs_to_jiffies(WAIT_TIMEOUT));
+
+               spin_lock_irqsave(&i2c->lock, flags);
+
+               if (timeout == 0) {
+                       dev_err(i2c->dev, "timeout, ipd: 0x%02x, state: %d\n",
+                               i2c_readl(i2c, REG_IPD), i2c->state);
+
+                       /* Force a STOP condition without interrupt */
+                       i2c_writel(i2c, 0, REG_IEN);
+                       i2c_writel(i2c, REG_CON_EN | REG_CON_STOP, REG_CON);
+
+                       i2c->state = STATE_IDLE;
+
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               if (i2c->error) {
+                       ret = i2c->error;
+                       break;
+               }
+       }
+
+       clk_disable(i2c->clk);
+       spin_unlock_irqrestore(&i2c->lock, flags);
+
+       return ret;
+}
+
+static u32 rk3x_i2c_func(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+}
+
+static const struct i2c_algorithm rk3x_i2c_algorithm = {
+       .master_xfer            = rk3x_i2c_xfer,
+       .functionality          = rk3x_i2c_func,
+};
+
+static struct rk3x_i2c_soc_data soc_data[3] = {
+       { .grf_offset = 0x154 }, /* rk3066 */
+       { .grf_offset = 0x0a4 }, /* rk3188 */
+       { .grf_offset = -1 },    /* no I2C switching needed */
+};
+
+static const struct of_device_id rk3x_i2c_match[] = {
+       { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
+       { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+       { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
+       {},
+};
+
+static int rk3x_i2c_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct rk3x_i2c *i2c;
+       struct resource *mem;
+       int ret = 0;
+       int bus_nr;
+       u32 value;
+       int irq;
+
+       i2c = devm_kzalloc(&pdev->dev, sizeof(struct rk3x_i2c), GFP_KERNEL);
+       if (!i2c)
+               return -ENOMEM;
+
+       match = of_match_node(rk3x_i2c_match, np);
+       i2c->soc_data = (struct rk3x_i2c_soc_data *)match->data;
+
+       if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+                                &i2c->scl_frequency)) {
+               dev_info(&pdev->dev, "using default SCL frequency: %d\n",
+                        DEFAULT_SCL_RATE);
+               i2c->scl_frequency = DEFAULT_SCL_RATE;
+       }
+
+       if (i2c->scl_frequency == 0 || i2c->scl_frequency > 400 * 1000) {
+               dev_warn(&pdev->dev, "invalid SCL frequency specified.\n");
+               dev_warn(&pdev->dev, "using default SCL frequency: %d\n",
+                        DEFAULT_SCL_RATE);
+               i2c->scl_frequency = DEFAULT_SCL_RATE;
+       }
+
+       strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+       i2c->adap.owner = THIS_MODULE;
+       i2c->adap.algo = &rk3x_i2c_algorithm;
+       i2c->adap.retries = 3;
+       i2c->adap.dev.of_node = np;
+       i2c->adap.algo_data = i2c;
+       i2c->adap.dev.parent = &pdev->dev;
+
+       i2c->dev = &pdev->dev;
+
+       spin_lock_init(&i2c->lock);
+       init_waitqueue_head(&i2c->wait);
+
+       i2c->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(i2c->clk)) {
+               dev_err(&pdev->dev, "cannot get clock\n");
+               return PTR_ERR(i2c->clk);
+       }
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(i2c->regs))
+               return PTR_ERR(i2c->regs);
+
+       /* Try to set the I2C adapter number from dt */
+       bus_nr = of_alias_get_id(np, "i2c");
+
+       /*
+        * Switch to new interface if the SoC also offers the old one.
+        * The control bit is located in the GRF register space.
+        */
+       if (i2c->soc_data->grf_offset >= 0) {
+               struct regmap *grf;
+
+               grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+               if (IS_ERR(grf)) {
+                       dev_err(&pdev->dev,
+                               "rk3x-i2c needs 'rockchip,grf' property\n");
+                       return PTR_ERR(grf);
+               }
+
+               if (bus_nr < 0) {
+                       dev_err(&pdev->dev, "rk3x-i2c needs i2cX alias\n");
+                       return -EINVAL;
+               }
+
+               /* 27+i: write mask, 11+i: value */
+               value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
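+               /*
+                * E.g. the "i2c2" alias yields BIT(29) | BIT(13): the upper 16
+                * bits of a GRF register act as a write-enable mask for the
+                * corresponding lower bits, so only our bit is changed.
+                */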
+
+               ret = regmap_write(grf, i2c->soc_data->grf_offset, value);
+               if (ret != 0) {
+                       dev_err(i2c->dev, "Could not write to GRF: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       /* IRQ setup */
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "cannot find rk3x IRQ\n");
+               return irq;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, rk3x_i2c_irq,
+                              0, dev_name(&pdev->dev), i2c);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot request IRQ\n");
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, i2c);
+
+       ret = clk_prepare(i2c->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Could not prepare clock\n");
+               return ret;
+       }
+
+       ret = i2c_add_adapter(&i2c->adap);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Could not register adapter\n");
+               goto err_clk;
+       }
+
+       dev_info(&pdev->dev, "Initialized RK3xxx I2C bus at %p\n", i2c->regs);
+
+       return 0;
+
+err_clk:
+       clk_unprepare(i2c->clk);
+       return ret;
+}
+
+static int rk3x_i2c_remove(struct platform_device *pdev)
+{
+       struct rk3x_i2c *i2c = platform_get_drvdata(pdev);
+
+       i2c_del_adapter(&i2c->adap);
+       clk_unprepare(i2c->clk);
+
+       return 0;
+}
+
+static struct platform_driver rk3x_i2c_driver = {
+       .probe   = rk3x_i2c_probe,
+       .remove  = rk3x_i2c_remove,
+       .driver  = {
+               .owner = THIS_MODULE,
+               .name  = "rk3x-i2c",
+               .of_match_table = rk3x_i2c_match,
+       },
+};
+
+module_platform_driver(rk3x_i2c_driver);
+
+MODULE_DESCRIPTION("Rockchip RK3xxx I2C Bus driver");
+MODULE_AUTHOR("Max Schwarz <max.schwarz@online.de>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
new file mode 100644 (file)
index 0000000..4d75d47
--- /dev/null
@@ -0,0 +1,344 @@
+/*
+ * P2WI (Push-Pull Two Wire Interface) bus driver.
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The P2WI controller looks like an SMBus controller which only supports byte
+ * data transfers. However, it differs from the standard SMBus protocol in
+ * several respects:
+ * - it supports only one slave device, and thus drops the address field
+ * - it adds a parity bit after every 8 bits of data
+ * - only one read access is required to read a byte (instead of a write
+ *   followed by a read access in the standard SMBus protocol)
+ * - there is no ACK bit after each byte transfer
+ *
+ * This means this bus cannot be used to interface with standard SMBus
+ * devices (the only known device to support this interface is the AXP221
+ * PMIC).
+ */
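+
+/*
+ * Usage sketch (hypothetical client code, not part of this driver): a
+ * consumer such as the AXP221 PMIC driver would talk to the chip through the
+ * ordinary SMBus byte-data helpers, with the P2WI framing handled here:
+ *
+ *     val = i2c_smbus_read_byte_data(client, reg);
+ *     err = i2c_smbus_write_byte_data(client, reg, val | BIT(0));
+ */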
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* P2WI registers */
+#define P2WI_CTRL              0x0
+#define P2WI_CCR               0x4
+#define P2WI_INTE              0x8
+#define P2WI_INTS              0xc
+#define P2WI_DADDR0            0x10
+#define P2WI_DADDR1            0x14
+#define P2WI_DLEN              0x18
+#define P2WI_DATA0             0x1c
+#define P2WI_DATA1             0x20
+#define P2WI_LCR               0x24
+#define P2WI_PMCR              0x28
+
+/* CTRL fields */
+#define P2WI_CTRL_START_TRANS          BIT(7)
+#define P2WI_CTRL_ABORT_TRANS          BIT(6)
+#define P2WI_CTRL_GLOBAL_INT_ENB       BIT(1)
+#define P2WI_CTRL_SOFT_RST             BIT(0)
+
+/* CLK CTRL fields */
+#define P2WI_CCR_SDA_OUT_DELAY(v)      (((v) & 0x7) << 8)
+#define P2WI_CCR_MAX_CLK_DIV           0xff
+#define P2WI_CCR_CLK_DIV(v)            ((v) & P2WI_CCR_MAX_CLK_DIV)
+
+/* STATUS fields */
+#define P2WI_INTS_TRANS_ERR_ID(v)      (((v) >> 8) & 0xff)
+#define P2WI_INTS_LOAD_BSY             BIT(2)
+#define P2WI_INTS_TRANS_ERR            BIT(1)
+#define P2WI_INTS_TRANS_OVER           BIT(0)
+
+/* DATA LENGTH fields */
+#define P2WI_DLEN_READ                 BIT(4)
+#define P2WI_DLEN_DATA_LENGTH(v)       (((v) - 1) & 0x7)
+
+/* LINE CTRL fields */
+#define P2WI_LCR_SCL_STATE             BIT(5)
+#define P2WI_LCR_SDA_STATE             BIT(4)
+#define P2WI_LCR_SCL_CTL               BIT(3)
+#define P2WI_LCR_SCL_CTL_EN            BIT(2)
+#define P2WI_LCR_SDA_CTL               BIT(1)
+#define P2WI_LCR_SDA_CTL_EN            BIT(0)
+
+/* PMU MODE CTRL fields */
+#define P2WI_PMCR_PMU_INIT_SEND                BIT(31)
+#define P2WI_PMCR_PMU_INIT_DATA(v)     (((v) & 0xff) << 16)
+#define P2WI_PMCR_PMU_MODE_REG(v)      (((v) & 0xff) << 8)
+#define P2WI_PMCR_PMU_DEV_ADDR(v)      ((v) & 0xff)
+
+#define P2WI_MAX_FREQ                  6000000
+
+struct p2wi {
+       struct i2c_adapter adapter;
+       struct completion complete;
+       unsigned int status;
+       void __iomem *regs;
+       struct clk *clk;
+       struct reset_control *rstc;
+       int slave_addr;
+};
+
+static irqreturn_t p2wi_interrupt(int irq, void *dev_id)
+{
+       struct p2wi *p2wi = dev_id;
+       unsigned long status;
+
+       status = readl(p2wi->regs + P2WI_INTS);
+       p2wi->status = status;
+
+       /* Clear interrupts */
+       status &= (P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR |
+                  P2WI_INTS_TRANS_OVER);
+       writel(status, p2wi->regs + P2WI_INTS);
+
+       complete(&p2wi->complete);
+
+       return IRQ_HANDLED;
+}
+
+static u32 p2wi_functionality(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static int p2wi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+                          unsigned short flags, char read_write,
+                          u8 command, int size, union i2c_smbus_data *data)
+{
+       struct p2wi *p2wi = i2c_get_adapdata(adap);
+       unsigned long dlen = P2WI_DLEN_DATA_LENGTH(1);
+
+       if (p2wi->slave_addr >= 0 && addr != p2wi->slave_addr) {
+               dev_err(&adap->dev, "invalid P2WI address\n");
+               return -EINVAL;
+       }
+
+       if (!data)
+               return -EINVAL;
+
+       writel(command, p2wi->regs + P2WI_DADDR0);
+
+       if (read_write == I2C_SMBUS_READ)
+               dlen |= P2WI_DLEN_READ;
+       else
+               writel(data->byte, p2wi->regs + P2WI_DATA0);
+
+       writel(dlen, p2wi->regs + P2WI_DLEN);
+
+       if (readl(p2wi->regs + P2WI_CTRL) & P2WI_CTRL_START_TRANS) {
+               dev_err(&adap->dev, "P2WI bus busy\n");
+               return -EBUSY;
+       }
+
+       reinit_completion(&p2wi->complete);
+
+       writel(P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR | P2WI_INTS_TRANS_OVER,
+              p2wi->regs + P2WI_INTE);
+
+       writel(P2WI_CTRL_START_TRANS | P2WI_CTRL_GLOBAL_INT_ENB,
+              p2wi->regs + P2WI_CTRL);
+
+       wait_for_completion(&p2wi->complete);
+
+       if (p2wi->status & P2WI_INTS_LOAD_BSY) {
+               dev_err(&adap->dev, "P2WI bus busy\n");
+               return -EBUSY;
+       }
+
+       if (p2wi->status & P2WI_INTS_TRANS_ERR) {
+               dev_err(&adap->dev, "P2WI bus xfer error\n");
+               return -ENXIO;
+       }
+
+       if (read_write == I2C_SMBUS_READ)
+               data->byte = readl(p2wi->regs + P2WI_DATA0);
+
+       return 0;
+}
+
+static const struct i2c_algorithm p2wi_algo = {
+       .smbus_xfer = p2wi_smbus_xfer,
+       .functionality = p2wi_functionality,
+};
+
+static const struct of_device_id p2wi_of_match_table[] = {
+       { .compatible = "allwinner,sun6i-a31-p2wi" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, p2wi_of_match_table);
+
+static int p2wi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *childnp;
+       unsigned long parent_clk_freq;
+       u32 clk_freq = 100000;
+       struct resource *r;
+       struct p2wi *p2wi;
+       u32 slave_addr;
+       int clk_div;
+       int irq;
+       int ret;
+
+       of_property_read_u32(np, "clock-frequency", &clk_freq);
+       if (clk_freq > P2WI_MAX_FREQ) {
+               dev_err(dev,
+                       "required clock-frequency (%u Hz) is too high (max = 6 MHz)\n",
+                       clk_freq);
+               return -EINVAL;
+       }
+
+       if (of_get_child_count(np) > 1) {
+               dev_err(dev, "P2WI only supports one slave device\n");
+               return -EINVAL;
+       }
+
+       p2wi = devm_kzalloc(dev, sizeof(struct p2wi), GFP_KERNEL);
+       if (!p2wi)
+               return -ENOMEM;
+
+       p2wi->slave_addr = -1;
+
+       /*
+        * Allow a p2wi node without any children, so that the bus can still
+        * be used from userspace through i2c-dev.
+        * In that case slave_addr stays at -1 and is not checked when
+        * launching a P2WI transfer.
+        */
+       childnp = of_get_next_available_child(np, NULL);
+       if (childnp) {
+               ret = of_property_read_u32(childnp, "reg", &slave_addr);
+               if (ret) {
+                       dev_err(dev, "invalid slave address on node %s\n",
+                               childnp->full_name);
+                       return -EINVAL;
+               }
+
+               p2wi->slave_addr = slave_addr;
+       }
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       p2wi->regs = devm_ioremap_resource(dev, r);
+       if (IS_ERR(p2wi->regs))
+               return PTR_ERR(p2wi->regs);
+
+       strlcpy(p2wi->adapter.name, pdev->name, sizeof(p2wi->adapter.name));
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "failed to retrieve irq: %d\n", irq);
+               return irq;
+       }
+
+       p2wi->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(p2wi->clk)) {
+               ret = PTR_ERR(p2wi->clk);
+               dev_err(dev, "failed to retrieve clk: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(p2wi->clk);
+       if (ret) {
+               dev_err(dev, "failed to enable clk: %d\n", ret);
+               return ret;
+       }
+
+       parent_clk_freq = clk_get_rate(p2wi->clk);
+
+       p2wi->rstc = devm_reset_control_get(dev, NULL);
+       if (IS_ERR(p2wi->rstc)) {
+               ret = PTR_ERR(p2wi->rstc);
+               dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
+               goto err_clk_disable;
+       }
+
+       ret = reset_control_deassert(p2wi->rstc);
+       if (ret) {
+               dev_err(dev, "failed to deassert reset line: %d\n", ret);
+               goto err_clk_disable;
+       }
+
+       init_completion(&p2wi->complete);
+       p2wi->adapter.dev.parent = dev;
+       p2wi->adapter.algo = &p2wi_algo;
+       p2wi->adapter.owner = THIS_MODULE;
+       p2wi->adapter.dev.of_node = pdev->dev.of_node;
+       platform_set_drvdata(pdev, p2wi);
+       i2c_set_adapdata(&p2wi->adapter, p2wi);
+
+       ret = devm_request_irq(dev, irq, p2wi_interrupt, 0, pdev->name, p2wi);
+       if (ret) {
+               dev_err(dev, "can't register interrupt handler irq%d: %d\n",
+                       irq, ret);
+               goto err_reset_assert;
+       }
+
+       writel(P2WI_CTRL_SOFT_RST, p2wi->regs + P2WI_CTRL);
+
+       clk_div = parent_clk_freq / clk_freq;
+       if (!clk_div) {
+               dev_warn(dev,
+                        "clock-frequency is too high, setting it to %lu Hz\n",
+                        parent_clk_freq);
+               clk_div = 1;
+       } else if (clk_div > P2WI_CCR_MAX_CLK_DIV) {
+               dev_warn(dev,
+                        "clock-frequency is too low, setting it to %lu Hz\n",
+                        parent_clk_freq / P2WI_CCR_MAX_CLK_DIV);
+               clk_div = P2WI_CCR_MAX_CLK_DIV;
+       }
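+
+       /*
+        * Example (illustrative): with a hypothetical 24 MHz parent clock and
+        * the default 100 kHz bus frequency, clk_div = 240 fits the 8-bit
+        * field; a request above 24 MHz clamps to clk_div = 1 and one below
+        * ~94 kHz (24 MHz / 255) clamps to clk_div = 255.
+        */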
+
+       writel(P2WI_CCR_SDA_OUT_DELAY(1) | P2WI_CCR_CLK_DIV(clk_div),
+              p2wi->regs + P2WI_CCR);
+
+       ret = i2c_add_adapter(&p2wi->adapter);
+       if (!ret)
+               return 0;
+
+err_reset_assert:
+       reset_control_assert(p2wi->rstc);
+
+err_clk_disable:
+       clk_disable_unprepare(p2wi->clk);
+
+       return ret;
+}
+
+static int p2wi_remove(struct platform_device *dev)
+{
+       struct p2wi *p2wi = platform_get_drvdata(dev);
+
+       i2c_del_adapter(&p2wi->adapter);
+       reset_control_assert(p2wi->rstc);
+       clk_disable_unprepare(p2wi->clk);
+
+       return 0;
+}
+
+static struct platform_driver p2wi_driver = {
+       .probe  = p2wi_probe,
+       .remove = p2wi_remove,
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "i2c-sunxi-p2wi",
+               .of_match_table = p2wi_of_match_table,
+       },
+};
+module_platform_driver(p2wi_driver);
+
+MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner P2WI driver");
+MODULE_LICENSE("GPL v2");
index 7c7f4b8..66aa83b 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
+#include <linux/clk/clk-conf.h>
 #include <linux/completion.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
@@ -274,6 +275,10 @@ static int i2c_device_probe(struct device *dev)
                                        client->flags & I2C_CLIENT_WAKE);
        dev_dbg(dev, "probe\n");
 
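+       /* Honour any assigned-clocks/assigned-clock-rates defaults from DT */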
+       status = of_clk_set_defaults(dev->of_node, false);
+       if (status < 0)
+               return status;
+
        acpi_dev_pm_attach(&client->dev, true);
        status = driver->probe(client, i2c_match_id(driver->id_table, client));
        if (status)
index f7f9865..f6d313e 100644 (file)
@@ -40,6 +40,7 @@ config I2C_MUX_PCA9541
 
 config I2C_MUX_PCA954x
        tristate "Philips PCA954x I2C Mux/switches"
+       depends on GPIOLIB
        help
          If you say yes here you get support for the Philips PCA954x
          I2C mux/switch devices.
index 8fb46aa..a04c49f 100644 (file)
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
 
 config BLK_DEV_CS5520
        tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
 
 config BLK_DEV_CS5530
        tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
 
 config BLK_DEV_CS5535
        tristate "AMD CS5535 chipset support"
-       depends on X86 && !X86_64
+       depends on X86_32
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
 
 config BLK_DEV_SC1200
        tristate "National SCx200 chipset support"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          This driver adds support for the on-board IDE controller on the
index 2a744a9..a3d3b17 100644 (file)
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
        if (irq_handler == NULL)
                irq_handler = ide_intr;
 
-       if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
-               goto out_up;
+       if (!host->get_lock)
+               if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+                       goto out_up;
 
 #if !defined(__mc68000__)
        printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
 
        ide_proc_unregister_port(hwif);
 
-       free_irq(hwif->irq, hwif);
+       if (!hwif->host->get_lock)
+               free_irq(hwif->irq, hwif);
 
        device_unregister(hwif->portdev);
        device_unregister(&hwif->gendev);
index a7e68c8..a077cc8 100644 (file)
 /* Default values */
 #define BMA180_DEF_PMODE       0
 #define BMA180_DEF_BW          20
-#define BMA180_DEF_SCALE       250
+#define BMA180_DEF_SCALE       2452
 
 /* Available values for sysfs */
 #define BMA180_FLP_FREQ_AVAILABLE \
        "10 20 40 75 150 300"
 #define BMA180_SCALE_AVAILABLE \
-       "0.000130 0.000190 0.000250 0.000380 0.000500 0.000990 0.001980"
+       "0.001275 0.001863 0.002452 0.003727 0.004903 0.009709 0.019417"
 
 struct bma180_data {
        struct i2c_client *client;
@@ -94,7 +94,7 @@ enum bma180_axis {
 };
 
 static int bw_table[] = { 10, 20, 40, 75, 150, 300 }; /* Hz */
-static int scale_table[] = { 130, 190, 250, 380, 500, 990, 1980 };
+static int scale_table[] = { 1275, 1863, 2452, 3727, 4903, 9709, 19417 };
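+
+/*
+ * The values above appear to be the datasheet LSB weights converted to m/s^2
+ * in micro units, e.g. 0.25 mg/LSB gives 0.25e-3 * 9.80665 ~= 0.002452 m/s^2,
+ * i.e. the 2452 entry.
+ */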
 
 static int bma180_get_acc_reg(struct bma180_data *data, enum bma180_axis axis)
 {
@@ -376,6 +376,8 @@ static int bma180_write_raw(struct iio_dev *indio_dev,
                mutex_unlock(&data->mutex);
                return ret;
        case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+               if (val2)
+                       return -EINVAL;
                mutex_lock(&data->mutex);
                ret = bma180_set_bw(data, val);
                mutex_unlock(&data->mutex);
index 69abf91..54e464e 100644 (file)
@@ -110,7 +110,6 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
        struct accel_3d_state *accel_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -151,14 +150,12 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                        &accel_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                        &accel_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index 17aeea1..2a5fa9a 100644 (file)
@@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
        {6, 250000}, {1, 560000}
 };
 
+/*
+ * Hardware has a full scale of -2g, -4g or -8g corresponding to a raw value
+ * of -2048. The userspace interface uses m/s^2, declared in micro units, so
+ * the scale factor is:
+ *     g * N * 1000000 / 2048 for N = 2, 4, 8 and g = 9.80665
+ */
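+/* Worked instance: N = 2 gives 9.80665 * 2 * 1000000 / 2048 ~= 9577. */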
 static const int mma8452_scales[3][2] = {
-       {0, 977}, {0, 1953}, {0, 3906}
+       {0, 9577}, {0, 19154}, {0, 38307}
 };
 
 static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
index 39b4cb4..6eba301 100644 (file)
@@ -427,9 +427,12 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev,
        int ret;
        struct ad799x_state *st = iio_priv(indio_dev);
 
+       if (val < 0 || val > RES_MASK(chan->scan_type.realbits))
+               return -EINVAL;
+
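+       /*
+        * Illustration (assumed layout): on a channel with realbits = 10 and
+        * shift = 2 the threshold occupies register bits [11:2], hence the
+        * range check above and the shift below.
+        */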
        mutex_lock(&indio_dev->mlock);
        ret = ad799x_i2c_write16(st, ad799x_threshold_reg(chan, dir, info),
-               val);
+               val << chan->scan_type.shift);
        mutex_unlock(&indio_dev->mlock);
 
        return ret;
@@ -452,7 +455,8 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
        mutex_unlock(&indio_dev->mlock);
        if (ret < 0)
                return ret;
-       *val = valin;
+       *val = (valin >> chan->scan_type.shift) &
+               RES_MASK(chan->scan_type.realbits);
 
        return IIO_VAL_INT;
 }
index 3b5bacd..2b6a9ce 100644 (file)
@@ -510,12 +510,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
        return idev->num_channels;
 }
 
-static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
+static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
                                             struct at91_adc_trigger *triggers,
                                             const char *trigger_name)
 {
        struct at91_adc_state *st = iio_priv(idev);
-       u8 value = 0;
        int i;
 
        for (i = 0; i < st->trigger_number; i++) {
@@ -528,15 +527,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
                        return -ENOMEM;
 
                if (strcmp(trigger_name, name) == 0) {
-                       value = triggers[i].value;
                        kfree(name);
-                       break;
+                       if (triggers[i].value == 0)
+                               return -EINVAL;
+                       return triggers[i].value;
                }
 
                kfree(name);
        }
 
-       return value;
+       return -EINVAL;
 }
 
 static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
@@ -546,14 +546,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
        struct iio_buffer *buffer = idev->buffer;
        struct at91_adc_reg_desc *reg = st->registers;
        u32 status = at91_adc_readl(st, reg->trigger_register);
-       u8 value;
+       int value;
        u8 bit;
 
        value = at91_adc_get_trigger_value_by_name(idev,
                                                   st->trigger_list,
                                                   idev->trig->name);
-       if (value == 0)
-               return -EINVAL;
+       if (value < 0)
+               return value;
 
        if (state) {
                st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
index 6989c16..b58d630 100644 (file)
@@ -121,8 +121,8 @@ static int men_z188_probe(struct mcb_device *dev,
        indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
 
        mem = mcb_request_mem(dev, "z188-adc");
-       if (!mem)
-               return -ENOMEM;
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
 
        adc->base = ioremap(mem->start, resource_size(mem));
        if (adc->base == NULL)
index a4db302..d5dc4c6 100644 (file)
@@ -374,7 +374,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
                        return -EAGAIN;
                }
        }
-       map_val = chan->channel + TOTAL_CHANNELS;
+       map_val = adc_dev->channel_step[chan->scan_index];
 
        /*
         * We check the complete FIFO. We programmed just one entry but in case
index 7de1c4c..eb86786 100644 (file)
@@ -645,6 +645,7 @@ int twl4030_get_madc_conversion(int channel_no)
        req.channels = (1 << channel_no);
        req.method = TWL4030_MADC_SW2;
        req.active = 0;
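+       /* initialise raw explicitly so stack garbage cannot select raw mode */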
+       req.raw = 0;
        req.func_cb = NULL;
        ret = twl4030_madc_conversion(&req);
        if (ret < 0)
index 73282ce..a3109a6 100644 (file)
@@ -75,6 +75,9 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
                                        (s32)report_val);
        }
 
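+       /*
+        * Read the feature report back so we know the hub has actually
+        * applied the requested power state.
+        */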
+       sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+                                       st->power_state.index,
+                                       &state_val);
        return 0;
 }
 EXPORT_SYMBOL(hid_sensor_power_state);
index 40f4e49..fa034a3 100644 (file)
@@ -110,7 +110,6 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
        struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -151,14 +150,12 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                        &gyro_state->common_attributes, val, val2);
-                       ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                        &gyro_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index 36b1ae9..9f1a140 100644 (file)
@@ -966,7 +966,7 @@ static int iio_buffer_update_demux(struct iio_dev *indio_dev,
 
        /* Now we have the two masks, work from least sig and build up sizes */
        for_each_set_bit(out_ind,
-                        indio_dev->active_scan_mask,
+                        buffer->scan_mask,
                         indio_dev->masklength) {
                in_ind = find_next_bit(indio_dev->active_scan_mask,
                                       indio_dev->masklength,
index 258a973..bfbf4d4 100644 (file)
@@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
                        &indio_dev->event_interface->dev_attr_list);
                kfree(postfix);
 
+               if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+                       continue;
+
                if (ret)
                        return ret;
 
index d833d55..c749700 100644 (file)
@@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                else if (name && index >= 0) {
                        pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
                                np->full_name, name ? name : "", index);
-                       return chan;
+                       return NULL;
                }
 
                /*
@@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                 */
                np = np->parent;
                if (np && !of_get_property(np, "io-channel-ranges", NULL))
-                       break;
+                       return NULL;
        }
+
        return chan;
 }
 
@@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev,
                if (channel != NULL)
                        return channel;
        }
+
        return iio_channel_get_sys(name, channel_name);
 }
 EXPORT_SYMBOL_GPL(iio_channel_get);
index f34c943..96e71e1 100644 (file)
@@ -79,7 +79,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
        struct als_state *als_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -129,14 +128,12 @@ static int als_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                                &als_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                                &als_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index d203ef4..412bae8 100644 (file)
@@ -74,7 +74,6 @@ static int prox_read_raw(struct iio_dev *indio_dev,
        struct prox_state *prox_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -125,14 +124,12 @@ static int prox_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                                &prox_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                                &prox_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index fe063a0..7525699 100644 (file)
@@ -52,6 +52,7 @@
 
 struct tcs3472_data {
        struct i2c_client *client;
+       struct mutex lock;
        u8 enable;
        u8 control;
        u8 atime;
@@ -116,10 +117,17 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
+               if (iio_buffer_enabled(indio_dev))
+                       return -EBUSY;
+
+               mutex_lock(&data->lock);
                ret = tcs3472_req_data(data);
-               if (ret < 0)
+               if (ret < 0) {
+                       mutex_unlock(&data->lock);
                        return ret;
+               }
                ret = i2c_smbus_read_word_data(data->client, chan->address);
+               mutex_unlock(&data->lock);
                if (ret < 0)
                        return ret;
                *val = ret;
@@ -255,6 +263,7 @@ static int tcs3472_probe(struct i2c_client *client,
        data = iio_priv(indio_dev);
        i2c_set_clientdata(client, indio_dev);
        data->client = client;
+       mutex_init(&data->lock);
 
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &tcs3472_info;
index 09ea5c4..ea08313 100644 (file)
@@ -373,8 +373,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
 {
        struct ak8975_data *data = iio_priv(indio_dev);
        struct i2c_client *client = data->client;
-       u16 meas_reg;
-       s16 raw;
        int ret;
 
        mutex_lock(&data->lock);
@@ -422,16 +420,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
                dev_err(&client->dev, "Read axis data fails\n");
                goto exit;
        }
-       meas_reg = ret;
 
        mutex_unlock(&data->lock);
 
-       /* Endian conversion of the measured values. */
-       raw = (s16) (le16_to_cpu(meas_reg));
-
        /* Clamp to valid range. */
-       raw = clamp_t(s16, raw, -4096, 4095);
-       *val = raw;
+       *val = clamp_t(s16, ret, -4096, 4095);
        return IIO_VAL_INT;
 
 exit:
index 41cf29e..b2b0937 100644 (file)
@@ -110,7 +110,6 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
        struct magn_3d_state *magn_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -153,14 +152,12 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                        &magn_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                        &magn_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index 1cd190c..2c0d2a4 100644 (file)
@@ -78,7 +78,6 @@ static int press_read_raw(struct iio_dev *indio_dev,
        struct press_state *press_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -128,14 +127,12 @@ static int press_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                                &press_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                                &press_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index ba6d0c5..01b2e0b 100644 (file)
@@ -98,7 +98,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
                        mutex_unlock(&data->lock);
                        if (ret < 0)
                                return ret;
-                       *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23);
+                       *val = be32_to_cpu(tmp) >> 12;
                        return IIO_VAL_INT;
                case IIO_TEMP: /* in 0.0625 celsius / LSB */
                        mutex_lock(&data->lock);
@@ -112,7 +112,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
                        mutex_unlock(&data->lock);
                        if (ret < 0)
                                return ret;
-                       *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15);
+                       *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
                        return IIO_VAL_INT;
                default:
                        return -EINVAL;
@@ -185,7 +185,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
                        BIT(IIO_CHAN_INFO_SCALE),
                .scan_index = 0,
                .scan_type = {
-                       .sign = 's',
+                       .sign = 'u',
                        .realbits = 20,
                        .storagebits = 32,
                        .shift = 12,
index 5e153f6..768a0fb 100644 (file)
@@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
  */
 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 {
+       struct c4iw_ep *ep = handle;
+
        printk(KERN_ERR MOD "ARP failure duing connect\n");
        kfree_skb(skb);
+       connect_reply_upcall(ep, -EHOSTUNREACH);
+       state_set(&ep->com, DEAD);
+       remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+       cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+       dst_release(ep->dst);
+       cxgb4_l2t_release(ep->l2t);
+       c4iw_put_ep(&ep->com);
 }
 
 /*
@@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep)
                opt2 |= T5_OPT_2_VALID;
                opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
        }
-       t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+       t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
 
        if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
                if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
        PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(struct cpl_tid_release));
-       skb_get(skb);
        release_tid(&dev->rdev, hwtid, skb);
        return;
 }
@@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void)
        return 0;
 }
 
-void __exit c4iw_cm_term(void)
+void c4iw_cm_term(void)
 {
        WARN_ON(!list_empty(&timeout_list));
        flush_workqueue(workq);
index dd93aad..7db82b2 100644 (file)
@@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                pr_err(MOD "error allocating status page\n");
                goto err4;
        }
+       rdev->status_page->db_off = 0;
        return 0;
 err4:
        c4iw_rqtpool_destroy(rdev);
@@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
        if (ctx->dev->rdev.oc_mw_kva)
                iounmap(ctx->dev->rdev.oc_mw_kva);
        ib_dealloc_device(&ctx->dev->ibdev);
-       iwpm_exit(RDMA_NL_C4IW);
        ctx->dev = NULL;
 }
 
@@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                setup_debugfs(devp);
        }
 
-       ret = iwpm_init(RDMA_NL_C4IW);
-       if (ret) {
-               pr_err("port mapper initialization failed with %d\n", ret);
-               ib_dealloc_device(&devp->ibdev);
-               return ERR_PTR(ret);
-       }
 
        return devp;
 }
@@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void)
                pr_err("%s[%u]: Failed to add netlink callback\n"
                       , __func__, __LINE__);
 
+       err = iwpm_init(RDMA_NL_C4IW);
+       if (err) {
+               pr_err("port mapper initialization failed with %d\n", err);
+               ibnl_remove_client(RDMA_NL_C4IW);
+               c4iw_cm_term();
+               debugfs_remove_recursive(c4iw_debugfs_root);
+               return err;
+       }
+
        cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
 
        return 0;
@@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void)
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+       iwpm_exit(RDMA_NL_C4IW);
        ibnl_remove_client(RDMA_NL_C4IW);
        c4iw_cm_term();
        debugfs_remove_recursive(c4iw_debugfs_root);
index 125bc5d..361fff7 100644 (file)
@@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
 int c4iw_register_device(struct c4iw_dev *dev);
 void c4iw_unregister_device(struct c4iw_dev *dev);
 int __init c4iw_cm_init(void);
-void __exit c4iw_cm_term(void);
+void c4iw_cm_term(void);
 void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
 void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
index d13ddf1..bbbcf38 100644 (file)
@@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        int err;
 
        uuari = &dev->mdev.priv.uuari;
-       if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN)
+       if (init_attr->create_flags &
+           ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
                return -EINVAL;
 
        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
index 1c4c0db..29ca0bb 100644 (file)
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
 }
 
 static int input_get_disposition(struct input_dev *dev,
-                         unsigned int type, unsigned int code, int value)
+                         unsigned int type, unsigned int code, int *pval)
 {
        int disposition = INPUT_IGNORE_EVENT;
+       int value = *pval;
 
        switch (type) {
 
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
                break;
        }
 
+       *pval = value;
        return disposition;
 }
 
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
 {
        int disposition;
 
-       disposition = input_get_disposition(dev, type, code, value);
+       disposition = input_get_disposition(dev, type, code, &value);
 
        if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
                dev->event(dev, type, code, value);
index 758b487..de7be4f 100644 (file)
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int keyscan_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
        mutex_unlock(&input->mutex);
        return retval;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
 
index e4104f9..fed5102 100644 (file)
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
 
 module_platform_driver(sirfsoc_pwrc_driver);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
 MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
 MODULE_ALIAS("platform:sirfsoc-pwrc");
index ec772d9..ef9e0b8 100644 (file)
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                1232, 5710, 1156, 4696
        },
        {
-               (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+               (const char * const []){"LEN0034", "LEN0036", "LEN2002",
+                                       "LEN2004", NULL},
                1024, 5112, 2024, 4832
        },
        {
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0049",
        "LEN2000",
        "LEN2001", /* Edge E431 */
-       "LEN2002",
+       "LEN2002", /* Edge E531 */
        "LEN2003",
        "LEN2004", /* L440 */
        "LEN2005",
index 381b20d..136b7b2 100644 (file)
@@ -401,6 +401,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
                },
        },
+       {
+               /* Acer Aspire 5710 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+               },
+       },
        {
                /* Gericom Bellagio */
                .matches = {
index 977d05c..e73cf2c 100644 (file)
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
                         * a=(pi*r^2)/C.
                         */
                        int a = data[5];
-                       int x_res  = input_abs_get_res(input, ABS_X);
-                       int y_res  = input_abs_get_res(input, ABS_Y);
-                       width  = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+                       int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+                       int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
+                       width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
                        height = width * y_res / x_res;
                }
 
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
                input_abs_set_res(input_dev, ABS_X, features->x_resolution);
                input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
        } else {
-               if (features->touch_max <= 2) {
+               if (features->touch_max == 1) {
                        input_set_abs_params(input_dev, ABS_X, 0,
                                features->x_max, features->x_fuzz, 0);
                        input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
        case MTTPC:
        case MTTPC_B:
        case TABLETPC2FG:
-               if (features->device_type == BTN_TOOL_FINGER) {
-                       unsigned int flags = INPUT_MT_DIRECT;
-
-                       if (wacom_wac->features.type == TABLETPC2FG)
-                               flags = 0;
-
-                       input_mt_init_slots(input_dev, features->touch_max, flags);
-               }
+               if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+                       input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
                /* fall through */
 
        case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                        __set_bit(BTN_RIGHT, input_dev->keybit);
 
                        if (features->touch_max) {
-                               /* touch interface */
-                               unsigned int flags = INPUT_MT_POINTER;
-
-                               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                                if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
                                        input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                                        input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MINOR,
                                                     0, features->y_max, 0, 0);
-                               } else {
-                                       __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-                                       __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
-                                       flags = 0;
                                }
-                               input_mt_init_slots(input_dev, features->touch_max, flags);
+                               input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
                        } else {
                                /* buttons/keys only interface */
                                __clear_bit(ABS_X, input_dev->absbit);
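
A minimal sketch (not driver code; the helper name and axis ranges are made up) of the slot-allocation rule the wacom hunks above establish: only genuine multi-touch interfaces (touch_max > 1) get MT slots, while single-touch devices keep plain absolute axes.

    /* Hypothetical sketch of the new MT setup rule; assumes <linux/input/mt.h>. */
    static int setup_touch_sketch(struct input_dev *dev, unsigned int touch_max)
    {
            if (touch_max > 1)
                    /* one slot per finger on a direct-touch surface */
                    return input_mt_init_slots(dev, touch_max, INPUT_MT_DIRECT);

            /* single-touch devices keep plain absolute axes */
            input_set_abs_params(dev, ABS_X, 0, 4095, 0, 0);
            input_set_abs_params(dev, ABS_Y, 0, 4095, 0, 0);
            return 0;
    }
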
index 4e793a1..2ce6495 100644 (file)
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
         */
        err = of_property_read_u32(node, "ti,coordinate-readouts",
                        &ts_dev->coordinate_readouts);
-       if (err < 0)
+       if (err < 0) {
+               dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
                err = of_property_read_u32(node, "ti,coordiante-readouts",
                                &ts_dev->coordinate_readouts);
+       }
+
        if (err < 0)
                return err;
 
index d4daa05..499b436 100644 (file)
@@ -45,7 +45,7 @@ struct pri_queue {
 struct pasid_state {
        struct list_head list;                  /* For global state-list */
        atomic_t count;                         /* Reference count */
-       atomic_t mmu_notifier_count;            /* Counting nested mmu_notifier
+       unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
                                                   calls */
        struct task_struct *task;               /* Task bound to this PASID */
        struct mm_struct *mm;                   /* mm_struct for the faults */
@@ -53,7 +53,8 @@ struct pasid_state {
        struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
        struct device_state *device_state;      /* Link to our device_state */
        int pasid;                              /* PASID index */
-       spinlock_t lock;                        /* Protect pri_queues */
+       spinlock_t lock;                        /* Protect pri_queues and
+                                                  mmu_notifier_count */
        wait_queue_head_t wq;                   /* To wait for count == 0 */
 };
 
@@ -431,15 +432,19 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
 {
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
+       unsigned long flags;
 
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       if (pasid_state->mmu_notifier_count == 0) {
                amd_iommu_domain_set_gcr3(dev_state->domain,
                                          pasid_state->pasid,
                                          __pa(empty_page_table));
        }
+       pasid_state->mmu_notifier_count += 1;
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
 }
 
 static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -448,15 +453,19 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
 {
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
+       unsigned long flags;
 
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       pasid_state->mmu_notifier_count -= 1;
+       if (pasid_state->mmu_notifier_count == 0) {
                amd_iommu_domain_set_gcr3(dev_state->domain,
                                          pasid_state->pasid,
                                          __pa(pasid_state->mm->pgd));
        }
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
 }
 
 static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -650,7 +659,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                goto out;
 
        atomic_set(&pasid_state->count, 1);
-       atomic_set(&pasid_state->mmu_notifier_count, 0);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);
 
index b99dd88..bb446d7 100644 (file)
@@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
 static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
 {
        /* Bug if not a power of 2 */
-       BUG_ON(!is_power_of_2(addrspace_size));
+       BUG_ON((addrspace_size & (addrspace_size - 1)));
 
        /* window size is 2^(WSE+1) bytes */
-       return __ffs(addrspace_size) - 1;
+       return fls64(addrspace_size) - 2;
 }
 
 /* Derive the PAACE window count encoding for the subwindow count */
@@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
        struct paace *ppaace;
        unsigned long fspi;
 
-       if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
+       if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
                pr_debug("window size too small or not a power of two %llx\n", win_size);
                return -EINVAL;
        }
@@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
                return -ENOENT;
        }
 
-       if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
+       if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
                pr_debug("subwindow size out of range, or not a power of 2\n");
                return -EINVAL;
        }
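
Both PAMU hunks above exist because is_power_of_2() and __ffs() take an unsigned long, which truncates a 64-bit phys_addr_t on 32-bit kernels; the open-coded test and fls64() operate on the full 64-bit value. Since a window of size 2^(WSE+1) bytes implies WSE = log2(size) - 1, and fls64() of a power of two returns log2(size) + 1, the encoding is fls64(size) - 2. A worked check (sketch only):

    /* Sketch, assuming <linux/bitops.h> for fls64().
     * size = 4096 = 2^12: fls64(4096) = 13, so WSE = 11 and 2^(11+1) = 4096.
     */
    static unsigned int wse_sketch(u64 size)
    {
            BUG_ON(size & (size - 1));      /* must be a power of two */
            return fls64(size) - 2;
    }
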
index 93072ba..af47648 100644 (file)
@@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
         * Size must be a power of two and at least be equal
         * to PAMU page size.
         */
-       if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
+       if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
                pr_debug("%s: size too small or not a power of two\n", __func__);
                return -EINVAL;
        }
@@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
        return domain;
 }
 
-static inline struct device_domain_info *find_domain(struct device *dev)
-{
-       return dev->archdata.iommu_domain;
-}
-
 static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
 {
        unsigned long flags;
@@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
         * Check here if the device is already attached to domain or not.
         * If the device is already attached to a domain detach it.
         */
-       old_domain_info = find_domain(dev);
+       old_domain_info = dev->archdata.iommu_domain;
        if (old_domain_info && old_domain_info->domain != dma_domain) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
         * the info for the first LIODN as all
         * LIODNs share the same domain
         */
-       if (!old_domain_info)
+       if (!dev->archdata.iommu_domain)
                dev->archdata.iommu_domain = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
@@ -1042,12 +1037,15 @@ root_bus:
                        group = get_shared_pci_device_group(pdev);
        }
 
+       if (!group)
+               group = ERR_PTR(-ENODEV);
+
        return group;
 }
 
 static int fsl_pamu_add_device(struct device *dev)
 {
-       struct iommu_group *group = NULL;
+       struct iommu_group *group = ERR_PTR(-ENODEV);
        struct pci_dev *pdev;
        const u32 *prop;
        int ret, len;
@@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
                        group = get_device_iommu_group(dev);
        }
 
-       if (!group || IS_ERR(group))
+       if (IS_ERR(group))
                return PTR_ERR(group);
 
        ret = iommu_group_add_device(group, dev);
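
The group-lookup hunks above move from "NULL means failure" to the kernel's ERR_PTR convention, so the caller can propagate a real error code instead of guessing one. A minimal reminder of the idiom (sketch; the lookup body is elided):

    /* Sketch, assuming <linux/err.h>. */
    static struct iommu_group *find_group_sketch(struct device *dev)
    {
            struct iommu_group *group = ERR_PTR(-ENODEV);

            /* ... successful lookups overwrite 'group' ... */

            return group;   /* never NULL: either a valid group or an ERR_PTR */
    }

    /* caller side:
     *      group = find_group_sketch(dev);
     *      if (IS_ERR(group))
     *              return PTR_ERR(group);
     */
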
index 6bb3277..51b6b77 100644 (file)
@@ -3816,14 +3816,11 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
                                ((void *)rmrr) + rmrr->header.length,
                                rmrr->segment, rmrru->devices,
                                rmrru->devices_cnt);
-                       if (ret > 0)
-                               break;
-                       else if(ret < 0)
+                       if (ret < 0)
                                return ret;
                } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
-                       if (dmar_remove_dev_scope(info, rmrr->segment,
-                               rmrru->devices, rmrru->devices_cnt))
-                               break;
+                       dmar_remove_dev_scope(info, rmrr->segment,
+                               rmrru->devices, rmrru->devices_cnt);
                }
        }
 
index bbb746e..7f0c2a3 100644 (file)
@@ -10,6 +10,11 @@ config ARM_GIC
 config GIC_NON_BANKED
        bool
 
+config ARM_GIC_V3
+       bool
+       select IRQ_DOMAIN
+       select MULTI_IRQ_HANDLER
+
 config ARM_NVIC
        bool
        select IRQ_DOMAIN
index 62a13e5..c57e642 100644 (file)
@@ -15,7 +15,8 @@ obj-$(CONFIG_ORION_IRQCHIP)           += irq-orion.o
 obj-$(CONFIG_ARCH_SUNXI)               += irq-sun4i.o
 obj-$(CONFIG_ARCH_SUNXI)               += irq-sunxi-nmi.o
 obj-$(CONFIG_ARCH_SPEAR3XX)            += spear-shirq.o
-obj-$(CONFIG_ARM_GIC)                  += irq-gic.o
+obj-$(CONFIG_ARM_GIC)                  += irq-gic.o irq-gic-common.o
+obj-$(CONFIG_ARM_GIC_V3)               += irq-gic-v3.o irq-gic-common.o
 obj-$(CONFIG_ARM_NVIC)                 += irq-nvic.o
 obj-$(CONFIG_ARM_VIC)                  += irq-vic.o
 obj-$(CONFIG_IMGPDC_IRQ)               += irq-imgpdc.o
index c887e6e..574aba0 100644 (file)
@@ -334,6 +334,15 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
 
 static void armada_xp_mpic_smp_cpu_init(void)
 {
+       u32 control;
+       int nr_irqs, i;
+
+       control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+       nr_irqs = (control >> 2) & 0x3ff;
+
+       for (i = 0; i < nr_irqs; i++)
+               writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+
        /* Clear pending IPIs */
        writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
 
@@ -474,7 +483,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
                                             struct device_node *parent)
 {
        struct resource main_int_res, per_cpu_int_res;
-       int parent_irq;
+       int parent_irq, nr_irqs, i;
        u32 control;
 
        BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -496,9 +505,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
        BUG_ON(!per_cpu_int_base);
 
        control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+       nr_irqs = (control >> 2) & 0x3ff;
+
+       for (i = 0; i < nr_irqs; i++)
+               writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
 
        armada_370_xp_mpic_domain =
-               irq_domain_add_linear(node, (control >> 2) & 0x3ff,
+               irq_domain_add_linear(node, nr_irqs,
                                &armada_370_xp_mpic_irq_ops, NULL);
 
        BUG_ON(!armada_370_xp_mpic_domain);
index 8ee2a36..c15c840 100644 (file)
@@ -150,7 +150,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
 
        /* Allocate a single Generic IRQ chip for this node */
        ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
-                               np->full_name, handle_level_irq, clr, 0, 0);
+                               np->full_name, handle_edge_irq, clr, 0, 0);
        if (ret) {
                pr_err("failed to allocate generic irq chip\n");
                goto out_free_domain;
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
new file mode 100644 (file)
index 0000000..60ac704
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip/arm-gic.h>
+
+#include "irq-gic-common.h"
+
+void gic_configure_irq(unsigned int irq, unsigned int type,
+                      void __iomem *base, void (*sync_access)(void))
+{
+       u32 enablemask = 1 << (irq % 32);
+       u32 enableoff = (irq / 32) * 4;
+       u32 confmask = 0x2 << ((irq % 16) * 2);
+       u32 confoff = (irq / 16) * 4;
+       bool enabled = false;
+       u32 val;
+
+       /*
+        * Read current configuration register, and insert the config
+        * for "irq", depending on "type".
+        */
+       val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
+       if (type == IRQ_TYPE_LEVEL_HIGH)
+               val &= ~confmask;
+       else if (type == IRQ_TYPE_EDGE_RISING)
+               val |= confmask;
+
+       /*
+        * As recommended by the spec, disable the interrupt before changing
+        * the configuration
+        */
+       if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
+               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
+               if (sync_access)
+                       sync_access();
+               enabled = true;
+       }
+
+       /*
+        * Write back the new configuration, and possibly re-enable
+        * the interrupt.
+        */
+       writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
+
+       if (enabled)
+               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+
+       if (sync_access)
+               sync_access();
+}
+
+void __init gic_dist_config(void __iomem *base, int gic_irqs,
+                           void (*sync_access)(void))
+{
+       unsigned int i;
+
+       /*
+        * Set all global interrupts to be level triggered, active low.
+        */
+       for (i = 32; i < gic_irqs; i += 16)
+               writel_relaxed(0, base + GIC_DIST_CONFIG + i / 4);
+
+       /*
+        * Set priority on all global interrupts.
+        */
+       for (i = 32; i < gic_irqs; i += 4)
+               writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
+
+       /*
+        * Disable all interrupts.  Leave the PPI and SGIs alone
+        * as they are enabled by redistributor registers.
+        */
+       for (i = 32; i < gic_irqs; i += 32)
+               writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i / 8);
+
+       if (sync_access)
+               sync_access();
+}
+
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void))
+{
+       int i;
+
+       /*
+        * Deal with the banked PPI and SGI interrupts - disable all
+        * PPI interrupts, ensure all SGI interrupts are enabled.
+        */
+       writel_relaxed(0xffff0000, base + GIC_DIST_ENABLE_CLEAR);
+       writel_relaxed(0x0000ffff, base + GIC_DIST_ENABLE_SET);
+
+       /*
+        * Set priority on PPI and SGI interrupts
+        */
+       for (i = 0; i < 32; i += 4)
+               writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i);
+
+       if (sync_access)
+               sync_access();
+}
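
The bit arithmetic in gic_configure_irq() follows from the GIC register layout: two config bits per interrupt (sixteen per 32-bit GIC_DIST_CONFIG register) and one enable bit per interrupt (thirty-two per register). Worked through for one illustrative interrupt:

    /* Worked example for irq = 35:
     *   confoff    = (35 / 16) * 4           = 8    (third config register)
     *   confmask   = 0x2 << ((35 % 16) * 2)  = 0x80 (the edge/level bit)
     *   enableoff  = (35 / 32) * 4           = 4
     *   enablemask = 1 << (35 % 32)          = 0x8
     */
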
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
new file mode 100644 (file)
index 0000000..b41f024
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2002 ARM Limited, All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _IRQ_GIC_COMMON_H
+#define _IRQ_GIC_COMMON_H
+
+#include <linux/of.h>
+#include <linux/irqdomain.h>
+
+void gic_configure_irq(unsigned int irq, unsigned int type,
+                       void __iomem *base, void (*sync_access)(void));
+void gic_dist_config(void __iomem *base, int gic_irqs,
+                    void (*sync_access)(void));
+void gic_cpu_config(void __iomem *base, void (*sync_access)(void));
+
+#endif /* _IRQ_GIC_COMMON_H */
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
new file mode 100644 (file)
index 0000000..57eaa5a
--- /dev/null
@@ -0,0 +1,692 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include <asm/cputype.h>
+#include <asm/exception.h>
+#include <asm/smp_plat.h>
+
+#include "irq-gic-common.h"
+#include "irqchip.h"
+
+struct gic_chip_data {
+       void __iomem            *dist_base;
+       void __iomem            **redist_base;
+       void __percpu __iomem   **rdist;
+       struct irq_domain       *domain;
+       u64                     redist_stride;
+       u32                     redist_regions;
+       unsigned int            irq_nr;
+};
+
+static struct gic_chip_data gic_data __read_mostly;
+
+#define gic_data_rdist()               (this_cpu_ptr(gic_data.rdist))
+#define gic_data_rdist_rd_base()       (*gic_data_rdist())
+#define gic_data_rdist_sgi_base()      (gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE      0xf0
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+       return d->hwirq;
+}
+
+static inline int gic_irq_in_rdist(struct irq_data *d)
+{
+       return gic_irq(d) < 32;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+       if (gic_irq_in_rdist(d))        /* SGI+PPI -> SGI_base for this CPU */
+               return gic_data_rdist_sgi_base();
+
+       if (d->hwirq <= 1023)           /* SPI -> dist_base */
+               return gic_data.dist_base;
+
+       if (d->hwirq >= 8192)
+               BUG();          /* LPI Detected!!! */
+
+       return NULL;
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base)
+{
+       u32 count = 1000000;    /* 1s! */
+
+       while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) {
+               count--;
+               if (!count) {
+                       pr_err_ratelimited("RWP timeout, gone fishing\n");
+                       return;
+               }
+               cpu_relax();
+               udelay(1);
+       }
+}
+
+/* Wait for completion of a distributor change */
+static void gic_dist_wait_for_rwp(void)
+{
+       gic_do_wait_for_rwp(gic_data.dist_base);
+}
+
+/* Wait for completion of a redistributor change */
+static void gic_redist_wait_for_rwp(void)
+{
+       gic_do_wait_for_rwp(gic_data_rdist_rd_base());
+}
+
+/* Low level accessors */
+static u64 gic_read_iar(void)
+{
+       u64 irqstat;
+
+       asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+       return irqstat;
+}
+
+static void gic_write_pmr(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val));
+}
+
+static void gic_write_ctlr(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val));
+       isb();
+}
+
+static void gic_write_grpen1(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val));
+       isb();
+}
+
+static void gic_write_sgi1r(u64 val)
+{
+       asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+}
+
+static void gic_enable_sre(void)
+{
+       u64 val;
+
+       asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+       val |= ICC_SRE_EL1_SRE;
+       asm volatile("msr_s " __stringify(ICC_SRE_EL1) ", %0" : : "r" (val));
+       isb();
+
+       /*
+        * Need to check that the SRE bit has actually been set. If
+        * not, it means that SRE is disabled at EL2. We're going to
+        * die painfully, and there is nothing we can do about it.
+        *
+        * Kindly inform the luser.
+        */
+       asm volatile("mrs_s %0, " __stringify(ICC_SRE_EL1) : "=r" (val));
+       if (!(val & ICC_SRE_EL1_SRE))
+               pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+}
+
+static void gic_enable_redist(void)
+{
+       void __iomem *rbase;
+       u32 count = 1000000;    /* 1s! */
+       u32 val;
+
+       rbase = gic_data_rdist_rd_base();
+
+       /* Wake up this CPU redistributor */
+       val = readl_relaxed(rbase + GICR_WAKER);
+       val &= ~GICR_WAKER_ProcessorSleep;
+       writel_relaxed(val, rbase + GICR_WAKER);
+
+       while (readl_relaxed(rbase + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) {
+               count--;
+               if (!count) {
+                       pr_err_ratelimited("redist didn't wake up...\n");
+                       return;
+               }
+               cpu_relax();
+               udelay(1);
+       }
+}
+
+/*
+ * Routines to disable, enable, EOI and route interrupts
+ */
+static void gic_poke_irq(struct irq_data *d, u32 offset)
+{
+       u32 mask = 1 << (gic_irq(d) % 32);
+       void (*rwp_wait)(void);
+       void __iomem *base;
+
+       if (gic_irq_in_rdist(d)) {
+               base = gic_data_rdist_sgi_base();
+               rwp_wait = gic_redist_wait_for_rwp;
+       } else {
+               base = gic_data.dist_base;
+               rwp_wait = gic_dist_wait_for_rwp;
+       }
+
+       writel_relaxed(mask, base + offset + (gic_irq(d) / 32) * 4);
+       rwp_wait();
+}
+
+static int gic_peek_irq(struct irq_data *d, u32 offset)
+{
+       u32 mask = 1 << (gic_irq(d) % 32);
+       void __iomem *base;
+
+       if (gic_irq_in_rdist(d))
+               base = gic_data_rdist_sgi_base();
+       else
+               base = gic_data.dist_base;
+
+       return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
+}
+
+static void gic_mask_irq(struct irq_data *d)
+{
+       gic_poke_irq(d, GICD_ICENABLER);
+}
+
+static void gic_unmask_irq(struct irq_data *d)
+{
+       gic_poke_irq(d, GICD_ISENABLER);
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+       gic_write_eoir(gic_irq(d));
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+       unsigned int irq = gic_irq(d);
+       void (*rwp_wait)(void);
+       void __iomem *base;
+
+       /* Interrupt configuration for SGIs can't be changed */
+       if (irq < 16)
+               return -EINVAL;
+
+       if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+               return -EINVAL;
+
+       if (gic_irq_in_rdist(d)) {
+               base = gic_data_rdist_sgi_base();
+               rwp_wait = gic_redist_wait_for_rwp;
+       } else {
+               base = gic_data.dist_base;
+               rwp_wait = gic_dist_wait_for_rwp;
+       }
+
+       gic_configure_irq(irq, type, base, rwp_wait);
+
+       return 0;
+}
+
+static u64 gic_mpidr_to_affinity(u64 mpidr)
+{
+       u64 aff;
+
+       aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
+              MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+       return aff;
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+       u64 irqnr;
+
+       do {
+               irqnr = gic_read_iar();
+
+               if (likely(irqnr > 15 && irqnr < 1020)) {
+                       u64 irq = irq_find_mapping(gic_data.domain, irqnr);
+                       if (likely(irq)) {
+                               handle_IRQ(irq, regs);
+                               continue;
+                       }
+
+                       WARN_ONCE(true, "Unexpected SPI received!\n");
+                       gic_write_eoir(irqnr);
+               }
+               if (irqnr < 16) {
+                       gic_write_eoir(irqnr);
+#ifdef CONFIG_SMP
+                       handle_IPI(irqnr, regs);
+#else
+                       WARN_ONCE(true, "Unexpected SGI received!\n");
+#endif
+                       continue;
+               }
+       } while (irqnr != ICC_IAR1_EL1_SPURIOUS);
+}
+
+static void __init gic_dist_init(void)
+{
+       unsigned int i;
+       u64 affinity;
+       void __iomem *base = gic_data.dist_base;
+
+       /* Disable the distributor */
+       writel_relaxed(0, base + GICD_CTLR);
+       gic_dist_wait_for_rwp();
+
+       gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
+
+       /* Enable distributor with ARE, Group1 */
+       writel_relaxed(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1,
+                      base + GICD_CTLR);
+
+       /*
+        * Set all global interrupts to the boot CPU only. ARE must be
+        * enabled.
+        */
+       affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id()));
+       for (i = 32; i < gic_data.irq_nr; i++)
+               writeq_relaxed(affinity, base + GICD_IROUTER + i * 8);
+}
+
+static int gic_populate_rdist(void)
+{
+       u64 mpidr = cpu_logical_map(smp_processor_id());
+       u64 typer;
+       u32 aff;
+       int i;
+
+       /*
+        * Convert affinity to a 32bit value that can be matched to
+        * GICR_TYPER bits [63:32].
+        */
+       aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+              MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+       for (i = 0; i < gic_data.redist_regions; i++) {
+               void __iomem *ptr = gic_data.redist_base[i];
+               u32 reg;
+
+               reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
+               if (reg != GIC_PIDR2_ARCH_GICv3 &&
+                   reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
+                       pr_warn("No redistributor present @%p\n", ptr);
+                       break;
+               }
+
+               do {
+                       typer = readq_relaxed(ptr + GICR_TYPER);
+                       if ((typer >> 32) == aff) {
+                               gic_data_rdist_rd_base() = ptr;
+                               pr_info("CPU%d: found redistributor %llx @%p\n",
+                                       smp_processor_id(),
+                                       (unsigned long long)mpidr, ptr);
+                               return 0;
+                       }
+
+                       if (gic_data.redist_stride) {
+                               ptr += gic_data.redist_stride;
+                       } else {
+                               ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
+                               if (typer & GICR_TYPER_VLPIS)
+                                       ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
+                       }
+               } while (!(typer & GICR_TYPER_LAST));
+       }
+
+       /* We couldn't even deal with ourselves... */
+       WARN(true, "CPU%d: mpidr %llx has no re-distributor!\n",
+            smp_processor_id(), (unsigned long long)mpidr);
+       return -ENODEV;
+}
+
+static void gic_cpu_init(void)
+{
+       void __iomem *rbase;
+
+       /* Register ourselves with the rest of the world */
+       if (gic_populate_rdist())
+               return;
+
+       gic_enable_redist();
+
+       rbase = gic_data_rdist_sgi_base();
+
+       gic_cpu_config(rbase, gic_redist_wait_for_rwp);
+
+       /* Enable system registers */
+       gic_enable_sre();
+
+       /* Set priority mask register */
+       gic_write_pmr(DEFAULT_PMR_VALUE);
+
+       /* EOI deactivates interrupt too (mode 0) */
+       gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+
+       /* ... and let's hit the road... */
+       gic_write_grpen1(1);
+}
+
+#ifdef CONFIG_SMP
+static int gic_secondary_init(struct notifier_block *nfb,
+                             unsigned long action, void *hcpu)
+{
+       if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
+               gic_cpu_init();
+       return NOTIFY_OK;
+}
+
+/*
+ * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
+ * priority because the GIC needs to be up before the ARM generic timers.
+ */
+static struct notifier_block gic_cpu_notifier = {
+       .notifier_call = gic_secondary_init,
+       .priority = 100,
+};
+
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+                                  u64 cluster_id)
+{
+       int cpu = *base_cpu;
+       u64 mpidr = cpu_logical_map(cpu);
+       u16 tlist = 0;
+
+       while (cpu < nr_cpu_ids) {
+               /*
+                * If we ever get a cluster of more than 16 CPUs, just
+                * scream and skip that CPU.
+                */
+               if (WARN_ON((mpidr & 0xff) >= 16))
+                       goto out;
+
+               tlist |= 1 << (mpidr & 0xf);
+
+               cpu = cpumask_next(cpu, mask);
+               if (cpu == nr_cpu_ids)
+                       goto out;
+
+               mpidr = cpu_logical_map(cpu);
+
+               if (cluster_id != (mpidr & ~0xffUL)) {
+                       cpu--;
+                       goto out;
+               }
+       }
+out:
+       *base_cpu = cpu;
+       return tlist;
+}
+
+static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+       u64 val;
+
+       val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48        |
+              MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32        |
+              irq << 24                                        |
+              MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16        |
+              tlist);
+
+       pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+       gic_write_sgi1r(val);
+}
+
+static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+{
+       int cpu;
+
+       if (WARN_ON(irq >= 16))
+               return;
+
+       /*
+        * Ensure that stores to Normal memory are visible to the
+        * other CPUs before issuing the IPI.
+        */
+       smp_wmb();
+
+       for_each_cpu_mask(cpu, *mask) {
+               u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+               u16 tlist;
+
+               tlist = gic_compute_target_list(&cpu, mask, cluster_id);
+               gic_send_sgi(cluster_id, tlist, irq);
+       }
+
+       /* Force the above writes to ICC_SGI1R_EL1 to be executed */
+       isb();
+}
+
+static void gic_smp_init(void)
+{
+       set_smp_cross_call(gic_raise_softirq);
+       register_cpu_notifier(&gic_cpu_notifier);
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+                           bool force)
+{
+       unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       void __iomem *reg;
+       int enabled;
+       u64 val;
+
+       if (gic_irq_in_rdist(d))
+               return -EINVAL;
+
+       /* If interrupt was enabled, disable it first */
+       enabled = gic_peek_irq(d, GICD_ISENABLER);
+       if (enabled)
+               gic_mask_irq(d);
+
+       reg = gic_dist_base(d) + GICD_IROUTER + (gic_irq(d) * 8);
+       val = gic_mpidr_to_affinity(cpu_logical_map(cpu));
+
+       writeq_relaxed(val, reg);
+
+       /*
+        * If the interrupt was enabled, enable it again. Otherwise,
+        * just wait for the distributor to have digested our changes.
+        */
+       if (enabled)
+               gic_unmask_irq(d);
+       else
+               gic_dist_wait_for_rwp();
+
+       return IRQ_SET_MASK_OK;
+}
+#else
+#define gic_set_affinity       NULL
+#define gic_smp_init()         do { } while(0)
+#endif
+
+static struct irq_chip gic_chip = {
+       .name                   = "GICv3",
+       .irq_mask               = gic_mask_irq,
+       .irq_unmask             = gic_unmask_irq,
+       .irq_eoi                = gic_eoi_irq,
+       .irq_set_type           = gic_set_type,
+       .irq_set_affinity       = gic_set_affinity,
+};
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+                             irq_hw_number_t hw)
+{
+       /* SGIs are private to the core kernel */
+       if (hw < 16)
+               return -EPERM;
+       /* PPIs */
+       if (hw < 32) {
+               irq_set_percpu_devid(irq);
+               irq_set_chip_and_handler(irq, &gic_chip,
+                                        handle_percpu_devid_irq);
+               set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
+       }
+       /* SPIs */
+       if (hw >= 32 && hw < gic_data.irq_nr) {
+               irq_set_chip_and_handler(irq, &gic_chip,
+                                        handle_fasteoi_irq);
+               set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+       }
+       irq_set_chip_data(irq, d->host_data);
+       return 0;
+}
+
+static int gic_irq_domain_xlate(struct irq_domain *d,
+                               struct device_node *controller,
+                               const u32 *intspec, unsigned int intsize,
+                               unsigned long *out_hwirq, unsigned int *out_type)
+{
+       if (d->of_node != controller)
+               return -EINVAL;
+       if (intsize < 3)
+               return -EINVAL;
+
+       switch (intspec[0]) {
+       case 0:                 /* SPI */
+               *out_hwirq = intspec[1] + 32;
+               break;
+       case 1:                 /* PPI */
+               *out_hwirq = intspec[1] + 16;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+       return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_ops = {
+       .map = gic_irq_domain_map,
+       .xlate = gic_irq_domain_xlate,
+};
+
+static int __init gic_of_init(struct device_node *node, struct device_node *parent)
+{
+       void __iomem *dist_base;
+       void __iomem **redist_base;
+       u64 redist_stride;
+       u32 redist_regions;
+       u32 reg;
+       int gic_irqs;
+       int err;
+       int i;
+
+       dist_base = of_iomap(node, 0);
+       if (!dist_base) {
+               pr_err("%s: unable to map gic dist registers\n",
+                       node->full_name);
+               return -ENXIO;
+       }
+
+       reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+       if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) {
+               pr_err("%s: no distributor detected, giving up\n",
+                       node->full_name);
+               err = -ENODEV;
+               goto out_unmap_dist;
+       }
+
+       if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
+               redist_regions = 1;
+
+       redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
+       if (!redist_base) {
+               err = -ENOMEM;
+               goto out_unmap_dist;
+       }
+
+       for (i = 0; i < redist_regions; i++) {
+               redist_base[i] = of_iomap(node, 1 + i);
+               if (!redist_base[i]) {
+                       pr_err("%s: couldn't map region %d\n",
+                              node->full_name, i);
+                       err = -ENODEV;
+                       goto out_unmap_rdist;
+               }
+       }
+
+       if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
+               redist_stride = 0;
+
+       gic_data.dist_base = dist_base;
+       gic_data.redist_base = redist_base;
+       gic_data.redist_regions = redist_regions;
+       gic_data.redist_stride = redist_stride;
+
+       /*
+        * Find out how many interrupts are supported.
+        * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
+        */
+       gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
+       gic_irqs = (gic_irqs + 1) * 32;
+       if (gic_irqs > 1020)
+               gic_irqs = 1020;
+       gic_data.irq_nr = gic_irqs;
+
+       gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
+                                             &gic_data);
+       gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
+
+       if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
+               err = -ENOMEM;
+               goto out_free;
+       }
+
+       set_handle_irq(gic_handle_irq);
+
+       gic_smp_init();
+       gic_dist_init();
+       gic_cpu_init();
+
+       return 0;
+
+out_free:
+       if (gic_data.domain)
+               irq_domain_remove(gic_data.domain);
+       free_percpu(gic_data.rdist);
+out_unmap_rdist:
+       for (i = 0; i < redist_regions; i++)
+               if (redist_base[i])
+                       iounmap(redist_base[i]);
+       kfree(redist_base);
+out_unmap_dist:
+       iounmap(dist_base);
+       return err;
+}
+
+IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
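
gic_mpidr_to_affinity() in the new driver repacks the CPU's MPIDR affinity fields into the GICD_IROUTER layout (Aff3 at bits [39:32], Aff2 at [23:16], Aff1 at [15:8], Aff0 at [7:0]). Worked through for one illustrative value:

    /* Worked example: mpidr = 0x0102 (Aff1 = 1, Aff0 = 2).
     *   aff = (0 << 32) | (0 << 16) | (1 << 8) | 2 = 0x102
     * so writeq_relaxed(aff, base + GICD_IROUTER + irq * 8) routes the SPI
     * to core 2 of cluster 1.
     */
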
index 7e11c9d..9c1f883 100644 (file)
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/cputype.h>
 #include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
 
+#include "irq-gic-common.h"
 #include "irqchip.h"
 
 union gic_base {
@@ -188,12 +190,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 {
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);
-       u32 enablemask = 1 << (gicirq % 32);
-       u32 enableoff = (gicirq / 32) * 4;
-       u32 confmask = 0x2 << ((gicirq % 16) * 2);
-       u32 confoff = (gicirq / 16) * 4;
-       bool enabled = false;
-       u32 val;
 
        /* Interrupt configuration for SGIs can't be changed */
        if (gicirq < 16)
@@ -207,25 +203,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
        if (gic_arch_extn.irq_set_type)
                gic_arch_extn.irq_set_type(d, type);
 
-       val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
-       if (type == IRQ_TYPE_LEVEL_HIGH)
-               val &= ~confmask;
-       else if (type == IRQ_TYPE_EDGE_RISING)
-               val |= confmask;
-
-       /*
-        * As recommended by the spec, disable the interrupt before changing
-        * the configuration
-        */
-       if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
-               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
-               enabled = true;
-       }
-
-       writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
-
-       if (enabled)
-               writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
+       gic_configure_irq(gicirq, type, base, NULL);
 
        raw_spin_unlock(&irq_controller_lock);
 
@@ -386,12 +364,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
 
        writel_relaxed(0, base + GIC_DIST_CTRL);
 
-       /*
-        * Set all global interrupts to be level triggered, active low.
-        */
-       for (i = 32; i < gic_irqs; i += 16)
-               writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
-
        /*
         * Set all global interrupts to this CPU only.
         */
@@ -401,18 +373,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
        for (i = 32; i < gic_irqs; i += 4)
                writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
-       /*
-        * Set priority on all global interrupts.
-        */
-       for (i = 32; i < gic_irqs; i += 4)
-               writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
-
-       /*
-        * Disable all interrupts.  Leave the PPI and SGIs alone
-        * as these enables are banked registers.
-        */
-       for (i = 32; i < gic_irqs; i += 32)
-               writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
+       gic_dist_config(base, gic_irqs, NULL);
 
        writel_relaxed(1, base + GIC_DIST_CTRL);
 }
@@ -439,18 +400,7 @@ static void gic_cpu_init(struct gic_chip_data *gic)
                if (i != cpu)
                        gic_cpu_map[i] &= ~cpu_mask;
 
-       /*
-        * Deal with the banked PPI and SGI interrupts - disable all
-        * PPI interrupts, ensure all SGI interrupts are enabled.
-        */
-       writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
-       writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
-
-       /*
-        * Set priority on PPI and SGI interrupts
-        */
-       for (i = 0; i < 32; i += 4)
-               writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+       gic_cpu_config(dist_base, NULL);
 
        writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
        writel_relaxed(1, base + GIC_CPU_CTRL);
@@ -954,7 +904,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                }
 
                for_each_possible_cpu(cpu) {
-                       unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+                       u32 mpidr = cpu_logical_map(cpu);
+                       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+                       unsigned long offset = percpu_offset * core_id;
                        *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
                        *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
                }
@@ -1071,8 +1023,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
        gic_cnt++;
        return 0;
 }
+IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
 IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
 IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
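
The percpu_offset hunk above matters on banked-GIC systems whose MPIDR values carry cluster bits: cpu_logical_map() returns the whole MPIDR, not a small per-cluster core index. A worked example (values illustrative):

    /* Worked example: cpu_logical_map(cpu) = 0x0100 (cluster 1, core 0)
     * with percpu_offset = 0x8000.
     *   old: offset = 0x8000 * 0x0100 = 0x800000   (way past the GIC)
     *   new: core_id = MPIDR_AFFINITY_LEVEL(0x0100, 0) = 0
     *        offset  = 0x8000 * 0 = 0              (as intended)
     */
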
 
index 3fdda3a..6ce6bd3 100644 (file)
@@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = {
 };
 
 static struct spear_shirq spear320_shirq_ras3 = {
-       .irq_nr = 3,
+       .irq_nr = 7,
        .irq_bit_off = 0,
        .invalid_irq = 1,
        .regs = {
index c44950d..b7ae0a0 100644 (file)
@@ -2400,6 +2400,7 @@ allocerr:
 error:
        freeurbs(cs);
        usb_set_intfdata(interface, NULL);
+       usb_put_dev(udev);
        gigaset_freecs(cs);
        return rc;
 }
index d9edcc9..97465ac 100644 (file)
@@ -16,7 +16,7 @@ config ISDN_DRV_HISAX
          also to the configuration option of the driver for your particular
          card, below.
 
-if ISDN_DRV_HISAX!=n
+if ISDN_DRV_HISAX
 
 comment "D-channel protocol features"
 
@@ -348,10 +348,6 @@ config HISAX_ENTERNOW_PCI
          This enables HiSax support for the Formula-n enter:now PCI
          ISDN card.
 
-endif
-
-if ISDN_DRV_HISAX
-
 config HISAX_DEBUG
        bool "HiSax debugging"
        help
@@ -420,11 +416,6 @@ config HISAX_FRITZ_PCIPNP
          (the latter also needs you to select "ISA Plug and Play support"
          from the menu "Plug and Play configuration")
 
-config HISAX_AVM_A1_PCMCIA
-       bool
-       depends on HISAX_AVM_A1_CS
-       default y
-
 endif
 
 endmenu
index 0df6691..8dc791b 100644 (file)
@@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
                        memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
                        l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
 
-                       if (ic->parm.ni1_io.timeout > 0)
-                               if (!(pc = ni1_new_l3_process(st, -1)))
-                               { free_invoke_id(st, id);
+                       if (ic->parm.ni1_io.timeout > 0) {
+                               pc = ni1_new_l3_process(st, -1);
+                               if (!pc) {
+                                       free_invoke_id(st, id);
                                        return (-2);
                                }
-                       pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */
-                       pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */
+                               /* remember id */
+                               pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
+                               /* and procedure */
+                               pc->prot.ni1.proc = ic->parm.ni1_io.proc;
+                       }
 
                        if (!(skb = l3_alloc_skb(l)))
                        { free_invoke_id(st, id);
index 61ac632..62f0688 100644 (file)
@@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
 {
        struct sock_fprog uprog;
        struct sock_filter *code = NULL;
-       int len, err;
+       int len;
 
        if (copy_from_user(&uprog, arg, sizeof(uprog)))
                return -EFAULT;
@@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
        if (IS_ERR(code))
                return PTR_ERR(code);
 
-       err = sk_chk_filter(code, uprog.len);
-       if (err) {
-               kfree(code);
-               return err;
-       }
-
        *p = code;
        return uprog.len;
 }
@@ -644,9 +638,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                fprog.len = len;
                fprog.filter = code;
 
-               if (is->pass_filter)
+               if (is->pass_filter) {
                        sk_unattached_filter_destroy(is->pass_filter);
-               err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+                       is->pass_filter = NULL;
+               }
+               if (fprog.filter != NULL)
+                       err = sk_unattached_filter_create(&is->pass_filter,
+                                                         &fprog);
+               else
+                       err = 0;
                kfree(code);
 
                return err;
@@ -663,9 +663,15 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                fprog.len = len;
                fprog.filter = code;
 
-               if (is->active_filter)
+               if (is->active_filter) {
                        sk_unattached_filter_destroy(is->active_filter);
-               err = sk_unattached_filter_create(&is->active_filter, &fprog);
+                       is->active_filter = NULL;
+               }
+               if (fprog.filter != NULL)
+                       err = sk_unattached_filter_create(&is->active_filter,
+                                                         &fprog);
+               else
+                       err = 0;
                kfree(code);
 
                return err;
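
Two things change in the ioctl paths above: the separate sk_chk_filter() pass is dropped because sk_unattached_filter_create() validates the program itself, and an empty program now detaches the filter instead of attaching a zero-length one. A condensed sketch of the resulting replace-or-detach pattern (the helper name is hypothetical, and the sock_fprog_kern program type is an assumption based on kernels of this era):

    /* Sketch of the replace-or-detach pattern the hunks establish. */
    static int replace_filter_sketch(struct sk_filter **slot,
                                     struct sock_fprog_kern *fprog)
    {
            int err = 0;

            if (*slot) {
                    sk_unattached_filter_destroy(*slot);
                    *slot = NULL;
            }
            if (fprog->filter != NULL)      /* NULL: caller wants no filter */
                    err = sk_unattached_filter_create(slot, fprog);
            return err;
    }
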
index 23b4a3b..4eab93a 100644 (file)
@@ -1257,7 +1257,8 @@ static unsigned int smu_fpoll(struct file *file, poll_table *wait)
                if (pp->busy && pp->cmd.status != 1)
                        mask |= POLLIN;
                spin_unlock_irqrestore(&pp->lock, flags);
-       } if (pp->mode == smu_file_events) {
+       }
+       if (pp->mode == smu_file_events) {
                /* Not yet implemented */
        }
        return mask;
index 4e84095..ab472c5 100644 (file)
@@ -614,16 +614,6 @@ static void write_endio(struct bio *bio, int error)
        wake_up_bit(&b->state, B_WRITING);
 }
 
-/*
- * This function is called when wait_on_bit is actually waiting.
- */
-static int do_io_schedule(void *word)
-{
-       io_schedule();
-
-       return 0;
-}
-
 /*
  * Initiate a write on a dirty buffer, but don't wait for it.
  *
@@ -640,8 +630,7 @@ static void __write_dirty_buffer(struct dm_buffer *b,
                return;
 
        clear_bit(B_DIRTY, &b->state);
-       wait_on_bit_lock(&b->state, B_WRITING,
-                        do_io_schedule, TASK_UNINTERRUPTIBLE);
+       wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 
        if (!write_list)
                submit_io(b, WRITE, b->block, write_endio);
@@ -675,9 +664,9 @@ static void __make_buffer_clean(struct dm_buffer *b)
        if (!b->state)  /* fast case */
                return;
 
-       wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+       wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
        __write_dirty_buffer(b, NULL);
-       wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+       wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
 }
 
 /*
@@ -1030,7 +1019,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
        if (need_submit)
                submit_io(b, READ, b->block, read_endio);
 
-       wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
+       wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
 
        if (b->read_error) {
                int error = b->read_error;
@@ -1209,15 +1198,13 @@ again:
                                dropped_lock = 1;
                                b->hold_count++;
                                dm_bufio_unlock(c);
-                               wait_on_bit(&b->state, B_WRITING,
-                                           do_io_schedule,
-                                           TASK_UNINTERRUPTIBLE);
+                               wait_on_bit_io(&b->state, B_WRITING,
+                                              TASK_UNINTERRUPTIBLE);
                                dm_bufio_lock(c);
                                b->hold_count--;
                        } else
-                               wait_on_bit(&b->state, B_WRITING,
-                                           do_io_schedule,
-                                           TASK_UNINTERRUPTIBLE);
+                               wait_on_bit_io(&b->state, B_WRITING,
+                                              TASK_UNINTERRUPTIBLE);
                }
 
                if (!test_bit(B_DIRTY, &b->state) &&
@@ -1321,15 +1308,15 @@ retry:
 
        __write_dirty_buffer(b, NULL);
        if (b->hold_count == 1) {
-               wait_on_bit(&b->state, B_WRITING,
-                           do_io_schedule, TASK_UNINTERRUPTIBLE);
+               wait_on_bit_io(&b->state, B_WRITING,
+                              TASK_UNINTERRUPTIBLE);
                set_bit(B_DIRTY, &b->state);
                __unlink_buffer(b);
                __link_buffer(b, new_block, LIST_DIRTY);
        } else {
                sector_t old_block;
-               wait_on_bit_lock(&b->state, B_WRITING,
-                                do_io_schedule, TASK_UNINTERRUPTIBLE);
+               wait_on_bit_lock_io(&b->state, B_WRITING,
+                                   TASK_UNINTERRUPTIBLE);
                /*
                 * Relink buffer to "new_block" so that write_callback
                 * sees "new_block" as a block number.
@@ -1341,8 +1328,8 @@ retry:
                __unlink_buffer(b);
                __link_buffer(b, new_block, b->list_mode);
                submit_io(b, WRITE, new_block, write_endio);
-               wait_on_bit(&b->state, B_WRITING,
-                           do_io_schedule, TASK_UNINTERRUPTIBLE);
+               wait_on_bit_io(&b->state, B_WRITING,
+                              TASK_UNINTERRUPTIBLE);
                __unlink_buffer(b);
                __link_buffer(b, old_block, b->list_mode);
        }
@@ -1541,7 +1528,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
        BUG_ON(block_size < 1 << SECTOR_SHIFT ||
               (block_size & (block_size - 1)));
 
-       c = kmalloc(sizeof(*c), GFP_KERNEL);
+       c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c) {
                r = -ENOMEM;
                goto bad_client;
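
The dm-bufio hunks are a mechanical conversion: the old wait_on_bit()/wait_on_bit_lock() calls needed a caller-supplied action callback that invoked io_schedule(), while the newer *_io() variants do that internally, which is why do_io_schedule() can be deleted. The shape of the conversion, side by side (bit names from the driver):

    /* before: action callback supplied by the caller
     *      wait_on_bit(&b->state, B_WRITING, do_io_schedule,
     *                  TASK_UNINTERRUPTIBLE);
     *
     * after: io_schedule() happens inside the helper
     *      wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
     */
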
index 4ead4ba..d2899e7 100644 (file)
@@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
 
        disk_super = dm_block_data(sblock);
 
+       /* Verify the data block size hasn't changed */
+       if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
+               DMERR("changing the data block size (from %u to %llu) is not supported",
+                     le32_to_cpu(disk_super->data_block_size),
+                     (unsigned long long)cmd->data_block_size);
+               r = -EINVAL;
+               goto bad;
+       }
+
        r = __check_incompat_features(disk_super, cmd);
        if (r < 0)
                goto bad;
index 5f054c4..2c63326 100644 (file)
@@ -231,7 +231,7 @@ struct cache {
        /*
         * cache_size entries, dirty if set
         */
-       dm_cblock_t nr_dirty;
+       atomic_t nr_dirty;
        unsigned long *dirty_bitset;
 
        /*
@@ -492,7 +492,7 @@ static bool is_dirty(struct cache *cache, dm_cblock_t b)
 static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
 {
        if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
-               cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
+               atomic_inc(&cache->nr_dirty);
                policy_set_dirty(cache->policy, oblock);
        }
 }
@@ -501,8 +501,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl
 {
        if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
                policy_clear_dirty(cache->policy, oblock);
-               cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
-               if (!from_cblock(cache->nr_dirty))
+               if (atomic_dec_return(&cache->nr_dirty) == 0)
                        dm_table_event(cache->ti->table);
        }
 }
@@ -2269,7 +2268,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
        atomic_set(&cache->quiescing_ack, 0);
 
        r = -ENOMEM;
-       cache->nr_dirty = 0;
+       atomic_set(&cache->nr_dirty, 0);
        cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
        if (!cache->dirty_bitset) {
                *error = "could not allocate dirty bitset";
@@ -2808,7 +2807,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
 
                residency = policy_residency(cache->policy);
 
-               DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %llu ",
+               DMEMIT("%u %llu/%llu %u %llu/%llu %u %u %u %u %u %u %lu ",
                       (unsigned)(DM_CACHE_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
                       (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
                       (unsigned long long)nr_blocks_metadata,
@@ -2821,7 +2820,7 @@ static void cache_status(struct dm_target *ti, status_type_t type,
                       (unsigned) atomic_read(&cache->stats.write_miss),
                       (unsigned) atomic_read(&cache->stats.demotion),
                       (unsigned) atomic_read(&cache->stats.promotion),
-                      (unsigned long long) from_cblock(cache->nr_dirty));
+                      (unsigned long) atomic_read(&cache->nr_dirty));
 
                if (writethrough_mode(&cache->features))
                        DMEMIT("1 writethrough ");
index 53b2132..4cba2d8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
+ * Copyright (C) 2003 Jana Saout <jana@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
  * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
  * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
@@ -1996,6 +1996,6 @@ static void __exit dm_crypt_exit(void)
 module_init(dm_crypt_init);
 module_exit(dm_crypt_exit);
 
-MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_AUTHOR("Jana Saout <jana@saout.de>");
 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
 MODULE_LICENSE("GPL");
index 3842ac7..db404a0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/device-mapper.h>
 
 #include <linux/bio.h>
+#include <linux/completion.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -32,7 +33,7 @@ struct dm_io_client {
 struct io {
        unsigned long error_bits;
        atomic_t count;
-       struct task_struct *sleeper;
+       struct completion *wait;
        struct dm_io_client *client;
        io_notify_fn callback;
        void *context;
@@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
                        invalidate_kernel_vmap_range(io->vma_invalidate_address,
                                                     io->vma_invalidate_size);
 
-               if (io->sleeper)
-                       wake_up_process(io->sleeper);
+               if (io->wait)
+                       complete(io->wait);
 
                else {
                        unsigned long r = io->error_bits;
@@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
         */
        volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
        struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
+       DECLARE_COMPLETION_ONSTACK(wait);
 
        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
@@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
-       io->sleeper = current;
+       io->wait = &wait;
        io->client = client;
 
        io->vma_invalidate_address = dp->vma_invalidate_address;
@@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 
        dispatch_io(rw, num_regions, where, dp, io, 1);
 
-       while (1) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-
-               if (!atomic_read(&io->count))
-                       break;
-
-               io_schedule();
-       }
-       set_current_state(TASK_RUNNING);
+       wait_for_completion_io(&wait);
 
        if (error_bits)
                *error_bits = io->error_bits;
@@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
        io = mempool_alloc(client->pool, GFP_NOIO);
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
-       io->sleeper = NULL;
+       io->wait = NULL;
        io->client = client;
        io->callback = fn;
        io->context = context;
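
Replacing the hand-rolled sleep loop (saving current and calling wake_up_process() from dec_count()) with an on-stack completion shrinks the wait to a single call, pairs safely with complete() from interrupt context, and wait_for_completion_io() accounts the blocked time as iowait. A rough sketch of the pattern with hypothetical names:

    #include <linux/completion.h>

    struct demo_io {
            struct completion *wait;        /* NULL for async users */
    };

    static void demo_io_done(struct demo_io *io)
    {
            if (io->wait)
                    complete(io->wait);     /* ok from irq context */
    }

    static void demo_sync_io(struct demo_io *io)
    {
            DECLARE_COMPLETION_ONSTACK(wait);

            io->wait = &wait;
            /* ... submit I/O; demo_io_done() runs when it finishes ... */
            wait_for_completion_io(&wait);  /* sleeps, counted as iowait */
    }
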
index 3f6fd9d..f4167b0 100644 (file)
@@ -1611,8 +1611,9 @@ static int multipath_busy(struct dm_target *ti)
 
        spin_lock_irqsave(&m->lock, flags);
 
-       /* pg_init in progress, requeue until done */
-       if (!pg_ready(m)) {
+       /* pg_init in progress or no paths available */
+       if (m->pg_init_in_progress ||
+           (!m->nr_valid_paths && m->queue_if_no_path)) {
                busy = 1;
                goto out;
        }
index 5bd2290..864b03f 100644 (file)
@@ -1032,21 +1032,13 @@ static void start_merge(struct dm_snapshot *s)
                snapshot_merge_next_chunks(s);
 }
 
-static int wait_schedule(void *ptr)
-{
-       schedule();
-
-       return 0;
-}
-
 /*
  * Stop the merging process and wait until it finishes.
  */
 static void stop_merge(struct dm_snapshot *s)
 {
        set_bit(SHUTDOWN_MERGE, &s->state_bits);
-       wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule,
-                   TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&s->state_bits, RUNNING_MERGE, TASK_UNINTERRUPTIBLE);
        clear_bit(SHUTDOWN_MERGE, &s->state_bits);
 }
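
stop_merge() reflects this cycle's wait_on_bit() API change: the action callback is gone, so plain waiters pass just the word, the bit number and the task state, while I/O-bound waiters (as in the dm-bufio hunk near the top of this diff) move to wait_on_bit_io(). A hedged before/after sketch, with a made-up bit:

    #include <linux/wait.h>
    #include <linux/sched.h>

    static unsigned long state_bits;
    #define RUNNING_DEMO 0

    static void demo_wait(void)
    {
            /* old API took an action callback:
             *   wait_on_bit(&state_bits, RUNNING_DEMO, wait_schedule,
             *               TASK_UNINTERRUPTIBLE);
             * the new API schedule()s internally: */
            wait_on_bit(&state_bits, RUNNING_DEMO, TASK_UNINTERRUPTIBLE);
    }
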
 
index b086a94..e9d33ad 100644 (file)
@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
 
        disk_super = dm_block_data(sblock);
 
+       /* Verify the data block size hasn't changed */
+       if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
+               DMERR("changing the data block size (from %u to %llu) is not supported",
+                     le32_to_cpu(disk_super->data_block_size),
+                     (unsigned long long)pmd->data_block_size);
+               r = -EINVAL;
+               goto bad_unlock_sblock;
+       }
+
        r = __check_incompat_features(disk_super, pmd);
        if (r < 0)
                goto bad_unlock_sblock;
index c99003e..b9a64bb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
+ * Copyright (C) 2003 Jana Saout <jana@saout.de>
  *
  * This file is released under the GPL.
  */
@@ -79,6 +79,6 @@ static void __exit dm_zero_exit(void)
 module_init(dm_zero_init)
 module_exit(dm_zero_exit)
 
-MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_AUTHOR("Jana Saout <jana@saout.de>");
 MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros");
 MODULE_LICENSE("GPL");
index 437d990..32b958d 100644 (file)
@@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w);
 
 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
 
+static struct workqueue_struct *deferred_remove_workqueue;
+
 /*
  * For bio-based dm.
  * One of these is allocated per bio.
@@ -276,16 +278,24 @@ static int __init local_init(void)
        if (r)
                goto out_free_rq_tio_cache;
 
+       deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
+       if (!deferred_remove_workqueue) {
+               r = -ENOMEM;
+               goto out_uevent_exit;
+       }
+
        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
-               goto out_uevent_exit;
+               goto out_free_workqueue;
 
        if (!_major)
                _major = r;
 
        return 0;
 
+out_free_workqueue:
+       destroy_workqueue(deferred_remove_workqueue);
 out_uevent_exit:
        dm_uevent_exit();
 out_free_rq_tio_cache:
@@ -299,6 +309,7 @@ out_free_io_cache:
 static void local_exit(void)
 {
        flush_scheduled_work();
+       destroy_workqueue(deferred_remove_workqueue);
 
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_io_cache);
@@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
 
        if (atomic_dec_and_test(&md->open_count) &&
            (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
-               schedule_work(&deferred_remove_work);
+               queue_work(deferred_remove_workqueue, &deferred_remove_work);
 
        dm_put(md);
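
Giving deferred removals their own "kdmremove" queue means the exit path can destroy exactly the queue it created instead of relying on flush_scheduled_work() draining the shared system queue, and WQ_UNBOUND with max_active = 1 keeps removals serialized without pinning them to a CPU. A minimal lifecycle sketch (hypothetical names):

    #include <linux/workqueue.h>
    #include <linux/errno.h>

    static struct workqueue_struct *demo_wq;

    static void demo_fn(struct work_struct *w) { /* ... */ }
    static DECLARE_WORK(demo_work, demo_fn);

    static int demo_init(void)
    {
            demo_wq = alloc_workqueue("kdemo", WQ_UNBOUND, 1);
            if (!demo_wq)
                    return -ENOMEM;
            queue_work(demo_wq, &demo_work);
            return 0;
    }

    static void demo_exit(void)
    {
            destroy_workqueue(demo_wq);     /* flushes pending work first */
    }
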
 
index 3484685..32fc19c 100644 (file)
@@ -5599,7 +5599,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
        if (mddev->in_sync)
                info.state = (1<<MD_SB_CLEAN);
        if (mddev->bitmap && mddev->bitmap_info.offset)
-               info.state = (1<<MD_SB_BITMAP_PRESENT);
+               info.state |= (1<<MD_SB_BITMAP_PRESENT);
        info.active_disks  = insync;
        info.working_disks = working;
        info.failed_disks  = failed;
@@ -7501,6 +7501,19 @@ void md_do_sync(struct md_thread *thread)
                            rdev->recovery_offset < j)
                                j = rdev->recovery_offset;
                rcu_read_unlock();
+
+               /* If there is a bitmap, we need to make sure all
+                * writes that started before we added a spare
+                * complete before we start doing a recovery.
+                * Otherwise the write might complete and (via
+                * bitmap_endwrite) set a bit in the bitmap after the
+                * recovery has checked that bit and skipped that
+                * region.
+                */
+               if (mddev->bitmap) {
+                       mddev->pers->quiesce(mddev, 1);
+                       mddev->pers->quiesce(mddev, 0);
+               }
        }
 
        printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
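
The get_array_info() hunk is a textbook '=' versus '|=' fix: assigning (1<<MD_SB_BITMAP_PRESENT) wiped out the MD_SB_CLEAN bit set just above, so a clean array with a bitmap never reported itself clean. The difference, reduced to a runnable snippet (plain C, not the md code):

    #include <stdio.h>

    int main(void)
    {
            unsigned int state = 0;

            state = (1 << 0);       /* the "clean" flag            */
            state = (1 << 8);       /* bug: '=' drops bit 0, 0x100 */
            printf("buggy: %#x\n", state);

            state = (1 << 0);
            state |= (1 << 8);      /* fix: '|=' keeps both, 0x101 */
            printf("fixed: %#x\n", state);
            return 0;
    }
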
index 8637d2e..2e3cdcf 100644 (file)
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
                                jiffies_to_msecs(jiffies) -
                                (jiffies_to_msecs(timeout) - TIMEOUT));
 
-               if (!(cmd->args[0] >> 7) & 0x01) {
+               if (!((cmd->args[0] >> 7) & 0x01)) {
                        ret = -ETIMEDOUT;
                        goto err_mutex_unlock;
                }
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
        if (ret)
                goto err;
 
-       cmd.args[0] = 0x05;
-       cmd.args[1] = 0x00;
-       cmd.args[2] = 0xaa;
-       cmd.args[3] = 0x4d;
-       cmd.args[4] = 0x56;
-       cmd.args[5] = 0x40;
-       cmd.args[6] = 0x00;
-       cmd.args[7] = 0x00;
-       cmd.wlen = 8;
-       cmd.rlen = 1;
-       ret = si2168_cmd_execute(s, &cmd);
-       if (ret)
-               goto err;
-
        /* cold state - try to download firmware */
        dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
                        KBUILD_MODNAME, si2168_ops.info.name);
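
The condition fix in si2168_cmd_execute() (the same change appears in si2157 below) is an operator-precedence repair: '!' binds tighter than '&', so the old test parsed as (!(args[0] >> 7)) & 0x01. For a u8 shifted by 7 the two forms happen to agree, but the pattern silently breaks once the shifted value can exceed 1, and it misreads as a "bit clear" test. A standalone demonstration with a two-bit field:

    #include <stdio.h>

    int main(void)
    {
            unsigned int reg = 0x40;        /* bits 6:5 hold 0b10 */

            /* precedence trap: (!(reg >> 5)) & 0x01 == !2 & 1 == 0 */
            printf("!x & 1   -> %d\n", !(reg >> 5) & 0x01);

            /* intended: mask first, then negate -> 1 */
            printf("!(x & 1) -> %d\n", !((reg >> 5) & 0x01));
            return 0;
    }
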
index 2a343e8..53f7f06 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/firmware.h>
 #include <linux/i2c-mux.h>
 
-#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw"
+#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
 
 /* state struct */
 struct si2168 {
index 522fe00..9619be5 100644 (file)
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int ret, i;
        u8 mode, rolloff, pilot, inversion, div;
+       fe_modulation_t modulation;
 
        dev_dbg(&priv->i2c->dev,
                        "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        switch (c->delivery_system) {
        case SYS_DVBS:
+               modulation = QPSK;
                rolloff = 0;
                pilot = 2;
                break;
        case SYS_DVBS2:
+               modulation = c->modulation;
+
                switch (c->rolloff) {
                case ROLLOFF_20:
                        rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
                if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
-                       c->modulation == TDA10071_MODCOD[i].modulation &&
+                       modulation == TDA10071_MODCOD[i].modulation &&
                        c->fec_inner == TDA10071_MODCOD[i].fec) {
                        mode = TDA10071_MODCOD[i].val;
                        dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
 
        switch ((buf[1] >> 0) & 0x01) {
        case 0:
-               c->inversion = INVERSION_OFF;
+               c->inversion = INVERSION_ON;
                break;
        case 1:
-               c->inversion = INVERSION_ON;
+               c->inversion = INVERSION_OFF;
                break;
        }
 
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
        if (ret)
                goto error;
 
-       c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+       c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
 
        return ret;
 error:
index 4baf14b..4204861 100644 (file)
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
        { SYS_DVBS2, QPSK,  FEC_8_9,  0x0a },
        { SYS_DVBS2, QPSK,  FEC_9_10, 0x0b },
        /* 8PSK */
+       { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
        { SYS_DVBS2, PSK_8, FEC_3_5,  0x0c },
        { SYS_DVBS2, PSK_8, FEC_2_3,  0x0d },
        { SYS_DVBS2, PSK_8, FEC_3_4,  0x0e },
index e65c760..0006d6b 100644 (file)
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
        .read     = vb2_fop_read,
        .poll     = vb2_fop_poll,
        .mmap     = vb2_fop_mmap,
-       .ioctl    = video_ioctl2,
+       .unlocked_ioctl = video_ioctl2,
 };
 
 static const struct v4l2_ioctl_ops ts_ioctl_ops = {
index a7ed164..1e4ec69 100644 (file)
@@ -269,6 +269,7 @@ err:
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
        }
+       spin_unlock_irqrestore(&common->irqlock, flags);
 
        return ret;
 }
index 5bb085b..b431b58 100644 (file)
@@ -233,6 +233,7 @@ err:
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
        }
+       spin_unlock_irqrestore(&common->irqlock, flags);
 
        return ret;
 }
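
Both vpif hunks add the spin_unlock_irqrestore() that the error path was missing: the buffer list was walked under a lock apparently taken earlier with spin_lock_irqsave(), and returning without the unlock would leave the lock held with interrupts still disabled. The balanced shape, as a tiny hypothetical function:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(demo_irqlock);

    static int demo_start(int fail)
    {
            unsigned long flags;

            spin_lock_irqsave(&demo_irqlock, flags);
            if (fail) {
                    /* every early exit must undo lock + irq state */
                    spin_unlock_irqrestore(&demo_irqlock, flags);
                    return -EIO;
            }
            /* ... queue buffers to hardware ... */
            spin_unlock_irqrestore(&demo_irqlock, flags);
            return 0;
    }
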
index 271a752..fa4cc7b 100644 (file)
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
                        jiffies_to_msecs(jiffies) -
                        (jiffies_to_msecs(timeout) - TIMEOUT));
 
-       if (!(buf[0] >> 7) & 0x01) {
+       if (!((buf[0] >> 7) & 0x01)) {
                ret = -ETIMEDOUT;
                goto err_mutex_unlock;
        } else {
index 021e4d3..7b9b75f 100644 (file)
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
                if (ret < 0)
                        goto err;
 
-               if (tmp == 0x00)
-                       dev_dbg(&d->udev->dev,
-                                       "%s: [%d]tuner not set, using default\n",
-                                       __func__, i);
-               else
+               dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
+                               __func__, i, tmp);
+
+               /* tuner sanity check */
+               if (state->chip_type == 0x9135) {
+                       if (state->chip_version == 0x02) {
+                               /* IT9135 BX (v2) */
+                               switch (tmp) {
+                               case AF9033_TUNER_IT9135_60:
+                               case AF9033_TUNER_IT9135_61:
+                               case AF9033_TUNER_IT9135_62:
+                                       state->af9033_config[i].tuner = tmp;
+                                       break;
+                               }
+                       } else {
+                               /* IT9135 AX (v1) */
+                               switch (tmp) {
+                               case AF9033_TUNER_IT9135_38:
+                               case AF9033_TUNER_IT9135_51:
+                               case AF9033_TUNER_IT9135_52:
+                                       state->af9033_config[i].tuner = tmp;
+                                       break;
+                               }
+                       }
+               } else {
+                       /* AF9035 */
                        state->af9033_config[i].tuner = tmp;
+               }
 
-               dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
-                               __func__, i, state->af9033_config[i].tuner);
+               if (state->af9033_config[i].tuner != tmp) {
+                       dev_info(&d->udev->dev,
+                                       "%s: [%d] overriding tuner from %02x to %02x\n",
+                                       KBUILD_MODNAME, i, tmp,
+                                       state->af9033_config[i].tuner);
+               }
 
                switch (state->af9033_config[i].tuner) {
                case AF9033_TUNER_TUA9001:
index e355806..f296394 100644 (file)
@@ -253,13 +253,6 @@ static int dvb_usbv2_adapter_stream_exit(struct dvb_usb_adapter *adap)
        return usb_urb_exitv2(&adap->stream);
 }
 
-static int wait_schedule(void *ptr)
-{
-       schedule();
-
-       return 0;
-}
-
 static int dvb_usb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
 {
        struct dvb_usb_adapter *adap = dvbdmxfeed->demux->priv;
@@ -273,8 +266,7 @@ static int dvb_usb_start_feed(struct dvb_demux_feed *dvbdmxfeed)
                        dvbdmxfeed->pid, dvbdmxfeed->index);
 
        /* wait init is done */
-       wait_on_bit(&adap->state_bits, ADAP_INIT, wait_schedule,
-                       TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&adap->state_bits, ADAP_INIT, TASK_UNINTERRUPTIBLE);
 
        if (adap->active_fe == -1)
                return -EINVAL;
@@ -568,7 +560,7 @@ static int dvb_usb_fe_sleep(struct dvb_frontend *fe)
 
        if (!adap->suspend_resume_active) {
                set_bit(ADAP_SLEEP, &adap->state_bits);
-               wait_on_bit(&adap->state_bits, ADAP_STREAMING, wait_schedule,
+               wait_on_bit(&adap->state_bits, ADAP_STREAMING,
                                TASK_UNINTERRUPTIBLE);
        }
 
index 2fd1c5e..339adce 100644 (file)
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x093a, 0x2620)},
        {USB_DEVICE(0x093a, 0x2621)},
        {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+       {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2625)},
        {USB_DEVICE(0x093a, 0x2626)},
index 0500c41..6bce01a 100644 (file)
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
 }
 
 /*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
 
 /* function expects dev->io_mutex to be hold by caller */
 int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
        case V4L2_CID_MPEG_AUDIO_ENCODING:
                if (dev->flags & HDPVR_FLAG_AC3_CAP) {
                        opt->audio_codec = ctrl->val;
-                       return hdpvr_set_audio(dev, opt->audio_input,
+                       return hdpvr_set_audio(dev, opt->audio_input + 1,
                                              opt->audio_codec);
                }
                return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_AUDIO_ENCODING,
                ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
-               0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
+               0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_VIDEO_ENCODING,
                V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
index 4ae54ca..ce1c9f5 100644 (file)
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
                aspect.denominator = 9;
        } else if (ratio == 34) {
                aspect.numerator = 4;
-               aspect.numerator = 3;
+               aspect.denominator = 3;
        } else if (ratio == 68) {
                aspect.numerator = 15;
-               aspect.numerator = 9;
+               aspect.denominator = 9;
        } else {
                aspect.numerator = hor_landscape + 99;
                aspect.denominator = 100;
index 2a635b6..c880ba6 100644 (file)
@@ -601,6 +601,7 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
        pcr->slots[RTSX_MS_CARD].card_event = NULL;
        msh = host->msh;
        host->eject = true;
+       cancel_work_sync(&host->handle_req);
 
        mutex_lock(&host->host_mutex);
        if (host->req) {
index ee8204c..6cc4b6a 100644 (file)
@@ -760,6 +760,7 @@ config MFD_SYSCON
 config MFD_DAVINCI_VOICECODEC
        tristate
        select MFD_CORE
+       select REGMAP_MMIO
 
 config MFD_TI_AM335X_TSCADC
        tristate "TI ADC / Touch Screen chip support"
@@ -1225,7 +1226,7 @@ config MFD_WM8994
          functionaltiy of the device other drivers must be enabled.
 
 config MFD_STW481X
-       bool "Support for ST Microelectronics STw481x"
+       tristate "Support for ST Microelectronics STw481x"
        depends on I2C && ARCH_NOMADIK
        select REGMAP_I2C
        select MFD_CORE
@@ -1248,7 +1249,7 @@ config MCP_SA11X0
 
 # Chip drivers
 config MCP_UCB1200
-       bool "Support for UCB1200 / UCB1300"
+       tristate "Support for UCB1200 / UCB1300"
        depends on MCP_SA11X0
        select MCP
 
index a8ee4a3..cf2e6a1 100644 (file)
@@ -591,7 +591,7 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
                num_irqs = AB8500_NR_IRQS;
 
        /* If ->irq_base is zero this will give a linear mapping */
-       ab8500->domain = irq_domain_add_simple(NULL,
+       ab8500->domain = irq_domain_add_simple(ab8500->dev->of_node,
                        num_irqs, 0,
                        &ab8500_irq_ops, ab8500);
 
index a43d0c4..ee94023 100644 (file)
@@ -54,7 +54,7 @@ config AD525X_DPOT_SPI
 config ATMEL_PWM
        tristate "Atmel AT32/AT91 PWM support"
        depends on HAVE_CLK
-       depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45
+       depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
        help
          This option enables device driver support for the PWM channels
          on certain Atmel processors.  Pulse Width Modulation is used for
index 73068e5..3250fc1 100644 (file)
@@ -199,7 +199,7 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
        func = kzalloc(sizeof(*func) + sizeof(*func->template) * num,
                        GFP_KERNEL);
        if (!func)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        func->syscfg = syscfg;
        func->num_templates = num;
@@ -231,10 +231,14 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
        func->regmap = regmap_init(dev, NULL, func,
                        &vexpress_syscfg_regmap_config);
 
-       if (IS_ERR(func->regmap))
+       if (IS_ERR(func->regmap)) {
+               void *err = func->regmap;
+
                kfree(func);
-       else
-               list_add(&func->list, &syscfg->funcs);
+               return err;
+       }
+
+       list_add(&func->list, &syscfg->funcs);
 
        return func->regmap;
 }
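
The vexpress_syscfg rework aligns the helper with the kernel's pointer-error convention: a pointer-returning function reports failure with ERR_PTR(-errno), never a bare NULL, and the caller recovers the code with IS_ERR()/PTR_ERR(); note how the regmap_init() error is saved before kfree(func) so the right pointer is still returned. A condensed sketch of the convention (hypothetical type):

    #include <linux/err.h>
    #include <linux/slab.h>

    struct demo_obj { int id; };

    static struct demo_obj *demo_get(void)
    {
            struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

            if (!obj)
                    return ERR_PTR(-ENOMEM);  /* carry the errno, not NULL */
            return obj;
    }

    static int demo_user(void)
    {
            struct demo_obj *obj = demo_get();

            if (IS_ERR(obj))
                    return PTR_ERR(obj);      /* e.g. -ENOMEM */
            /* ... use obj ... */
            return 0;
    }
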
index 2421835..1916174 100644 (file)
@@ -17,7 +17,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * Maintained by: Dmitry Torokhov <dtor@vmware.com>
+ * Maintained by:      Xavier Deguillard <xdeguillard@vmware.com>
+ *                     Philip Moltmann <moltmann@vmware.com>
  */
 
 /*
index e4ec355..a7543ba 100644 (file)
 /* Atmel chips */
 #define AT49BV640D     0x02de
 #define AT49BV640DT    0x02db
+/* Sharp chips */
+#define LH28F640BFHE_PTTL90    0x00b0
+#define LH28F640BFHE_PBTL90    0x00b1
+#define LH28F640BFHE_PTTL70A   0x00b2
+#define LH28F640BFHE_PBTL70A   0x00b3
 
 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
 };
 
+static int is_LH28F640BF(struct cfi_private *cfi)
+{
+       /* Sharp LH28F640BF Family */
+       if (cfi->mfr == CFI_MFR_SHARP && (
+           cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
+           cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
+               return 1;
+       return 0;
+}
+
+static void fixup_LH28F640BF(struct mtd_info *mtd)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+       struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+       /* Reset the Partition Configuration Register on LH28F640BF
+        * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
+       if (is_LH28F640BF(cfi)) {
+               printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
+               map_write(map, CMD(0x60), 0);
+               map_write(map, CMD(0x04), 0);
+
+               /* We have set one single partition thus
+                * Simultaneous Operations are not allowed */
+               printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
+               extp->FeatureSupport &= ~512;
+       }
+}
+
 static void fixup_use_point(struct mtd_info *mtd)
 {
        struct map_info *map = mtd->priv;
@@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+       { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
+       { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
 };
 
@@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
        initial_adr = adr;
        cmd_adr = adr & ~(wbufsize-1);
 
+       /* Sharp LH28F640BF chips need the first address for the
+        * Page Buffer Program command. See Table 5 of
+        * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
+       if (is_LH28F640BF(cfi))
+               cmd_adr = adr;
+
        /* Let's determine this according to the interleave only once */
        write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
 
index 7df8694..b4f61c7 100644 (file)
@@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info)
                                        ELM_SYNDROME_FRAGMENT_1 + offset);
                        regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
                                        ELM_SYNDROME_FRAGMENT_0 + offset);
+                       break;
                default:
                        return -EINVAL;
                }
@@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)
                                        regs->elm_syndrome_fragment_1[i]);
                        elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
                                        regs->elm_syndrome_fragment_0[i]);
+                       break;
                default:
                        return -EINVAL;
                }
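
Both elm hunks insert a break that was missing before default: return -EINVAL, so the successfully handled syndrome case apparently fell straight through into the error return. The classic fallthrough shape, reduced to a runnable example:

    #include <stdio.h>

    static int demo(int type)
    {
            switch (type) {
            case 0:
                    printf("handled type 0\n");
                    break;  /* without this, control falls into default */
            default:
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            printf("demo(0) = %d\n", demo(0));  /* 0; -1 without the break */
            return 0;
    }
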
index 41167e9..4f3e80c 100644 (file)
@@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd)
                ecc->layout->oobavail += ecc->layout->oobfree[i].length;
        mtd->oobavail = ecc->layout->oobavail;
 
-       /* ECC sanity check: warn noisily if it's too weak */
-       WARN_ON(!nand_ecc_strength_good(mtd));
+       /* ECC sanity check: warn if it's too weak */
+       if (!nand_ecc_strength_good(mtd))
+               pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+                       mtd->name);
 
        /*
         * Set the number of read / write steps for one page depending on ECC
index b04e7d0..0431b46 100644 (file)
@@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
                parent = *p;
                av = rb_entry(parent, struct ubi_ainf_volume, rb);
 
-               if (vol_id < av->vol_id)
+               if (vol_id > av->vol_id)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
@@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                                pnum, err);
                        ret = err > 0 ? UBI_BAD_FASTMAP : err;
                        goto out;
-               } else if (ret == UBI_IO_BITFLIPS)
+               } else if (err == UBI_IO_BITFLIPS)
                        scrub = 1;
 
                /*
index 04f35f9..701f86c 100644 (file)
@@ -1025,10 +1025,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
                                 NETIF_F_HIGHDMA | NETIF_F_LRO)
 
+#define BOND_ENC_FEATURES      (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
+                                NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
+
 static void bond_compute_features(struct bonding *bond)
 {
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
        netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+       netdev_features_t enc_features  = BOND_ENC_FEATURES;
        struct net_device *bond_dev = bond->dev;
        struct list_head *iter;
        struct slave *slave;
@@ -1044,6 +1048,9 @@ static void bond_compute_features(struct bonding *bond)
                vlan_features = netdev_increment_features(vlan_features,
                        slave->dev->vlan_features, BOND_VLAN_FEATURES);
 
+               enc_features = netdev_increment_features(enc_features,
+                                                        slave->dev->hw_enc_features,
+                                                        BOND_ENC_FEATURES);
                dst_release_flag &= slave->dev->priv_flags;
                if (slave->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = slave->dev->hard_header_len;
@@ -1054,6 +1061,7 @@ static void bond_compute_features(struct bonding *bond)
 
 done:
        bond_dev->vlan_features = vlan_features;
+       bond_dev->hw_enc_features = enc_features;
        bond_dev->hard_header_len = max_hard_header_len;
        bond_dev->gso_max_segs = gso_max_segs;
        netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -3975,6 +3983,7 @@ void bond_setup(struct net_device *bond_dev)
                                NETIF_F_HW_VLAN_CTAG_FILTER;
 
        bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
+       bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
        bond_dev->features |= bond_dev->hw_features;
 }
 
@@ -4059,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
        }
 
        if (ad_select) {
-               bond_opt_initstr(&newval, lacp_rate);
+               bond_opt_initstr(&newval, ad_select);
                valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
                                        &newval);
                if (!valptr) {
index 824108c..12430be 100644 (file)
@@ -287,7 +287,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
                        break;
                }
 
-               priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+               priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
+                                                    resource_size(res));
                if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
                        dev_info(&pdev->dev, "control memory is not used for raminit\n");
                else
index dcf9196..ea4d4f1 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/workqueue.h>
 #include <linux/can.h>
 #include <linux/can/skb.h>
 
@@ -85,6 +86,7 @@ struct slcan {
        struct tty_struct       *tty;           /* ptr to TTY structure      */
        struct net_device       *dev;           /* easy for intr handling    */
        spinlock_t              lock;
+       struct work_struct      tx_work;        /* Flushes transmit buffer   */
 
        /* These are pointers to the malloc()ed frame buffers. */
        unsigned char           rbuff[SLC_MTU]; /* receiver buffer           */
@@ -309,36 +311,46 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
        sl->dev->stats.tx_bytes += cf->can_dlc;
 }
 
-/*
- * Called by the driver when there's room for more data.  If we have
- * more packets to send, we send them here.
- */
-static void slcan_write_wakeup(struct tty_struct *tty)
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slcan_transmit(struct work_struct *work)
 {
+       struct slcan *sl = container_of(work, struct slcan, tx_work);
        int actual;
-       struct slcan *sl = (struct slcan *) tty->disc_data;
 
+       spin_lock_bh(&sl->lock);
        /* First make sure we're connected. */
-       if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+       if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
+               spin_unlock_bh(&sl->lock);
                return;
+       }
 
-       spin_lock_bh(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
-               clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
                spin_unlock_bh(&sl->lock);
                netif_wake_queue(sl->dev);
                return;
        }
 
-       actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+       actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
        spin_unlock_bh(&sl->lock);
 }
 
+/*
+ * Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+       struct slcan *sl = tty->disc_data;
+
+       schedule_work(&sl->tx_work);
+}
+
 /* Send a can_frame to a TTY queue. */
 static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -528,6 +540,7 @@ static struct slcan *slc_alloc(dev_t line)
        sl->magic = SLCAN_MAGIC;
        sl->dev = dev;
        spin_lock_init(&sl->lock);
+       INIT_WORK(&sl->tx_work, slcan_transmit);
        slcan_devs[i] = dev;
 
        return sl;
@@ -626,8 +639,12 @@ static void slcan_close(struct tty_struct *tty)
        if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
                return;
 
+       spin_lock_bh(&sl->lock);
        tty->disc_data = NULL;
        sl->tty = NULL;
+       spin_unlock_bh(&sl->lock);
+
+       flush_work(&sl->tx_work);
 
        /* Flush network side */
        unregister_netdev(sl->dev);
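
The slcan rework defers the actual tty write from the wakeup callback into a work item, keeping slcan_write_wakeup() short and lock-free, and the close path clears sl->tty under the lock before flush_work() so the worker can never chase a tty that is being torn down. The defer-and-flush shape, sketched with hypothetical names (INIT_WORK is assumed to have run at open time):

    #include <linux/workqueue.h>
    #include <linux/spinlock.h>

    struct demo {
            spinlock_t lock;
            void *tty;                      /* resource the worker uses */
            struct work_struct tx_work;
    };

    static void demo_tx(struct work_struct *work)
    {
            struct demo *d = container_of(work, struct demo, tx_work);

            spin_lock_bh(&d->lock);
            if (!d->tty) {                  /* closed since scheduling */
                    spin_unlock_bh(&d->lock);
                    return;
            }
            /* ... push pending bytes out via d->tty ... */
            spin_unlock_bh(&d->lock);
    }

    static void demo_close(struct demo *d)
    {
            spin_lock_bh(&d->lock);
            d->tty = NULL;                  /* worker now sees NULL */
            spin_unlock_bh(&d->lock);
            flush_work(&d->tx_work);        /* wait out in-flight work */
    }
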
index 2846067..d81e716 100644 (file)
@@ -736,6 +736,7 @@ static int emac_open(struct net_device *dev)
 
        ret = emac_mdio_probe(dev);
        if (ret < 0) {
+               free_irq(dev->irq, dev);
                netdev_err(dev, "cannot probe MDIO bus\n");
                return ret;
        }
index c83584a..5a1891f 100644 (file)
@@ -339,7 +339,8 @@ static int xgbe_probe(struct platform_device *pdev)
        /* Calculate the number of Tx and Rx rings to be created */
        pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
                                     pdata->hw_feat.tx_ch_cnt);
-       if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) {
+       ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+       if (ret) {
                dev_err(dev, "error setting real tx queue count\n");
                goto err_io;
        }
index 141160e..5776e50 100644 (file)
@@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
 
        work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
 
-       if (work_done < budget) {
+       if (work_done == 0) {
                napi_complete(napi);
                /* re-enable TX interrupt */
                intrl2_1_mask_clear(ring->priv, BIT(ring->index));
        }
 
-       return work_done;
+       return 0;
 }
 
 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
                usleep_range(1000, 2000);
 }
 
-static inline int umac_reset(struct bcm_sysport_priv *priv)
+static inline void umac_reset(struct bcm_sysport_priv *priv)
 {
-       unsigned int timeout = 0;
        u32 reg;
-       int ret = 0;
-
-       umac_writel(priv, 0, UMAC_CMD);
-       while (timeout++ < 1000) {
-               reg = umac_readl(priv, UMAC_CMD);
-               if (!(reg & CMD_SW_RESET))
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == 1000) {
-               dev_err(&priv->pdev->dev,
-                       "timeout waiting for MAC to come out of reset\n");
-               ret = -ETIMEDOUT;
-       }
 
-       return ret;
+       reg = umac_readl(priv, UMAC_CMD);
+       reg |= CMD_SW_RESET;
+       umac_writel(priv, reg, UMAC_CMD);
+       udelay(10);
+       reg = umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_SW_RESET;
+       umac_writel(priv, reg, UMAC_CMD);
 }
 
 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev)
        int ret;
 
        /* Reset UniMAC */
-       ret = umac_reset(priv);
-       if (ret) {
-               netdev_err(dev, "UniMAC reset failed\n");
-               return ret;
-       }
+       umac_reset(priv);
 
        /* Flush TX and RX FIFOs at TOPCTRL level */
        topctrl_flush(priv);
@@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
        dev->needed_headroom += sizeof(struct bcm_tsb);
 
-       /* We are interfaced to a switch which handles the multicast
-        * filtering for us, so we do not support programming any
-        * multicast hash table in this Ethernet MAC.
-        */
-       dev->flags &= ~IFF_MULTICAST;
-
        /* libphy will adjust the link state accordingly */
        netif_carrier_off(dev);
 
index 4cab09d..8206a29 100644 (file)
@@ -346,6 +346,7 @@ struct sw_tx_bd {
        u8              flags;
 /* Set on the first BD descriptor when there is a split BD */
 #define BNX2X_TSO_SPLIT_BD             (1<<0)
+#define BNX2X_HAS_SECOND_PBD           (1<<1)
 };
 
 struct sw_rx_page {
index 47c5814..c43e723 100644 (file)
@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
+       if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+               /* Skip second parse bd... */
+               --nbd;
+               bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+       }
+
        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -797,7 +803,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
                return;
        }
-       bnx2x_frag_free(fp, new_data);
+       if (new_data)
+               bnx2x_frag_free(fp, new_data);
 drop:
        /* drop the packet and keep the buffer in the bin */
        DP(NETIF_MSG_RX_STATUS,
@@ -3888,6 +3895,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        /* set encapsulation flag in start BD */
                        SET_FLAG(tx_start_bd->general_data,
                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+                       tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
                        nbd++;
                } else if (xmit_type & XMIT_CSUM) {
                        /* Set PBD in checksum offload case w/o encapsulation */
index bd0600c..25eddd9 100644 (file)
@@ -379,6 +379,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        break;
                case PORT_FIBRE:
                case PORT_DA:
+               case PORT_NONE:
                        if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
                              bp->port.supported[1] & SUPPORTED_FIBRE)) {
                                DP(BNX2X_MSG_ETHTOOL,
index 2887034..6a8b145 100644 (file)
@@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
         * without the default SB.
         * For VFs there is no default SB, then we return (index+1).
         */
-       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
+       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
 
        index = control & PCI_MSIX_FLAGS_QSIZE;
 
index 5ba1cfb..4e615de 100644 (file)
@@ -1149,6 +1149,11 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
                goto out;
        }
 
+       if (skb_padto(skb, ETH_ZLEN)) {
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
        /* set the SKB transmit checksum */
        if (priv->desc_64b_en) {
                ret = bcmgenet_put_tx_csum(dev, skb);
@@ -1408,13 +1413,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
                if (cb->skb)
                        continue;
 
-               /* set the DMA descriptor length once and for all
-                * it will only change if we support dynamically sizing
-                * priv->rx_buf_len, but we do not
-                */
-               dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
-                               priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
-
                ret = bcmgenet_rx_refill(priv, cb);
                if (ret)
                        break;
@@ -2535,14 +2533,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
        netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
        netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
 
-       err = register_netdev(dev);
-       if (err)
-               goto err_clk_disable;
+       /* libphy will determine the link state */
+       netif_carrier_off(dev);
 
        /* Turn off the main clock, WOL clock is handled separately */
        if (!IS_ERR(priv->clk))
                clk_disable_unprepare(priv->clk);
 
+       err = register_netdev(dev);
+       if (err)
+               goto err;
+
        return err;
 
 err_clk_disable:
index 0f11710..e23c993 100644 (file)
@@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
 #define  EXT_ENERGY_DET_MASK           (1 << 12)
 
 #define EXT_RGMII_OOB_CTRL             0x0C
-#define  RGMII_MODE_EN                 (1 << 0)
 #define  RGMII_LINK                    (1 << 4)
 #define  OOB_DISABLE                   (1 << 5)
+#define  RGMII_MODE_EN                 (1 << 6)
 #define  ID_MODE_DIS                   (1 << 16)
 
 #define EXT_GPHY_CTRL                  0x1C
index df2792d..8afa579 100644 (file)
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
        return 0;
 }
 
-#define NVRAM_CMD_TIMEOUT 100
+#define NVRAM_CMD_TIMEOUT 5000
 
 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 {
@@ -3232,7 +3232,7 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 
        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
-               udelay(10);
+               usleep_range(10, 40);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        udelay(10);
                        break;
@@ -7854,8 +7854,8 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
                netif_wake_queue(tp->dev);
        }
 
-       segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
-       if (IS_ERR(segs))
+       segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
+       if (IS_ERR(segs) || !segs)
                goto tg3_tso_bug_end;
 
        do {
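
The tg3 NVRAM change swaps a 10 us busy-wait for usleep_range(10, 40), which actually sleeps between polls, and raises the retry count so slow flash parts get a much longer completion window. The polling idiom, sketched with a stubbed status read:

    #include <linux/delay.h>
    #include <linux/errno.h>

    #define DEMO_DONE   0x1
    #define DEMO_TRIES  5000

    static unsigned int demo_read_status(void) { return DEMO_DONE; } /* stub */

    static int demo_wait_done(void)
    {
            int i;

            for (i = 0; i < DEMO_TRIES; i++) {
                    /* needs process context; udelay() would spin */
                    usleep_range(10, 40);
                    if (demo_read_status() & DEMO_DONE)
                            return 0;
            }
            return -ETIMEDOUT;
    }
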
index 2f8d6b9..a83271c 100644 (file)
@@ -4057,22 +4057,19 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
 EXPORT_SYMBOL(cxgb4_unregister_uld);
 
 /* Check if netdev on which event is occured belongs to us or not. Return
- * suceess (1) if it belongs otherwise failure (0).
+ * success (true) if it belongs otherwise failure (false).
+ * Called with rcu_read_lock() held.
  */
-static int cxgb4_netdev(struct net_device *netdev)
+static bool cxgb4_netdev(const struct net_device *netdev)
 {
        struct adapter *adap;
        int i;
 
-       spin_lock(&adap_rcu_lock);
        list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
                for (i = 0; i < MAX_NPORTS; i++)
-                       if (adap->port[i] == netdev) {
-                               spin_unlock(&adap_rcu_lock);
-                               return 1;
-                       }
-       spin_unlock(&adap_rcu_lock);
-       return 0;
+                       if (adap->port[i] == netdev)
+                               return true;
+       return false;
 }
 
 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
@@ -6396,6 +6393,7 @@ static void remove_one(struct pci_dev *pdev)
                        adapter->flags &= ~DEV_ENABLED;
                }
                pci_release_regions(pdev);
+               synchronize_rcu();
                kfree(adapter);
        } else
                pci_release_regions(pdev);
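
cxgb4_netdev() runs with rcu_read_lock() already held by its caller, so taking adap_rcu_lock inside the walk was redundant; the RCU list primitives are enough on the read side, and remove_one() now does a synchronize_rcu() so readers drain before the adapter memory is freed. A condensed reader/updater sketch (hypothetical list):

    #include <linux/rculist.h>
    #include <linux/slab.h>

    struct demo_node {
            int id;
            struct list_head node;
    };

    static LIST_HEAD(demo_list);

    static bool demo_find(int id)   /* caller holds rcu_read_lock() */
    {
            struct demo_node *n;

            list_for_each_entry_rcu(n, &demo_list, node)
                    if (n->id == id)
                            return true;
            return false;
    }

    static void demo_remove(struct demo_node *n)
    {
            list_del_rcu(&n->node);
            synchronize_rcu();      /* wait for readers like demo_find() */
            kfree(n);
    }
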
index bba6768..931478e 100644 (file)
@@ -3962,6 +3962,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
                p->lport = j;
                p->rss_size = rss_size;
                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+               adap->port[i]->dev_port = j;
 
                ret = ntohl(c.u.info.lstatus_to_modtype);
                p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
index 768379b..523d9dd 100644 (file)
@@ -158,7 +158,7 @@ void comet_timer(unsigned long data)
 {
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
-       int next_tick = 60*HZ;
+       int next_tick = 2*HZ;
 
        if (tulip_debug > 1)
                netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
index 2e7c555..c2f5d2d 100644 (file)
@@ -557,9 +557,7 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
 #define be_pvid_tagging_enabled(adapter)       (adapter->pvid)
 
 /* Is BE in QNQ multi-channel mode */
-#define be_is_qnq_mode(adapter)                (adapter->mc_type == FLEX10 ||  \
-                                        adapter->mc_type == vNIC1 ||   \
-                                        adapter->mc_type == UFP)
+#define be_is_qnq_mode(adapter)                (adapter->function_mode & QNQ_MODE)
 
 #define lancer_chip(adapter)   (adapter->pdev->device == OC_DEVICE_ID3 || \
                                 adapter->pdev->device == OC_DEVICE_ID4)
index 3e0a6b2..59b3c05 100644 (file)
@@ -1091,7 +1091,7 @@ struct be_cmd_resp_modify_eq_delay {
  * based on the skew/IPL.
  */
 #define RDMA_ENABLED                           0x4
-#define FLEX10_MODE                            0x400
+#define QNQ_MODE                               0x400
 #define VNIC_MODE                              0x20000
 #define UMC_ENABLED                            0x1000000
 struct be_cmd_req_query_fw_cfg {
index 6822b3d..1e187fb 100644 (file)
@@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev)
        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_enable_busy_poll(eqo);
-               be_eq_notify(adapter, eqo->q.id, true, false, 0);
+               be_eq_notify(adapter, eqo->q.id, true, true, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
@@ -3254,9 +3254,9 @@ err:
 
 static u8 be_convert_mc_type(u32 function_mode)
 {
-       if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+       if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
                return vNIC1;
-       else if (function_mode & FLEX10_MODE)
+       else if (function_mode & QNQ_MODE)
                return FLEX10;
        else if (function_mode & VNIC_MODE)
                return vNIC2;
index 38d9d27..77037fd 100644 (file)
@@ -320,6 +320,11 @@ static void *swap_buffer(void *bufaddr, int len)
        return bufaddr;
 }
 
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+       return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
 static int
 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 {
@@ -330,7 +335,8 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
        if (unlikely(skb_cow_head(skb, 0)))
                return -1;
 
-       ip_hdr(skb)->check = 0;
+       if (is_ipv4_pkt(skb))
+               ip_hdr(skb)->check = 0;
        *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
 
        return 0;
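
The new fec helper guards ip_hdr() with a real IPv4 check: ip_hdr() just casts the network header, so clearing the checksum field on an IPv6 or other non-IPv4 frame would scribble on the wrong bytes. A matching sketch of the guard (simplified, assumed context):

    #include <linux/skbuff.h>
    #include <linux/ip.h>
    #include <linux/if_ether.h>

    static void demo_clear_l3_csum(struct sk_buff *skb)
    {
            /* trust the IPv4 cast only when both the ethertype and
             * the version nibble agree that this is IPv4 */
            if (skb->protocol == htons(ETH_P_IP) &&
                ip_hdr(skb)->version == 4)
                    ip_hdr(skb)->check = 0;
    }
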
index fab39e2..36fc429 100644 (file)
@@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
        if (ug_info->rxExtendedFiltering) {
                size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
                if (ug_info->largestexternallookupkeysize ==
-                   QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+                   QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
                        size +=
                            THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
                if (ug_info->largestexternallookupkeysize ==
-                   QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+                   QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
                        size +=
                            THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
        }
index a2db388..ee74f95 100644 (file)
@@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
        s32 ret_val;
        u16 i, rar_count = mac->rar_entry_count;
 
+       if ((hw->mac.type >= e1000_i210) &&
+           !(igb_get_flash_presence_i210(hw))) {
+               ret_val = igb_pll_workaround_i210(hw);
+               if (ret_val)
+                       return ret_val;
+       }
+
        /* Initialize identification LED */
        ret_val = igb_id_led_init(hw);
        if (ret_val) {
index 2a8bb35..217f813 100644 (file)
 #define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* SDP3 Data direction */
 
 /* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD    0x00004000
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_GMII   0x00000000
-#define E1000_CTRL_EXT_EIAME          0x01000000
-#define E1000_CTRL_EXT_IRCA           0x00000001
+#define E1000_CTRL_EXT_PFRSTD  0x00004000
+#define E1000_CTRL_EXT_SDLPE   0X00040000  /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES   0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX   0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII  0x00000000
+#define E1000_CTRL_EXT_EIAME   0x01000000
+#define E1000_CTRL_EXT_IRCA            0x00000001
 /* Interrupt delay cancellation */
 /* Driver loaded bit for FW */
 #define E1000_CTRL_EXT_DRV_LOAD       0x10000000
@@ -62,6 +63,7 @@
 /* packet buffer parity error detection enabled */
 /* descriptor FIFO parity error detection enable */
 #define E1000_CTRL_EXT_PBA_CLR         0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN         0x00100000
 #define E1000_I2CCMD_REG_ADDR_SHIFT    16
 #define E1000_I2CCMD_PHY_ADDR_SHIFT    24
 #define E1000_I2CCMD_OPCODE_READ       0x08000000
index 89925e4..ce55ea5 100644 (file)
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
 /* These functions must be implemented by drivers */
 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
 #endif /* _E1000_HW_H_ */
index 337161f..65d9316 100644 (file)
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
        }
        return ret_val;
 }
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an errata in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+       u16 nvm_word, phy_word, pci_word, tmp_nvm;
+       int i;
+
+       /* Get and set needed register values */
+       wuc = rd32(E1000_WUC);
+       mdicnfg = rd32(E1000_MDICNFG);
+       reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+       wr32(E1000_MDICNFG, reg_val);
+
+       /* Get data from NVM, or set default */
+       ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+                                         &nvm_word);
+       if (ret_val)
+               nvm_word = E1000_INVM_DEFAULT_AL;
+       tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+       for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+               /* check current state directly from internal PHY */
+               igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+                                        E1000_PHY_PLL_FREQ_REG), &phy_word);
+               if ((phy_word & E1000_PHY_PLL_UNCONF)
+                   != E1000_PHY_PLL_UNCONF) {
+                       ret_val = 0;
+                       break;
+               } else {
+                       ret_val = -E1000_ERR_PHY;
+               }
+               /* directly reset the internal PHY */
+               ctrl = rd32(E1000_CTRL);
+               wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+               ctrl_ext = rd32(E1000_CTRL_EXT);
+               ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+               wr32(E1000_CTRL_EXT, ctrl_ext);
+
+               wr32(E1000_WUC, 0);
+               reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+               wr32(E1000_EEARBC_I210, reg_val);
+
+               igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               pci_word |= E1000_PCI_PMCSR_D3;
+               igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               usleep_range(1000, 2000);
+               pci_word &= ~E1000_PCI_PMCSR_D3;
+               igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+               wr32(E1000_EEARBC_I210, reg_val);
+
+               /* restore WUC register */
+               wr32(E1000_WUC, wuc);
+       }
+       /* restore MDICNFG setting */
+       wr32(E1000_MDICNFG, mdicnfg);
+       return ret_val;
+}
index 9f34976..3442b63 100644 (file)
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
 s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
 s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
 bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE               0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD   0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
 #define NVM_LED_1_CFG_DEFAULT_I211     0x0184
 #define NVM_LED_0_2_CFG_DEFAULT_I211   0x200C
 
+/* PLL Defines */
+#define E1000_PCI_PMCSR                        0x44
+#define E1000_PCI_PMCSR_D3             0x03
+#define E1000_MAX_PLL_TRIES            5
+#define E1000_PHY_PLL_UNCONF           0xFF
+#define E1000_PHY_PLL_FREQ_PAGE                0xFC0000
+#define E1000_PHY_PLL_FREQ_REG         0x000E
+#define E1000_INVM_DEFAULT_AL          0x202F
+#define E1000_INVM_AUTOLOAD            0x0A
+#define E1000_INVM_PLL_WO_VAL          0x0010
+
 #endif
index 1cc4b1a..f5ba4e4 100644 (file)
@@ -66,6 +66,7 @@
 #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
 #define E1000_PBS      0x01008  /* Packet Buffer Size */
 #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024  /* EEPROM Auto Read Bus Control */
 #define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
 #define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
 #define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
index f145adb..a9537ba 100644 (file)
@@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        }
 }
 
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       struct igb_adapter *adapter = hw->back;
+
+       pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       struct igb_adapter *adapter = hw->back;
+
+       pci_write_config_word(adapter->pdev, reg, *value);
+}
+
 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
 {
        struct igb_adapter *adapter = hw->back;
@@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
 
        if (netif_running(netdev))
                igb_close(netdev);
+       else
+               igb_reset(adapter);
 
        igb_clear_interrupt_scheme(adapter);
 
index 45beca1..dadd9a5 100644 (file)
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
        command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
 
-       if (l3_proto == swab16(ETH_P_IP))
+       if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
 
                        if (phydev->speed == SPEED_1000)
                                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-                       else
+                       else if (phydev->speed == SPEED_100)
                                val |= MVNETA_GMAC_CONFIG_MII_SPEED;
 
                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
index 7f81ae6..e912b68 100644 (file)
@@ -4199,6 +4199,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
                },
        },
+       {
+               .ident = "FUJITSU SIEMENS A8NE-FM",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
+               },
+       },
        {}
 };
 
index 80f7252..56022d6 100644 (file)
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        init_completion(&cq->free);
 
        cq->irq = priv->eq_table.eq[cq->vector].irq;
-       cq->irq_affinity_change = false;
-
        return 0;
 
 err_radix:
index 4b21307..82322b1 100644 (file)
@@ -128,11 +128,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                                        mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
                                                  name);
                                }
+
                        }
                } else {
                        cq->vector = (cq->ring + 1 + priv->port) %
                                mdev->dev->caps.num_comp_vectors;
                }
+
+               cq->irq_desc =
+                       irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+                                                   cq->vector));
        } else {
                /* For TX we use the same irq per ring
                 * as we assigned for the RX */
@@ -187,8 +192,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector) {
-               if (!cq->is_tx)
-                       irq_set_affinity_hint(cq->mcq.irq, NULL);
                mlx4_release_eq(priv->mdev->dev, cq->vector);
        }
        cq->vector = 0;
@@ -204,6 +207,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
        if (!cq->is_tx) {
                napi_hash_del(&cq->napi);
                synchronize_rcu();
+               irq_set_affinity_hint(cq->mcq.irq, NULL);
        }
        netif_napi_del(&cq->napi);
 
index fa1a069..68d763d 100644 (file)
@@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
 
        coal->tx_coalesce_usecs = priv->tx_usecs;
        coal->tx_max_coalesced_frames = priv->tx_frames;
+       coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
+
        coal->rx_coalesce_usecs = priv->rx_usecs;
        coal->rx_max_coalesced_frames = priv->rx_frames;
 
@@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
        coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
        coal->rate_sample_interval = priv->sample_interval;
        coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+
        return 0;
 }
 
@@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
+       if (!coal->tx_max_coalesced_frames_irq)
+               return -EINVAL;
+
        priv->rx_frames = (coal->rx_max_coalesced_frames ==
                           MLX4_EN_AUTO_CONF) ?
                                MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
        priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
        priv->sample_interval = coal->rate_sample_interval;
        priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+       priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
 
        return mlx4_en_moderation_update(priv);
 }
index 7d4fb7b..7345c43 100644 (file)
@@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct  net_device *dev,
        struct mlx4_en_priv *priv = netdev_priv(dev);
        __be16 current_port;
 
-       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+       if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                return;
 
        if (sa_family == AF_INET6)
@@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                        MLX4_WQE_CTRL_SOLICITED);
        priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
        priv->tx_ring_num = prof->tx_ring_num;
+       priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
 
        priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
                                GFP_KERNEL);
index d2d4157..5535862 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/irq.h>
 
 #include "mlx4_en.h"
 
@@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                                             PKT_HASH_TYPE_L3);
 
                                        skb_record_rx_queue(gro_skb, cq->ring);
+                                       skb_mark_napi_id(gro_skb, &cq->napi);
 
                                        if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
                                                timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
 
        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget) {
+               int cpu_curr;
+               const struct cpumask *aff;
+
                INC_PERF_COUNTER(priv->pstats.napi_quota);
-               if (unlikely(cq->mcq.irq_affinity_change)) {
-                       cq->mcq.irq_affinity_change = false;
+
+               cpu_curr = smp_processor_id();
+               aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+
+               if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
+                       /* The current CPU is not in the IRQ's affinity mask -
+                        * the affinity has probably changed. Stop this NAPI
+                        * poll and restart it on the right CPU.
+                        */
                        napi_complete(napi);
                        mlx4_en_arm_cq(priv, cq);
                        return 0;
                }
        } else {
                /* Done for now */
-               cq->mcq.irq_affinity_change = false;
                napi_complete(napi);
                mlx4_en_arm_cq(priv, cq);
        }
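
    The irq_affinity_change notifier machinery removed in the eq.c hunks
    below is replaced by this direct check: when a poll exhausts its
    budget, the running CPU is compared against the IRQ's affinity mask,
    and on a mismatch the poll is completed so NAPI is rescheduled on the
    right CPU. A minimal sketch of the check, with the helper name
    invented for illustration:

        /* Sketch: detect NAPI polling on a CPU outside the IRQ's
         * affinity mask, as mlx4_en_poll_rx_cq() now does.
         */
        static bool napi_on_wrong_cpu(struct irq_desc *desc)
        {
                const struct cpumask *aff = irq_desc_get_irq_data(desc)->affinity;

                return !cpumask_test_cpu(smp_processor_id(), aff);
        }
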
index 8be7483..5045bab 100644 (file)
@@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
        return cnt;
 }
 
-static int mlx4_en_process_tx_cq(struct net_device *dev,
-                                struct mlx4_en_cq *cq,
-                                int budget)
+static bool mlx4_en_process_tx_cq(struct net_device *dev,
+                                struct mlx4_en_cq *cq)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
        int factor = priv->cqe_factor;
        u64 timestamp = 0;
        int done = 0;
+       int budget = priv->tx_work_limit;
 
        if (!priv->port_up)
-               return 0;
+               return true;
 
        index = cons_index & size_mask;
        cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
                netif_tx_wake_queue(ring->tx_queue);
                ring->wake_queue++;
        }
-       return done;
+       return done < budget;
 }
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        struct net_device *dev = cq->dev;
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       int done;
+       int clean_complete;
 
-       done = mlx4_en_process_tx_cq(dev, cq, budget);
+       clean_complete = mlx4_en_process_tx_cq(dev, cq);
+       if (!clean_complete)
+               return budget;
 
-       /* If we used up all the quota - we're probably not done yet... */
-       if (done < budget) {
-               /* Done for now */
-               cq->mcq.irq_affinity_change = false;
-               napi_complete(napi);
-               mlx4_en_arm_cq(priv, cq);
-               return done;
-       } else if (unlikely(cq->mcq.irq_affinity_change)) {
-               cq->mcq.irq_affinity_change = false;
-               napi_complete(napi);
-               mlx4_en_arm_cq(priv, cq);
-               return 0;
-       }
-       return budget;
+       napi_complete(napi);
+       mlx4_en_arm_cq(priv, cq);
+
+       return 0;
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
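
    With the TX work limit now coming from priv->tx_work_limit rather than
    the NAPI budget, the TX poll follows the standard NAPI contract:
    report the full budget while the ring still has work, otherwise
    complete and re-arm. Condensed, assuming a hypothetical clean_ring()
    helper that returns true when everything pending was processed:

        /* Sketch of the poll contract now used by mlx4_en_poll_tx_cq(). */
        static int tx_poll_sketch(struct napi_struct *napi, int budget)
        {
                if (!clean_ring(napi))          /* hypothetical: true when done */
                        return budget;          /* more work: stay scheduled */

                napi_complete(napi);            /* done: leave polling mode */
                /* re-arm the completion queue here, cf. mlx4_en_arm_cq() */
                return 0;
        }
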
index d954ec1..2a004b3 100644 (file)
@@ -53,11 +53,6 @@ enum {
        MLX4_EQ_ENTRY_SIZE      = 0x20
 };
 
-struct mlx4_irq_notify {
-       void *arg;
-       struct irq_affinity_notify notify;
-};
-
 #define MLX4_EQ_STATUS_OK         ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW          ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
        iounmap(priv->clr_base);
 }
 
-static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
-                                    const cpumask_t *mask)
-{
-       struct mlx4_irq_notify *n = container_of(notify,
-                                                struct mlx4_irq_notify,
-                                                notify);
-       struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
-       struct radix_tree_iter iter;
-       void **slot;
-
-       radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
-               struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
-
-               if (cq->irq == notify->irq)
-                       cq->irq_affinity_change = true;
-       }
-}
-
-static void mlx4_release_irq_notifier(struct kref *ref)
-{
-       struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
-                                                notify.kref);
-       kfree(n);
-}
-
-static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
-                                    struct mlx4_dev *dev, int irq)
-{
-       struct mlx4_irq_notify *irq_notifier = NULL;
-       int err = 0;
-
-       irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
-       if (!irq_notifier) {
-               mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
-                         irq);
-               return;
-       }
-
-       irq_notifier->notify.irq = irq;
-       irq_notifier->notify.notify = mlx4_irq_notifier_notify;
-       irq_notifier->notify.release = mlx4_release_irq_notifier;
-       irq_notifier->arg = priv;
-       err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
-       if (err) {
-               kfree(irq_notifier);
-               irq_notifier = NULL;
-               mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
-       }
-}
-
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                                continue;
                                /*we dont want to break here*/
                        }
-                       mlx4_assign_irq_notifier(priv, dev,
-                                                priv->eq_table.eq[vec].irq);
 
                        eq_set_ci(&priv->eq_table.eq[vec], 1);
                }
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 }
 EXPORT_SYMBOL(mlx4_assign_eq);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
                  Belonging to a legacy EQ*/
                mutex_lock(&priv->msix_ctl.pool_lock);
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       irq_set_affinity_notifier(
-                               priv->eq_table.eq[vec].irq,
-                               NULL);
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                        priv->msix_ctl.pool_bm &= ~(1ULL << i);
index 5f42f6d..82ab427 100644 (file)
@@ -2439,7 +2439,8 @@ slave_start:
                            (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
                                mlx4_err(dev,
                                         "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
-                               goto err_close;
+                               err = -EINVAL;
+                               goto err_master_mfunc;
                        }
                        for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
                                unsigned j;
index 0e15295..d72a5a8 100644 (file)
@@ -126,6 +126,8 @@ enum {
 #define MAX_TX_RINGS                   (MLX4_EN_MAX_TX_RING_P_UP * \
                                         MLX4_EN_NUM_UP)
 
+#define MLX4_EN_DEFAULT_TX_WORK                256
+
 /* Target number of packets to coalesce with interrupt moderation */
 #define MLX4_EN_RX_COAL_TARGET 44
 #define MLX4_EN_RX_COAL_TIME   0x10
@@ -343,6 +345,7 @@ struct mlx4_en_cq {
 #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
        spinlock_t poll_lock; /* protects from LLS/napi conflicts */
 #endif  /* CONFIG_NET_RX_BUSY_POLL */
+       struct irq_desc *irq_desc;
 };
 
 struct mlx4_en_port_profile {
@@ -542,6 +545,7 @@ struct mlx4_en_priv {
        __be32 ctrl_flags;
        u32 flags;
        u8 num_tx_rings_p_up;
+       u32 tx_work_limit;
        u32 tx_ring_num;
        u32 rx_ring_num;
        u32 rx_skb_size;
index ba0401d..184c361 100644 (file)
@@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
        write_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
        write_unlock_irq(&table->lock);
+       if (err) {
+               mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
+                              mlx5_base_mkey(mr->key), err);
+               mlx5_core_destroy_mkey(dev, mr);
+       }
 
        return err;
 }
@@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
        struct mlx5_mr_table *table = &dev->priv.mr_table;
        struct mlx5_destroy_mkey_mbox_in in;
        struct mlx5_destroy_mkey_mbox_out out;
+       struct mlx5_core_mr *deleted_mr;
        unsigned long flags;
        int err;
 
        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
 
+       write_lock_irqsave(&table->lock, flags);
+       deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+       write_unlock_irqrestore(&table->lock, flags);
+       if (!deleted_mr) {
+               mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
+                              mlx5_base_mkey(mr->key));
+               return -ENOENT;
+       }
+
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
        in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);
 
-       write_lock_irqsave(&table->lock, flags);
-       radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
-       write_unlock_irqrestore(&table->lock, flags);
-
        return err;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_mkey);
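
    The reordering in mlx5_core_destroy_mkey() closes a race: the mkey is
    unpublished from the radix tree before the destroy command goes to
    firmware, so a concurrent lookup can no longer find an mkey that is
    about to die. The general delete-before-destroy shape, with table and
    destroy_in_fw() as illustrative names:

        /* Sketch: unpublish first, then destroy, so lookups never
         * observe a half-destroyed object.
         */
        write_lock_irqsave(&table->lock, flags);
        obj = radix_tree_delete(&table->tree, key);
        write_unlock_irqrestore(&table->lock, flags);
        if (!obj)
                return -ENOENT;         /* never inserted, or already gone */
        return destroy_in_fw(dev, obj); /* safe: no new lookups can reach it */
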
index be425ad..61623e9 100644 (file)
@@ -538,6 +538,7 @@ enum rtl_register_content {
        MagicPacket     = (1 << 5),     /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),     /* Wake up when the cable connection is re-established */
        Jumbo_En0       = (1 << 2),     /* 8168 only. Reserved in the 8168b */
+       Rdy_to_L23      = (1 << 1),     /* L23 Enable */
        Beacon_en       = (1 << 0),     /* 8168 only. Reserved in the 8168b */
 
        /* Config4 register */
@@ -4239,6 +4240,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_40:
+               RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               break;
        case RTL_GIGA_MAC_VER_41:
        case RTL_GIGA_MAC_VER_42:
        case RTL_GIGA_MAC_VER_43:
@@ -4897,6 +4900,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
                                 PCI_EXP_LNKCTL_CLKREQ_EN);
 }
 
+static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+       u8 data;
+
+       data = RTL_R8(Config3);
+
+       if (enable)
+               data |= Rdy_to_L23;
+       else
+               data &= ~Rdy_to_L23;
+
+       RTL_W8(Config3, data);
+}
+
 #define R8168_CPCMD_QUIRK_MASK (\
        EnableBist | \
        Mac_dbgo_oe | \
@@ -5246,6 +5264,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
        };
 
        rtl_hw_start_8168f(tp);
+       rtl_pcie_state_l2l3_enable(tp, false);
 
        rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
 
@@ -5284,6 +5303,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
 
        rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
        rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5536,6 +5557,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 
        rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5571,6 +5594,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5583,6 +5608,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
        RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8101(struct net_device *dev)
index b3e148e..9d37483 100644 (file)
@@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
 
 static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
 {
-       u32 value;
-
-       value = readl(ioaddr + GMAC_AN_CTRL);
        /* auto negotiation enable and External Loopback enable */
-       value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+       u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
 
        if (restart)
                value |= GMAC_AN_CTRL_RAN;
index 7e6628a..1e2bcf5 100644 (file)
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
                        x->rx_msg_type_delay_req++;
                else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
                        x->rx_msg_type_delay_resp++;
-               else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+               else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
                        x->rx_msg_type_pdelay_req++;
                else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
                        x->rx_msg_type_pdelay_resp++;
index 1c24a8f..d813bfb 100644 (file)
@@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
        return err;
 }
 
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+       struct vio_driver_state *vio = &vnet->vio;
+
+       return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
        unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
        struct vnet_port *port;
 
        hlist_for_each_entry(port, hp, hash) {
+               if (!port_is_up(port))
+                       continue;
                if (ether_addr_equal(port->raddr, skb->data))
                        return port;
        }
-       port = NULL;
-       if (!list_empty(&vp->port_list))
-               port = list_entry(vp->port_list.next, struct vnet_port, list);
-
-       return port;
+       list_for_each_entry(port, &vp->port_list, list) {
+               if (!port->switch_port)
+                       continue;
+               if (!port_is_up(port))
+                       continue;
+               return port;
+       }
+       return NULL;
 }
 
 struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
@@ -1083,6 +1095,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
        return vp;
 }
 
+static void vnet_cleanup(void)
+{
+       struct vnet *vp;
+       struct net_device *dev;
+
+       mutex_lock(&vnet_list_mutex);
+       while (!list_empty(&vnet_list)) {
+               vp = list_first_entry(&vnet_list, struct vnet, list);
+               list_del(&vp->list);
+               dev = vp->dev;
+               /* vio_unregister_driver() should have cleaned up port_list */
+               BUG_ON(!list_empty(&vp->port_list));
+               unregister_netdev(dev);
+               free_netdev(dev);
+       }
+       mutex_unlock(&vnet_list_mutex);
+}
+
 static const char *local_mac_prop = "local-mac-address";
 
 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1270,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
 
                kfree(port);
 
-               unregister_netdev(vp->dev);
        }
        return 0;
 }
@@ -1268,6 +1297,7 @@ static int __init vnet_init(void)
 static void __exit vnet_exit(void)
 {
        vio_unregister_driver(&vnet_port_driver);
+       vnet_cleanup();
 }
 
 module_init(vnet_init);
index ff380da..b988d16 100644 (file)
@@ -1212,7 +1212,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
        for_each_slave(priv, cpsw_slave_open, priv);
 
        /* Add default VLAN */
-       cpsw_add_default_vlan(priv);
+       if (!priv->data.dual_emac)
+               cpsw_add_default_vlan(priv);
+       else
+               cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
+                                 ALE_ALL_PORTS << priv->host_port,
+                                 ALE_ALL_PORTS << priv->host_port, 0, 0);
 
        if (!cpsw_common_res_usage_state(priv)) {
                /* setup tx dma to fixed prio and zero offset */
index 14389f8..4c70360 100644 (file)
@@ -2191,7 +2191,6 @@ static void tile_net_setup(struct net_device *dev)
 static void tile_net_dev_init(const char *name, const uint8_t *mac)
 {
        int ret;
-       int i;
        struct net_device *dev;
        struct tile_net_priv *priv;
 
index eb78203..2aa5727 100644 (file)
@@ -291,7 +291,11 @@ static int         dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
 
 static int             dfx_rcv_init(DFX_board_t *bp, int get_buffers);
 static void            dfx_rcv_queue_process(DFX_board_t *bp);
+#ifdef DYNAMIC_BUFFERS
 static void            dfx_rcv_flush(DFX_board_t *bp);
+#else
+static inline void     dfx_rcv_flush(DFX_board_t *bp) {}
+#endif
 
 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
                                     struct net_device *dev);
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
 *     Align an sk_buff to a power-of-2 boundary
  *
  */
-
+#ifdef DYNAMIC_BUFFERS
 static void my_skb_align(struct sk_buff *skb, int n)
 {
        unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
 
        skb_reserve(skb, v - x);
 }
-
+#endif
 
 /*
  * ================
@@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process(
                                        break;
                                        }
                                else {
-#ifndef DYNAMIC_BUFFERS
-                                       if (! rx_in_place)
-#endif
-                                       {
+                                       if (!rx_in_place) {
                                                /* Receive buffer allocated, pass receive packet up */
 
                                                skb_copy_to_linear_data(skb,
@@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
                }
 
        }
-#else
-static inline void dfx_rcv_flush( DFX_board_t *bp )
-{
-}
 #endif /* DYNAMIC_BUFFERS */
 
 /*
index c041f63..d97d5f3 100644 (file)
@@ -189,7 +189,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
                                   "unable to teardown send buffer's gpadl\n");
                        return ret;
                }
-               net_device->recv_buf_gpadl_handle = 0;
+               net_device->send_buf_gpadl_handle = 0;
        }
        if (net_device->send_buf) {
                /* Free up the send buffer */
@@ -378,8 +378,10 @@ static int netvsc_init_buf(struct hv_device *device)
 
        net_device->send_section_map =
                kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
-       if (net_device->send_section_map == NULL)
+       if (net_device->send_section_map == NULL) {
+               ret = -ENOMEM;
                goto cleanup;
+       }
 
        goto exit;
 
index 4517b14..5089941 100644 (file)
@@ -1137,6 +1137,8 @@ static int at86rf230_probe(struct spi_device *spi)
        dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
 
        irq_type = irq_get_trigger_type(spi->irq);
+       if (!irq_type)
+               irq_type = IRQF_TRIGGER_RISING;
        if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
                irq_worker = at86rf230_irqwork;
                irq_handler = at86rf230_isr;
@@ -1168,7 +1170,8 @@ static int at86rf230_probe(struct spi_device *spi)
        if (rc)
                goto err_hw_init;
 
-       rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
+       rc = devm_request_irq(&spi->dev, spi->irq, irq_handler,
+                             IRQF_SHARED | irq_type,
                              dev_name(&spi->dev), lp);
        if (rc)
                goto err_hw_init;
index 6c622ae..fdc1b41 100644 (file)
 #include <linux/string.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 
 #define AT803X_INTR_ENABLE                     0x12
 #define AT803X_INTR_STATUS                     0x13
+#define AT803X_SMART_SPEED                     0x14
+#define AT803X_LED_CONTROL                     0x18
 #define AT803X_WOL_ENABLE                      0x01
 #define AT803X_DEVICE_ADDR                     0x03
 #define AT803X_LOC_MAC_ADDR_0_15_OFFSET                0x804C
 #define AT803X_DEBUG_SYSTEM_MODE_CTRL          0x05
 #define AT803X_DEBUG_RGMII_TX_CLK_DLY          BIT(8)
 
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8035_PHY_ID 0x004dd072
+
 MODULE_DESCRIPTION("Atheros 803x PHY driver");
 MODULE_AUTHOR("Matus Ujhelyi");
 MODULE_LICENSE("GPL");
 
+struct at803x_priv {
+       bool phy_reset:1;
+       struct gpio_desc *gpiod_reset;
+};
+
+struct at803x_context {
+       u16 bmcr;
+       u16 advertise;
+       u16 control1000;
+       u16 int_enable;
+       u16 smart_speed;
+       u16 led_control;
+};
+
+/* save relevant PHY registers to private copy */
+static void at803x_context_save(struct phy_device *phydev,
+                               struct at803x_context *context)
+{
+       context->bmcr = phy_read(phydev, MII_BMCR);
+       context->advertise = phy_read(phydev, MII_ADVERTISE);
+       context->control1000 = phy_read(phydev, MII_CTRL1000);
+       context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+       context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+       context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+}
+
+/* restore relevant PHY registers from private copy */
+static void at803x_context_restore(struct phy_device *phydev,
+                                  const struct at803x_context *context)
+{
+       phy_write(phydev, MII_BMCR, context->bmcr);
+       phy_write(phydev, MII_ADVERTISE, context->advertise);
+       phy_write(phydev, MII_CTRL1000, context->control1000);
+       phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+       phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+       phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+}
+
 static int at803x_set_wol(struct phy_device *phydev,
                          struct ethtool_wolinfo *wol)
 {
@@ -142,6 +188,26 @@ static int at803x_resume(struct phy_device *phydev)
        return 0;
 }
 
+static int at803x_probe(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct at803x_priv *priv;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->gpiod_reset = devm_gpiod_get(dev, "reset");
+       if (IS_ERR(priv->gpiod_reset))
+               priv->gpiod_reset = NULL;
+       else
+               gpiod_direction_output(priv->gpiod_reset, 1);
+
+       phydev->priv = priv;
+
+       return 0;
+}
+
 static int at803x_config_init(struct phy_device *phydev)
 {
        int ret;
@@ -189,58 +255,99 @@ static int at803x_config_intr(struct phy_device *phydev)
        return err;
 }
 
+static void at803x_link_change_notify(struct phy_device *phydev)
+{
+       struct at803x_priv *priv = phydev->priv;
+
+       /*
+        * Conduct a hardware reset for AT8030 every time a link loss is
+        * signalled. This is necessary to circumvent a hardware bug that
+        * occurs when the cable is unplugged while TX packets are pending
+        * in the FIFO. In such cases, the FIFO enters an error mode it
+        * in the FIFO. In such cases, the FIFO enters an error mode from
+        * which it cannot recover by software.
+       if (phydev->drv->phy_id == ATH8030_PHY_ID) {
+               if (phydev->state == PHY_NOLINK) {
+                       if (priv->gpiod_reset && !priv->phy_reset) {
+                               struct at803x_context context;
+
+                               at803x_context_save(phydev, &context);
+
+                               gpiod_set_value(priv->gpiod_reset, 0);
+                               msleep(1);
+                               gpiod_set_value(priv->gpiod_reset, 1);
+                               msleep(1);
+
+                               at803x_context_restore(phydev, &context);
+
+                               dev_dbg(&phydev->dev, "%s(): phy was reset\n",
+                                       __func__);
+                               priv->phy_reset = true;
+                       }
+               } else {
+                       priv->phy_reset = false;
+               }
+       }
+}
+
 static struct phy_driver at803x_driver[] = {
 {
        /* ATHEROS 8035 */
-       .phy_id         = 0x004dd072,
-       .name           = "Atheros 8035 ethernet",
-       .phy_id_mask    = 0xffffffef,
-       .config_init    = at803x_config_init,
-       .set_wol        = at803x_set_wol,
-       .get_wol        = at803x_get_wol,
-       .suspend        = at803x_suspend,
-       .resume         = at803x_resume,
-       .features       = PHY_GBIT_FEATURES,
-       .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
-       .driver         = {
+       .phy_id                 = ATH8035_PHY_ID,
+       .name                   = "Atheros 8035 ethernet",
+       .phy_id_mask            = 0xffffffef,
+       .probe                  = at803x_probe,
+       .config_init            = at803x_config_init,
+       .link_change_notify     = at803x_link_change_notify,
+       .set_wol                = at803x_set_wol,
+       .get_wol                = at803x_get_wol,
+       .suspend                = at803x_suspend,
+       .resume                 = at803x_resume,
+       .features               = PHY_GBIT_FEATURES,
+       .flags                  = PHY_HAS_INTERRUPT,
+       .config_aneg            = genphy_config_aneg,
+       .read_status            = genphy_read_status,
+       .driver                 = {
                .owner = THIS_MODULE,
        },
 }, {
        /* ATHEROS 8030 */
-       .phy_id         = 0x004dd076,
-       .name           = "Atheros 8030 ethernet",
-       .phy_id_mask    = 0xffffffef,
-       .config_init    = at803x_config_init,
-       .set_wol        = at803x_set_wol,
-       .get_wol        = at803x_get_wol,
-       .suspend        = at803x_suspend,
-       .resume         = at803x_resume,
-       .features       = PHY_GBIT_FEATURES,
-       .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
-       .driver         = {
+       .phy_id                 = ATH8030_PHY_ID,
+       .name                   = "Atheros 8030 ethernet",
+       .phy_id_mask            = 0xffffffef,
+       .probe                  = at803x_probe,
+       .config_init            = at803x_config_init,
+       .link_change_notify     = at803x_link_change_notify,
+       .set_wol                = at803x_set_wol,
+       .get_wol                = at803x_get_wol,
+       .suspend                = at803x_suspend,
+       .resume                 = at803x_resume,
+       .features               = PHY_GBIT_FEATURES,
+       .flags                  = PHY_HAS_INTERRUPT,
+       .config_aneg            = genphy_config_aneg,
+       .read_status            = genphy_read_status,
+       .driver                 = {
                .owner = THIS_MODULE,
        },
 }, {
        /* ATHEROS 8031 */
-       .phy_id         = 0x004dd074,
-       .name           = "Atheros 8031 ethernet",
-       .phy_id_mask    = 0xffffffef,
-       .config_init    = at803x_config_init,
-       .set_wol        = at803x_set_wol,
-       .get_wol        = at803x_get_wol,
-       .suspend        = at803x_suspend,
-       .resume         = at803x_resume,
-       .features       = PHY_GBIT_FEATURES,
-       .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
-       .ack_interrupt  = &at803x_ack_interrupt,
-       .config_intr    = &at803x_config_intr,
-       .driver         = {
+       .phy_id                 = ATH8031_PHY_ID,
+       .name                   = "Atheros 8031 ethernet",
+       .phy_id_mask            = 0xffffffef,
+       .probe                  = at803x_probe,
+       .config_init            = at803x_config_init,
+       .link_change_notify     = at803x_link_change_notify,
+       .set_wol                = at803x_set_wol,
+       .get_wol                = at803x_get_wol,
+       .suspend                = at803x_suspend,
+       .resume                 = at803x_resume,
+       .features               = PHY_GBIT_FEATURES,
+       .flags                  = PHY_HAS_INTERRUPT,
+       .config_aneg            = genphy_config_aneg,
+       .read_status            = genphy_read_status,
+       .ack_interrupt          = &at803x_ack_interrupt,
+       .config_intr            = &at803x_config_intr,
+       .driver                 = {
                .owner = THIS_MODULE,
        },
 } };
@@ -260,9 +367,9 @@ module_init(atheros_init);
 module_exit(atheros_exit);
 
 static struct mdio_device_id __maybe_unused atheros_tbl[] = {
-       { 0x004dd076, 0xffffffef },
-       { 0x004dd074, 0xffffffef },
-       { 0x004dd072, 0xffffffef },
+       { ATH8030_PHY_ID, 0xffffffef },
+       { ATH8031_PHY_ID, 0xffffffef },
+       { ATH8035_PHY_ID, 0xffffffef },
        { }
 };
 
index 6a999e6..9408157 100644 (file)
@@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
 {
        struct dp83640_private *dp83640 = phydev->priv;
 
-       if (!dp83640->hwts_rx_en)
-               return false;
-
        if (is_status_frame(skb, type)) {
                decode_status_frame(dp83640, skb);
                kfree_skb(skb);
                return true;
        }
 
+       if (!dp83640->hwts_rx_en)
+               return false;
+
        SKB_PTP_TYPE(skb) = type;
        skb_queue_tail(&dp83640->rx_queue, skb);
        schedule_work(&dp83640->ts_work);
index 2e58aa5..203651e 100644 (file)
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
        return d ? to_mii_bus(d) : NULL;
 }
 EXPORT_SYMBOL(of_mdio_find_bus);
+
+/* Walk the list of subnodes of an MDIO bus and look for a node that matches
+ * the phy's address with its 'reg' property. If found, set the of_node
+ * pointer for the phy. This allows auto-probed PHY devices to be supplied
+ * with information passed in via DT.
+ */
+static void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                                  struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct device_node *child;
+
+       if (dev->of_node || !mdio->dev.of_node)
+               return;
+
+       for_each_available_child_of_node(mdio->dev.of_node, child) {
+               int addr;
+               int ret;
+
+               ret = of_property_read_u32(child, "reg", &addr);
+               if (ret < 0) {
+                       dev_err(dev, "%s has invalid PHY address\n",
+                               child->full_name);
+                       continue;
+               }
+
+               /* A PHY must have a reg property in the range [0-31] */
+               if (addr >= PHY_MAX_ADDR) {
+                       dev_err(dev, "%s PHY address %i is too large\n",
+                               child->full_name, addr);
+                       continue;
+               }
+
+               if (addr == phydev->addr) {
+                       dev->of_node = child;
+                       return;
+               }
+       }
+}
+#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
+static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                                         struct phy_device *phydev)
+{
+}
 #endif
 
 /**
@@ -211,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
 
        bus->dev.parent = bus->parent;
        bus->dev.class = &mdio_bus_class;
+       bus->dev.driver = bus->parent->driver;
        bus->dev.groups = NULL;
        dev_set_name(&bus->dev, "%s", bus->id);
 
index 3bc079a..f7c6181 100644 (file)
@@ -720,6 +720,9 @@ void phy_state_machine(struct work_struct *work)
 
        mutex_lock(&phydev->lock);
 
+       if (phydev->drv->link_change_notify)
+               phydev->drv->link_change_notify(phydev);
+
        switch (phydev->state) {
        case PHY_DOWN:
        case PHY_STARTING:
index 35d753d..22c57be 100644 (file)
@@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
        phydev->bus->phy_map[phydev->addr] = phydev;
 
        /* Run all of the fixups for this PHY */
-       err = phy_init_hw(phydev);
+       err = phy_scan_fixups(phydev);
        if (err) {
                pr_err("PHY %d failed to initialize\n", phydev->addr);
                goto out;
@@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                      u32 flags, phy_interface_t interface)
 {
        struct device *d = &phydev->dev;
+       struct module *bus_module;
        int err;
 
        /* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                return -EBUSY;
        }
 
+       /* Increment the bus module reference count */
+       bus_module = phydev->bus->dev.driver ?
+                    phydev->bus->dev.driver->owner : NULL;
+       if (!try_module_get(bus_module)) {
+               dev_err(&dev->dev, "failed to get the bus module\n");
+               return -EIO;
+       }
+
        phydev->attached_dev = dev;
        dev->phydev = phydev;
 
@@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
 void phy_detach(struct phy_device *phydev)
 {
        int i;
+
+       if (phydev->bus->dev.driver)
+               module_put(phydev->bus->dev.driver->owner);
+
        phydev->attached_dev->phydev = NULL;
        phydev->attached_dev = NULL;
        phy_suspend(phydev);
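
    The phy_attach_direct()/phy_detach() hunks above pin the MDIO bus
    driver's module for as long as a PHY is attached, so the bus code
    cannot be unloaded underneath an active PHY. This is the usual
    try_module_get()/module_put() pairing; bus_drv below stands in for
    phydev->bus->dev.driver:

        /* Sketch: hold a module reference across attach/detach. */
        if (bus_drv && !try_module_get(bus_drv->owner))
                return -EIO;            /* bus module is being unloaded */
        /* ... PHY is attached and in use ... */
        if (bus_drv)
                module_put(bus_drv->owner);     /* released on detach */
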
index 91d6c12..d5b77ef 100644 (file)
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
 {
        struct sock_fprog uprog;
        struct sock_filter *code = NULL;
-       int len, err;
+       int len;
 
        if (copy_from_user(&uprog, arg, sizeof(uprog)))
                return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
        if (IS_ERR(code))
                return PTR_ERR(code);
 
-       err = sk_chk_filter(code, uprog.len);
-       if (err) {
-               kfree(code);
-               return err;
-       }
-
        *p = code;
        return uprog.len;
 }
@@ -763,10 +757,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        };
 
                        ppp_lock(ppp);
-                       if (ppp->pass_filter)
+                       if (ppp->pass_filter) {
                                sk_unattached_filter_destroy(ppp->pass_filter);
-                       err = sk_unattached_filter_create(&ppp->pass_filter,
-                                                         &fprog);
+                               ppp->pass_filter = NULL;
+                       }
+                       if (fprog.filter != NULL)
+                               err = sk_unattached_filter_create(&ppp->pass_filter,
+                                                                 &fprog);
+                       else
+                               err = 0;
                        kfree(code);
                        ppp_unlock(ppp);
                }
@@ -784,10 +783,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        };
 
                        ppp_lock(ppp);
-                       if (ppp->active_filter)
+                       if (ppp->active_filter) {
                                sk_unattached_filter_destroy(ppp->active_filter);
-                       err = sk_unattached_filter_create(&ppp->active_filter,
-                                                         &fprog);
+                               ppp->active_filter = NULL;
+                       }
+                       if (fprog.filter != NULL)
+                               err = sk_unattached_filter_create(&ppp->active_filter,
+                                                                 &fprog);
+                       else
+                               err = 0;
                        kfree(code);
                        ppp_unlock(ppp);
                }
index 2ea7efd..6c9c16d 100644 (file)
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
                po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
                                   dev->hard_header_len);
 
-               po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+               po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
                po->chan.private = sk;
                po->chan.ops = &pppoe_chan_ops;
 
index ad4a94e..8752644 100644 (file)
@@ -83,6 +83,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 #include "slip.h"
 #ifdef CONFIG_INET
 #include <linux/ip.h>
@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
 #endif
 }
 
-/*
- * Called by the driver when there's room for more data.  If we have
- * more packets to send, we send them here.
- */
-static void slip_write_wakeup(struct tty_struct *tty)
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slip_transmit(struct work_struct *work)
 {
+       struct slip *sl = container_of(work, struct slip, tx_work);
        int actual;
-       struct slip *sl = tty->disc_data;
 
+       spin_lock_bh(&sl->lock);
        /* First make sure we're connected. */
-       if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
+       if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
+               spin_unlock_bh(&sl->lock);
                return;
+       }
 
-       spin_lock_bh(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
-               clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
                spin_unlock_bh(&sl->lock);
                sl_unlock(sl);
                return;
        }
 
-       actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+       actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
        spin_unlock_bh(&sl->lock);
 }
 
+/*
+ * Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slip_write_wakeup(struct tty_struct *tty)
+{
+       struct slip *sl = tty->disc_data;
+
+       schedule_work(&sl->tx_work);
+}
+
 static void sl_tx_timeout(struct net_device *dev)
 {
        struct slip *sl = netdev_priv(dev);
@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
        sl->magic       = SLIP_MAGIC;
        sl->dev         = dev;
        spin_lock_init(&sl->lock);
+       INIT_WORK(&sl->tx_work, slip_transmit);
        sl->mode        = SL_MODE_DEFAULT;
 #ifdef CONFIG_SLIP_SMART
        /* initialize timer_list struct */
@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
        if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
                return;
 
+       spin_lock_bh(&sl->lock);
        tty->disc_data = NULL;
        sl->tty = NULL;
+       spin_unlock_bh(&sl->lock);
+
+       flush_work(&sl->tx_work);
 
        /* VSV = very important to remove timers */
 #ifdef CONFIG_SLIP_SMART
index 67673cf..cf32aad 100644 (file)
@@ -53,6 +53,7 @@ struct slip {
   struct tty_struct    *tty;           /* ptr to TTY structure         */
   struct net_device    *dev;           /* easy for intr handling       */
   spinlock_t           lock;
+  struct work_struct   tx_work;        /* Flushes transmit buffer      */
 
 #ifdef SL_INCLUDE_CSLIP
   struct slcompress    *slcomp;        /* for header compression       */
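
    The slip changes defer the actual tty write out of the wakeup
    callback: slip_write_wakeup() can run in contexts where writing back
    into the tty races with slip_close(), so the write moves into a work
    item that close flushes after clearing sl->tty under the lock. The
    lifecycle reduces to three calls:

        /* Sketch of the deferral added to slip.c. */
        INIT_WORK(&sl->tx_work, slip_transmit); /* in sl_alloc() */
        schedule_work(&sl->tx_work);            /* from slip_write_wakeup() */
        flush_work(&sl->tx_work);               /* in slip_close(), after sl->tty = NULL */
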
index 9ea4bfe..2a32d91 100644 (file)
@@ -341,6 +341,22 @@ next_desc:
                usb_driver_release_interface(driver, info->data);
                return -ENODEV;
        }
+
+       /* Some devices don't initialise properly; in particular the
+        * packet filter is not reset, and some don't implement reset
+        * all the way. Set the packet filter to a sane initial
+        * value.
+        */
+       usb_control_msg(dev->udev,
+                       usb_sndctrlpipe(dev->udev, 0),
+                       USB_CDC_SET_ETHERNET_PACKET_FILTER,
+                       USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                       USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
+                       intf->cur_altsetting->desc.bInterfaceNumber,
+                       NULL,
+                       0,
+                       USB_CTRL_SET_TIMEOUT
+               );
        return 0;
 
 bad_desc:
index a3a0586..a4272ed 100644 (file)
@@ -258,10 +258,8 @@ struct hso_serial {
         * so as not to drop characters on the floor.
         */
        int  curr_rx_urb_idx;
-       u16  curr_rx_urb_offset;
        u8   rx_urb_filled[MAX_RX_URBS];
        struct tasklet_struct unthrottle_tasklet;
-       struct work_struct    retry_unthrottle_workqueue;
 };
 
 struct hso_device {
@@ -1252,14 +1250,6 @@ static   void hso_unthrottle(struct tty_struct *tty)
        tasklet_hi_schedule(&serial->unthrottle_tasklet);
 }
 
-static void hso_unthrottle_workfunc(struct work_struct *work)
-{
-       struct hso_serial *serial =
-           container_of(work, struct hso_serial,
-                        retry_unthrottle_workqueue);
-       hso_unthrottle_tasklet(serial);
-}
-
 /* open the requested serial port */
 static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 {
@@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
                tasklet_init(&serial->unthrottle_tasklet,
                             (void (*)(unsigned long))hso_unthrottle_tasklet,
                             (unsigned long)serial);
-               INIT_WORK(&serial->retry_unthrottle_workqueue,
-                         hso_unthrottle_workfunc);
                result = hso_start_serial_device(serial->parent, GFP_KERNEL);
                if (result) {
                        hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
                if (!usb_gone)
                        hso_stop_serial_device(serial->parent);
                tasklet_kill(&serial->unthrottle_tasklet);
-               cancel_work_sync(&serial->retry_unthrottle_workqueue);
        }
 
        if (!usb_gone)
@@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
 static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 {
        struct tty_struct *tty;
-       int write_length_remaining = 0;
-       int curr_write_len;
+       int count;
 
        /* Sanity check */
        if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 
        tty = tty_port_tty_get(&serial->port);
 
+       if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+               tty_kref_put(tty);
+               return -1;
+       }
+
        /* Push data to tty */
-       write_length_remaining = urb->actual_length -
-               serial->curr_rx_urb_offset;
        D1("data to push to tty");
-       while (write_length_remaining) {
-               if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
-                       tty_kref_put(tty);
-                       return -1;
-               }
-               curr_write_len = tty_insert_flip_string(&serial->port,
-                       urb->transfer_buffer + serial->curr_rx_urb_offset,
-                       write_length_remaining);
-               serial->curr_rx_urb_offset += curr_write_len;
-               write_length_remaining -= curr_write_len;
+       count = tty_buffer_request_room(&serial->port, urb->actual_length);
+       if (count >= urb->actual_length) {
+               tty_insert_flip_string(&serial->port, urb->transfer_buffer,
+                                      urb->actual_length);
                tty_flip_buffer_push(&serial->port);
+       } else {
+               dev_warn(&serial->parent->usb->dev,
+                        "dropping data, %d bytes lost\n", urb->actual_length);
        }
+
        tty_kref_put(tty);
 
-       if (write_length_remaining == 0) {
-               serial->curr_rx_urb_offset = 0;
-               serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
-       }
-       return write_length_remaining;
+       serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+
+       return 0;
 }
 
 
@@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
                }
        }
        serial->curr_rx_urb_idx = 0;
-       serial->curr_rx_urb_offset = 0;
 
        if (serial->tx_urb)
                usb_kill_urb(serial->tx_urb);
index f9822bc..735f7da 100644 (file)
@@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
        ctx = drvstate->ctx;
 
        if (usbnet_dev->status)
-               /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
-                * decimal (0x100)"
+               /* The wMaxCommand buffer must be big enough to hold
+                * any message from the modem. Experience has shown
+                * that some replies are more than 256 bytes long
                 */
                subdriver = usb_cdc_wdm_register(ctx->control,
                                                 &usbnet_dev->status->desc,
-                                                256, /* wMaxCommand */
+                                                1024, /* wMaxCommand */
                                                 huawei_cdc_ncm_wdm_manage_power);
        if (IS_ERR(subdriver)) {
                ret = PTR_ERR(subdriver);
@@ -193,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
          .driver_info = (unsigned long)&huawei_cdc_ncm_info,
        },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+         .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+       },
 
        /* Terminating entry */
        {
index cf62d7e..22756db 100644 (file)
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+       {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
@@ -741,6 +742,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1426, 2)},    /* ZTE MF91 */
+       {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
@@ -756,6 +758,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9054, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9055, 8)},    /* Netgear AirCard 341U */
        {QMI_FIXED_INTF(0x1199, 0x9056, 8)},    /* Sierra Wireless Modem */
+       {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
index 2543196..3eab74c 100644 (file)
 /* USB_DEV_STAT */
 #define STAT_SPEED_MASK                0x0006
 #define STAT_SPEED_HIGH                0x0000
-#define STAT_SPEED_FULL                0x0001
+#define STAT_SPEED_FULL                0x0002
 
 /* USB_TX_AGG */
 #define TX_AGG_MAX_THRESHOLD   0x03
@@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
                struct sk_buff_head seg_list;
                struct sk_buff *segs, *nskb;
 
-               features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+               features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
                segs = skb_gso_segment(skb, features);
                if (IS_ERR(segs) || !segs)
                        goto drop;
@@ -2292,9 +2292,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
        /* rx share fifo credit full threshold */
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
-       ocp_data &= STAT_SPEED_MASK;
-       if (ocp_data == STAT_SPEED_FULL) {
+       if (tp->udev->speed == USB_SPEED_FULL ||
+           tp->udev->speed == USB_SPEED_LOW) {
                /* rx share fifo credit near full threshold */
                ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
                                RXFIFO_THR2_FULL);
@@ -3204,8 +3203,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
        struct r8152 *tp = netdev_priv(dev);
        struct tally_counter tally;
 
+       if (usb_autopm_get_interface(tp->intf) < 0)
+               return;
+
        generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
 
+       usb_autopm_put_interface(tp->intf);
+
        data[0] = le64_to_cpu(tally.tx_packets);
        data[1] = le64_to_cpu(tally.rx_packets);
        data[2] = le64_to_cpu(tally.tx_errors);
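
The hunk above takes a runtime-PM reference before reading tally counters, because an ethtool callback can run while the device is autosuspended and register reads would then fail. A sketch of the guard, assuming a hypothetical read_hw_counters() standing in for the driver's register accessor:

#include <linux/usb.h>

/* Hypothetical accessor standing in for generic_ocp_read(). */
extern void read_hw_counters(struct usb_interface *intf, u64 *data);

/*
 * Bracket register access in an ethtool hook with a runtime-PM
 * reference, so an autosuspended device is resumed before we touch it.
 */
static void example_get_stats(struct usb_interface *intf, u64 *data)
{
        if (usb_autopm_get_interface(intf) < 0)
                return;                 /* device could not be resumed */

        read_hw_counters(intf, data);   /* safe: device is awake here */

        usb_autopm_put_interface(intf);
}
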
index 424db65..d07bf4c 100644 (file)
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
        return ret;
 }
 
+static int smsc95xx_reset_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       int ret;
+
+       ret = smsc95xx_reset(dev);
+       if (ret < 0)
+               return ret;
+
+       return smsc95xx_resume(intf);
+}
+
 static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
 {
        skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
        .probe          = usbnet_probe,
        .suspend        = smsc95xx_suspend,
        .resume         = smsc95xx_resume,
-       .reset_resume   = smsc95xx_resume,
+       .reset_resume   = smsc95xx_reset_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
        .supports_autosuspend = 1,
index 9739434..b76f7dc 100644 (file)
@@ -2589,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev)
        for (i = 0; i < adapter->num_tx_queues; i++)
                spin_lock_init(&adapter->tx_queue[i].tx_lock);
 
-       err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
-                                   VMXNET3_DEF_RX_RING_SIZE,
+       err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
+                                   adapter->rx_ring_size,
                                    VMXNET3_DEF_RX_RING_SIZE);
        if (err)
                goto queue_err;
@@ -2968,6 +2968,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        adapter->netdev = netdev;
        adapter->pdev = pdev;
 
+       adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+       adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+
        spin_lock_init(&adapter->cmd_lock);
        adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
                                             sizeof(struct vmxnet3_adapter),
index 40c1c7b..b725fd9 100644 (file)
@@ -449,8 +449,8 @@ vmxnet3_get_ringparam(struct net_device *netdev,
        param->rx_mini_max_pending = 0;
        param->rx_jumbo_max_pending = 0;
 
-       param->rx_pending = adapter->rx_queue[0].rx_ring[0].size;
-       param->tx_pending = adapter->tx_queue[0].tx_ring.size;
+       param->rx_pending = adapter->rx_ring_size;
+       param->tx_pending = adapter->tx_ring_size;
        param->rx_mini_pending = 0;
        param->rx_jumbo_pending = 0;
 }
@@ -529,9 +529,11 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                         * size */
                        netdev_err(netdev, "failed to apply new sizes, "
                                   "try the default ones\n");
+                       new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+                       new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
                        err = vmxnet3_create_queues(adapter,
-                                                   VMXNET3_DEF_TX_RING_SIZE,
-                                                   VMXNET3_DEF_RX_RING_SIZE,
+                                                   new_tx_ring_size,
+                                                   new_rx_ring_size,
                                                    VMXNET3_DEF_RX_RING_SIZE);
                        if (err) {
                                netdev_err(netdev, "failed to create queues "
@@ -545,6 +547,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                        netdev_err(netdev, "failed to re-activate, error %d."
                                   " Closing it\n", err);
        }
+       adapter->tx_ring_size = new_tx_ring_size;
+       adapter->rx_ring_size = new_rx_ring_size;
 
 out:
        clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
index 190569d..29ee77f 100644 (file)
@@ -349,6 +349,11 @@ struct vmxnet3_adapter {
        u32     link_speed; /* in mbps */
 
        u64     tx_timeout_count;
+
+       /* Ring sizes */
+       u32 tx_ring_size;
+       u32 rx_ring_size;
+
        struct work_struct work;
 
        unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */
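
The vmxnet3 changes cache the user-configured ring sizes in the adapter, so a close/open cycle reuses them instead of reverting to the compile-time defaults, and set_ringparam records whatever size actually took effect, including the fallback. A simplified sketch of the pattern; struct my_adapter, create_queues() and the default values are illustrative, not vmxnet3 API:

#include <linux/types.h>

#define DEF_TX_RING_SIZE 512
#define DEF_RX_RING_SIZE 256

struct my_adapter {
        u32 tx_ring_size;       /* survives close/open and resets */
        u32 rx_ring_size;
};

/* Placeholder for the driver's queue-creation routine. */
extern int create_queues(struct my_adapter *ad, u32 tx, u32 rx);

static int my_open(struct my_adapter *ad)
{
        /* Use the cached sizes, not the compile-time defaults. */
        return create_queues(ad, ad->tx_ring_size, ad->rx_ring_size);
}

static int my_set_ringparam(struct my_adapter *ad, u32 tx, u32 rx)
{
        int err = create_queues(ad, tx, rx);

        if (err) {              /* fall back, and remember the fallback */
                tx = DEF_TX_RING_SIZE;
                rx = DEF_RX_RING_SIZE;
                err = create_queues(ad, tx, rx);
        }
        ad->tx_ring_size = tx;  /* record what is actually in use */
        ad->rx_ring_size = rx;
        return err;
}
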
index ade33ef..9f79192 100644 (file)
@@ -339,7 +339,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        ndm->ndm_state = fdb->state;
        ndm->ndm_ifindex = vxlan->dev->ifindex;
        ndm->ndm_flags = fdb->flags;
-       ndm->ndm_type = NDA_DST;
+       ndm->ndm_type = RTN_UNICAST;
 
        if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
                goto nla_put_failure;
index 93ace04..1f04127 100644 (file)
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
        "FarSync TE1"
 };
 
-static void
+static int
 fst_init_card(struct fst_card_info *card)
 {
        int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
         * we'll have to revise it in some way then.
         */
        for (i = 0; i < card->nports; i++) {
-                err = register_hdlc_device(card->ports[i].dev);
-                if (err < 0) {
-                       int j;
+               err = register_hdlc_device(card->ports[i].dev);
+               if (err < 0) {
                        pr_err("Cannot register HDLC device for port %d (errno %d)\n",
-                              i, -err);
-                       for (j = i; j < card->nports; j++) {
-                               free_netdev(card->ports[j].dev);
-                               card->ports[j].dev = NULL;
-                       }
-                        card->nports = i;
-                        break;
-                }
+                               i, -err);
+                       while (i--)
+                               unregister_hdlc_device(card->ports[i].dev);
+                       return err;
+               }
        }
 
        pr_info("%s-%s: %s IRQ%d, %d ports\n",
                port_to_dev(&card->ports[0])->name,
                port_to_dev(&card->ports[card->nports - 1])->name,
                type_strings[card->type], card->irq, card->nports);
+       return 0;
 }
 
 static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Try to enable the device */
        if ((err = pci_enable_device(pdev)) != 0) {
                pr_err("Failed to enable card. Err %d\n", -err);
-               kfree(card);
-               return err;
+               goto enable_fail;
        }
 
        if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
                pr_err("Failed to allocate regions. Err %d\n", -err);
-               pci_disable_device(pdev);
-               kfree(card);
-               return err;
+               goto regions_fail;
        }
 
        /* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        card->phys_ctlmem = pci_resource_start(pdev, 3);
        if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
                pr_err("Physical memory remap failed\n");
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto ioremap_physmem_fail;
        }
        if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
                pr_err("Control memory remap failed\n");
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               iounmap(card->mem);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto ioremap_ctlmem_fail;
        }
        dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
 
        /* Register the interrupt handler */
        if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
                pr_err("Unable to register interrupt %d\n", card->irq);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               iounmap(card->ctlmem);
-               iounmap(card->mem);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto irq_fail;
        }
 
        /* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        while (i--)
                                free_netdev(card->ports[i].dev);
                        pr_err("FarSync: out of memory\n");
-                        free_irq(card->irq, card);
-                        pci_release_regions(pdev);
-                        pci_disable_device(pdev);
-                        iounmap(card->ctlmem);
-                        iounmap(card->mem);
-                        kfree(card);
-                        return -ENODEV;
+                       err = -ENOMEM;
+                       goto hdlcdev_fail;
                }
                card->ports[i].dev    = dev;
                 card->ports[i].card   = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, card);
 
        /* Remainder of card setup */
+       if (no_of_cards_added >= FST_MAX_CARDS) {
+               pr_err("FarSync: too many cards\n");
+               err = -ENOMEM;
+               goto card_array_fail;
+       }
        fst_card_array[no_of_cards_added] = card;
        card->card_no = no_of_cards_added++;    /* Record instance and bump it */
-       fst_init_card(card);
+       err = fst_init_card(card);
+       if (err)
+               goto init_card_fail;
        if (card->family == FST_FAMILY_TXU) {
                /*
                 * Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                         &card->rx_dma_handle_card);
                if (card->rx_dma_handle_host == NULL) {
                        pr_err("Could not allocate rx dma buffer\n");
-                       fst_disable_intr(card);
-                       pci_release_regions(pdev);
-                       pci_disable_device(pdev);
-                       iounmap(card->ctlmem);
-                       iounmap(card->mem);
-                       kfree(card);
-                       return -ENOMEM;
+                       err = -ENOMEM;
+                       goto rx_dma_fail;
                }
                card->tx_dma_handle_host =
                    pci_alloc_consistent(card->device, FST_MAX_MTU,
                                         &card->tx_dma_handle_card);
                if (card->tx_dma_handle_host == NULL) {
                        pr_err("Could not allocate tx dma buffer\n");
-                       fst_disable_intr(card);
-                       pci_release_regions(pdev);
-                       pci_disable_device(pdev);
-                       iounmap(card->ctlmem);
-                       iounmap(card->mem);
-                       kfree(card);
-                       return -ENOMEM;
+                       err = -ENOMEM;
+                       goto tx_dma_fail;
                }
        }
        return 0;               /* Success */
+
+tx_dma_fail:
+       pci_free_consistent(card->device, FST_MAX_MTU,
+                           card->rx_dma_handle_host,
+                           card->rx_dma_handle_card);
+rx_dma_fail:
+       fst_disable_intr(card);
+       for (i = 0 ; i < card->nports ; i++)
+               unregister_hdlc_device(card->ports[i].dev);
+init_card_fail:
+       fst_card_array[card->card_no] = NULL;
+card_array_fail:
+       for (i = 0 ; i < card->nports ; i++)
+               free_netdev(card->ports[i].dev);
+hdlcdev_fail:
+       free_irq(card->irq, card);
+irq_fail:
+       iounmap(card->ctlmem);
+ioremap_ctlmem_fail:
+       iounmap(card->mem);
+ioremap_physmem_fail:
+       pci_release_regions(pdev);
+regions_fail:
+       pci_disable_device(pdev);
+enable_fail:
+       kfree(card);
+       return err;
 }
 
 /*
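
The farsync rework above consolidates six duplicated cleanup sequences into a single goto-unwind ladder, the standard kernel idiom: acquire resources in order, and on failure jump to the label that releases everything obtained so far, in reverse order. A minimal sketch (example_probe and the BAR index are illustrative):

#include <linux/pci.h>
#include <linux/io.h>

static int example_probe(struct pci_dev *pdev)
{
        void __iomem *mem;
        int err;

        err = pci_enable_device(pdev);
        if (err)
                goto enable_fail;

        err = pci_request_regions(pdev, "example");
        if (err)
                goto regions_fail;

        mem = pci_ioremap_bar(pdev, 2);
        if (!mem) {
                err = -ENODEV;
                goto ioremap_fail;
        }

        pci_set_drvdata(pdev, (void __force *)mem);
        return 0;               /* success: every resource is held */

ioremap_fail:
        pci_release_regions(pdev);
regions_fail:
        pci_disable_device(pdev);
enable_fail:
        return err;
}
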
index 5895f19..fa9fdfa 100644 (file)
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
 {
        struct x25_asy *sl = netdev_priv(dev);
        unsigned char *xbuff, *rbuff;
-       int len = 2 * newmtu;
+       int len;
 
+       if (newmtu > 65534)
+               return -EINVAL;
+
+       len = 2 * newmtu;
        xbuff = kmalloc(len + 4, GFP_ATOMIC);
        rbuff = kmalloc(len + 4, GFP_ATOMIC);
 
index 82017f5..e6c56c5 100644 (file)
@@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar)
        if (status)
                goto err_htc_stop;
 
-       ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+       if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
+               ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
+       else
+               ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
+
        INIT_LIST_HEAD(&ar->arvifs);
 
        if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
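
free_vdev_map is a bitmap with one set bit per available vdev, so it must be sized (1 << N) - 1 for the vdev count of the firmware variant actually running, which is what the hunk above fixes for 10X firmware. A sketch of allocating ids from such a bitmap; the helper names are illustrative, not ath10k API:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

static int vdev_id_alloc(u32 *free_map)
{
        int bit = ffs(*free_map);       /* lowest set bit, 1-based */

        if (!bit)
                return -EBUSY;          /* no free vdev left */
        *free_map &= ~(1U << (bit - 1));
        return bit - 1;
}

static void vdev_id_free(u32 *free_map, int id)
{
        *free_map |= 1U << id;
}
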
index 6c102b1..eebc860 100644 (file)
@@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        int msdu_len, msdu_chaining = 0;
        struct sk_buff *msdu;
        struct htt_rx_desc *rx_desc;
-       bool corrupted = false;
 
        lockdep_assert_held(&htt->rx_ring.lock);
 
@@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
                last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
                                RX_MSDU_END_INFO0_LAST_MSDU;
 
-               if (msdu_chaining && !last_msdu)
-                       corrupted = true;
-
                if (last_msdu) {
                        msdu->next = NULL;
                        break;
@@ -456,20 +452,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
        if (*head_msdu == NULL)
                msdu_chaining = -1;
 
-       /*
-        * Apparently FW sometimes reports weird chained MSDU sequences with
-        * more than one rx descriptor. This seems like a bug but needs more
-        * analyzing. For the time being fix it by dropping such sequences to
-        * avoid blowing up the host system.
-        */
-       if (corrupted) {
-               ath10k_warn("failed to pop chained msdus, dropping\n");
-               ath10k_htt_rx_free_msdu_chain(*head_msdu);
-               *head_msdu = NULL;
-               *tail_msdu = NULL;
-               msdu_chaining = -EINVAL;
-       }
-
        /*
         * Don't refill the ring yet.
         *
index 66acb2c..7c28cb5 100644 (file)
@@ -887,6 +887,15 @@ ath_tx_get_tid_subframe(struct ath_softc *sc, struct ath_txq *txq,
 
                tx_info = IEEE80211_SKB_CB(skb);
                tx_info->flags &= ~IEEE80211_TX_CTL_CLEAR_PS_FILT;
+
+               /*
+                * No aggregation session is running, but there may be frames
+                * from a previous session or a failed attempt in the queue.
+                * Send them out as normal data frames
+                */
+               if (!tid->active)
+                       tx_info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
                if (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
                        bf->bf_state.bf_type = 0;
                        return bf;
index e3f67b8..40fd9b7 100644 (file)
@@ -36,7 +36,7 @@ config B43_SSB
 choice
        prompt "Supported bus types"
        depends on B43
-       default B43_BCMA_AND_SSB
+       default B43_BUSES_BCMA_AND_SSB
 
 config B43_BUSES_BCMA_AND_SSB
        bool "BCMA and SSB"
index 32538ac..0d6a0bb 100644 (file)
@@ -5221,6 +5221,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
        /* We don't support 5 GHz on some PHYs yet */
        switch (dev->phy.type) {
        case B43_PHYTYPE_A:
+       case B43_PHYTYPE_G:
        case B43_PHYTYPE_N:
        case B43_PHYTYPE_LP:
        case B43_PHYTYPE_HT:
index 4f38f19..6e6ef3f 100644 (file)
@@ -811,9 +811,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
                break;
        case B43_PHYTYPE_G:
                status.band = IEEE80211_BAND_2GHZ;
-               /* chanid is the radio channel cookie value as used
-                * to tune the radio. */
-               status.freq = chanid + 2400;
+               /* Somewhere between firmware revisions 478.104 and 508.1084
+                * the G-PHY firmware was changed to report the channel the
+                * same way as the N-PHY and other PHY types do.
+                */
+               if (dev->fw.rev >= 508)
+                       status.freq = ieee80211_channel_to_frequency(chanid, status.band);
+               else
+                       status.freq = chanid + 2400;
                break;
        case B43_PHYTYPE_N:
        case B43_PHYTYPE_LP:
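
The G-PHY RX path above now depends on the loaded firmware revision: old firmware reports a raw tuning cookie (frequency minus 2400), new firmware a real channel number. A small sketch of that mapping, assuming an illustrative helper name:

#include <net/cfg80211.h>

static int gphy_rx_freq(u16 fw_rev, u8 chanid)
{
        if (fw_rev >= 508)
                return ieee80211_channel_to_frequency(chanid,
                                                      IEEE80211_BAND_2GHZ);
        return 2400 + chanid;   /* old cookie: frequency - 2400 */
}
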
index 6db51a6..d06fcb0 100644 (file)
@@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
        bus->bus_priv.usb = bus_pub;
        dev_set_drvdata(dev, bus);
        bus->ops = &brcmf_usb_bus_ops;
-       bus->chip = bus_pub->devid;
-       bus->chiprev = bus_pub->chiprev;
        bus->proto_type = BRCMF_PROTO_BCDC;
        bus->always_use_fws_queue = true;
 
@@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
                if (ret)
                        goto fail;
        }
+       bus->chip = bus_pub->devid;
+       bus->chiprev = bus_pub->chiprev;
+
        /* request firmware here */
        brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
                               brcmf_usb_probe_phase2);
index ed50de6..6dc5dd3 100644 (file)
@@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
        /* recalculate basic rates */
        iwl_calc_basic_rates(priv, ctx);
 
-       /*
-        * force CTS-to-self frames protection if RTS-CTS is not preferred
-        * one aggregation protection method
-        */
-       if (!priv->hw_params.use_rts_for_aggregation)
-               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-
        if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
            !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
                ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
        else
                ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
 
-       if (bss_conf->use_cts_prot)
-               ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
-       else
-               ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
-
        memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
 
        if (vif->type == NL80211_IFTYPE_AP ||
index 0aa7c00..b1a3332 100644 (file)
@@ -88,6 +88,7 @@
  *     P2P client interfaces simultaneously if they are in different bindings.
  * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
  *     P2P client interfaces simultaneously if they are in same bindings.
+ * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
  * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
  * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
  * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
index 8b53027..8b79081 100644 (file)
@@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
        if (vif->bss_conf.qos)
                cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
 
-       if (vif->bss_conf.use_cts_prot) {
+       if (vif->bss_conf.use_cts_prot)
                cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
-               cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
-       }
+
        IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
                       vif->bss_conf.use_cts_prot,
                       vif->bss_conf.ht_operation_mode);
@@ -1073,8 +1072,12 @@ static int iwl_mvm_mac_ctxt_cmd_ap(struct iwl_mvm *mvm,
        /* Fill the common data for all mac context types */
        iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
-       /* Also enable probe requests to pass */
-       cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
+       /*
+        * pass probe requests and beacons from other APs (needed
+        * for ht protection)
+        */
+       cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+                                       MAC_FILTER_IN_BEACON);
 
        /* Fill the data specific for ap mode */
        iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.ap,
@@ -1095,6 +1098,13 @@ static int iwl_mvm_mac_ctxt_cmd_go(struct iwl_mvm *mvm,
        /* Fill the common data for all mac context types */
        iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
 
+       /*
+        * pass probe requests and beacons from other APs (needed
+        * for ht protection)
+        */
+       cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST |
+                                       MAC_FILTER_IN_BEACON);
+
        /* Fill the data specific for GO mode */
        iwl_mvm_mac_ctxt_cmd_fill_ap(mvm, vif, &cmd.go.ap,
                                     action == FW_CTXT_ACTION_ADD);
index 7215f59..98556d0 100644 (file)
@@ -1159,8 +1159,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
 
        bcast_mac = &cmd->macs[mvmvif->id];
 
-       /* enable filtering only for associated stations */
-       if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+       /*
+        * enable filtering only for associated stations, but not for P2P
+        * Clients
+        */
+       if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
+           !vif->bss_conf.assoc)
                return;
 
        bcast_mac->default_discard = 1;
@@ -1237,10 +1241,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
        if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
                return 0;
 
-       /* bcast filtering isn't supported for P2P client */
-       if (vif->p2p)
-               return 0;
-
        if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
                return 0;
 
index 4b6c7d4..eac2b42 100644 (file)
@@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
                               struct iwl_scan_offload_cmd *scan,
                               struct iwl_mvm_scan_params *params)
 {
-       scan->channel_count =
-               mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
-               mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
+       scan->channel_count = req->n_channels;
        scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
        scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
        scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
@@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
                                  struct cfg80211_sched_scan_request *req,
                                  struct iwl_scan_channel_cfg *channels,
                                  enum ieee80211_band band,
-                                 int *head, int *tail,
+                                 int *head,
                                  u32 ssid_bitmap,
                                  struct iwl_mvm_scan_params *params)
 {
-       struct ieee80211_supported_band *s_band;
-       int n_channels = req->n_channels;
-       int i, j, index = 0;
-       bool partial;
+       int i, index = 0;
 
-       /*
-        * We have to configure all supported channels, even if we don't want to
-        * scan on them, but we have to send channels in the order that we want
-        * to scan. So add requested channels to head of the list and others to
-        * the end.
-       */
-       s_band = &mvm->nvm_data->bands[band];
-
-       for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
-               partial = false;
-               for (j = 0; j < n_channels; j++)
-                       if (s_band->channels[i].center_freq ==
-                                               req->channels[j]->center_freq) {
-                               index = *head;
-                               (*head)++;
-                               /*
-                                * Channels that came with the request will be
-                                * in partial scan .
-                                */
-                               partial = true;
-                               break;
-                       }
-               if (!partial) {
-                       index = *tail;
-                       (*tail)--;
-               }
-               channels->channel_number[index] =
-                       cpu_to_le16(ieee80211_frequency_to_channel(
-                                       s_band->channels[i].center_freq));
+       for (i = 0; i < req->n_channels; i++) {
+               struct ieee80211_channel *chan = req->channels[i];
+
+               if (chan->band != band)
+                       continue;
+
+               index = *head;
+               (*head)++;
+
+               channels->channel_number[index] = cpu_to_le16(chan->hw_value);
                channels->dwell_time[index][0] = params->dwell[band].active;
                channels->dwell_time[index][1] = params->dwell[band].passive;
 
                channels->iter_count[index] = cpu_to_le16(1);
                channels->iter_interval[index] = 0;
 
-               if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR))
+               if (!(chan->flags & IEEE80211_CHAN_NO_IR))
                        channels->type[index] |=
                                cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
 
                channels->type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL);
-               if (partial)
-                       channels->type[index] |=
-                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
+                               cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
+                                           IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
 
-               if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40)
+               if (chan->flags & IEEE80211_CHAN_NO_HT40)
                        channels->type[index] |=
                                cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
 
@@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
        int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
        int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
        int head = 0;
-       int tail = band_2ghz + band_5ghz - 1;
        u32 ssid_bitmap;
        int cmd_len;
        int ret;
@@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                                              &scan_cfg->scan_cmd.tx_cmd[0],
                                              scan_cfg->data);
                iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
-                                     IEEE80211_BAND_2GHZ, &head, &tail,
+                                     IEEE80211_BAND_2GHZ, &head,
                                      ssid_bitmap, &params);
        }
        if (band_5ghz) {
@@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
                                              scan_cfg->data +
                                                SCAN_OFFLOAD_PROBE_REQ_SIZE);
                iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
-                                     IEEE80211_BAND_5GHZ, &head, &tail,
+                                     IEEE80211_BAND_5GHZ, &head,
                                      ssid_bitmap, &params);
        }
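
The scan rework above drops the head/tail scheme that configured every supported channel and instead walks only the channels the request asked for, one band per pass, with a shared head index so the 5 GHz entries continue after the 2.4 GHz ones. A simplified sketch of that loop; struct chan_entry stands in for the firmware channel structure:

#include <net/cfg80211.h>

/* Simplified stand-in for the firmware channel entry. */
struct chan_entry {
        u16 channel_number;
        bool active;
};

static int build_channel_cfg(struct cfg80211_sched_scan_request *req,
                             struct chan_entry *cfg,
                             enum ieee80211_band band, int head)
{
        int i;

        for (i = 0; i < req->n_channels; i++) {
                struct ieee80211_channel *chan = req->channels[i];

                if (chan->band != band)
                        continue;       /* this pass handles one band */

                cfg[head].channel_number = chan->hw_value;
                cfg[head].active = !(chan->flags & IEEE80211_CHAN_NO_IR);
                head++;
        }
        return head;    /* next free slot, for the following band */
}
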
 
index 7091a18..98950e4 100644 (file)
@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
@@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
        {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
-       {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)},
+       {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
        {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
index 5b32106..fe0f66f 100644 (file)
@@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
        skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
        tx_info_aggr =  MWIFIEX_SKB_TXCB(skb_aggr);
 
+       memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
        tx_info_aggr->bss_type = tx_info_src->bss_type;
        tx_info_aggr->bss_num = tx_info_src->bss_num;
 
index e95dec9..b511613 100644 (file)
@@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->pkt_len = pkt_len;
index 8dee6c8..c161141 100644 (file)
@@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
 
        if (skb) {
                rx_info = MWIFIEX_SKB_RXCB(skb);
+               memset(rx_info, 0, sizeof(*rx_info));
                rx_info->bss_num = priv->bss_num;
                rx_info->bss_type = priv->bss_type;
        }
index cbabc12..e91cd0f 100644 (file)
@@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->pkt_len = skb->len;
index 574d4b5..2cc9b6f 100644 (file)
@@ -50,7 +50,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
                return -1;
        }
        mapping.len = size;
-       memcpy(skb->cb, &mapping, sizeof(mapping));
+       mwifiex_store_mapping(skb, &mapping);
        return 0;
 }
 
@@ -60,7 +60,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
        struct pcie_service_card *card = adapter->card;
        struct mwifiex_dma_mapping mapping;
 
-       MWIFIEX_SKB_PACB(skb, &mapping);
+       mwifiex_get_mapping(skb, &mapping);
        pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
 }
 
index 5fce7e7..70eb863 100644 (file)
@@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
                return -1;
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
index e73034f..0e88364 100644 (file)
@@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
 
@@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
        skb->priority = MWIFIEX_PRIO_VI;
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
index 37f26af..fd7e5b9 100644 (file)
@@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
                return -1;
        }
 
+       memset(rx_info, 0, sizeof(*rx_info));
        rx_info->bss_num = priv->bss_num;
        rx_info->bss_type = priv->bss_type;
 
index 9a56bc6..b0601b9 100644 (file)
@@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
        }
 
        tx_info = MWIFIEX_SKB_TXCB(skb);
+       memset(tx_info, 0, sizeof(*tx_info));
        tx_info->bss_num = priv->bss_num;
        tx_info->bss_type = priv->bss_type;
        tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
index ddae570..caadb37 100644 (file)
 #ifndef _MWIFIEX_UTIL_H_
 #define _MWIFIEX_UTIL_H_
 
+struct mwifiex_dma_mapping {
+       dma_addr_t addr;
+       size_t len;
+};
+
+struct mwifiex_cb {
+       struct mwifiex_dma_mapping dma_mapping;
+       union {
+               struct mwifiex_rxinfo rx_info;
+               struct mwifiex_txinfo tx_info;
+       };
+};
+
 static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
 {
-       return (struct mwifiex_rxinfo *)(skb->cb + sizeof(dma_addr_t));
+       struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+       BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
+       return &cb->rx_info;
 }
 
 static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
 {
-       return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
+       struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+       return &cb->tx_info;
 }
 
-struct mwifiex_dma_mapping {
-       dma_addr_t addr;
-       size_t len;
-};
+static inline void mwifiex_store_mapping(struct sk_buff *skb,
+                                        struct mwifiex_dma_mapping *mapping)
+{
+       struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+       memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));
+}
 
-static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
-                                       struct mwifiex_dma_mapping *mapping)
+static inline void mwifiex_get_mapping(struct sk_buff *skb,
+                                      struct mwifiex_dma_mapping *mapping)
 {
-       memcpy(mapping, skb->cb, sizeof(*mapping));
+       struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
+
+       memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
 }
 
 static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
 {
        struct mwifiex_dma_mapping mapping;
 
-       MWIFIEX_SKB_PACB(skb, &mapping);
+       mwifiex_get_mapping(skb, &mapping);
 
        return mapping.addr;
 }
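
The util.h rework replaces hand-computed offsets into skb->cb with one struct overlaid on it, so the compiler checks the layout; and since the contents of cb are undefined for a fresh skb, callers still zero the info area before use, which is what the memset() additions in the mwifiex hunks above guarantee. A generic sketch of the idiom with illustrative field names:

#include <linux/bug.h>
#include <linux/skbuff.h>

struct drv_cb {
        dma_addr_t dma_addr;
        size_t dma_len;
        union {
                struct { u8 bss_num, bss_type; } rx;
                struct { u8 bss_num, bss_type; u32 flags; } tx;
        };
};

static inline struct drv_cb *get_drv_cb(struct sk_buff *skb)
{
        /* Fails to compile if the layout outgrows the 48-byte cb area. */
        BUILD_BUG_ON(sizeof(struct drv_cb) > sizeof(skb->cb));
        return (struct drv_cb *)skb->cb;
}
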
index 2f1cd92..a511ccc 100644 (file)
@@ -1681,8 +1681,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
        /*
         * Detect if this device has an hardware controlled radio.
         */
-       if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO))
+       if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
                __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
+               /*
+                * On this device, RFKILL initialized during probe does not
+                * work; register it only after device initialization.
+                */
+               __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
+       }
 
        /*
         * Check if the BBP tuning should be enabled.
index a49c3d7..832006b 100644 (file)
@@ -229,6 +229,31 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
 /*
  * Firmware functions
  */
+static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
+{
+       __le32 *reg;
+       u32 fw_mode;
+
+       reg = kmalloc(sizeof(*reg), GFP_KERNEL);
+       if (reg == NULL)
+               return -ENOMEM;
+       /* We cannot use rt2x00usb_register_read() here as it uses a
+        * different request type (MULTI_READ vs. DEVICE_MODE) and does
+        * not pass the magic value USB_MODE_AUTORUN (0x11) to the
+        * device, so the returned value would be invalid.
+        */
+       rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
+                                USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
+                                reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
+       fw_mode = le32_to_cpu(*reg);
+       kfree(reg);
+
+       if ((fw_mode & 0x00000003) == 2)
+               return 1;
+
+       return 0;
+}
+
 static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
 {
        return FIRMWARE_RT2870;
@@ -240,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
        int status;
        u32 offset;
        u32 length;
+       int retval;
 
        /*
         * Check which section of the firmware we need.
@@ -257,8 +283,16 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
        /*
         * Write firmware to device.
         */
-       rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
-                                     data + offset, length);
+       retval = rt2800usb_autorun_detect(rt2x00dev);
+       if (retval < 0)
+               return retval;
+       if (retval) {
+               rt2x00_info(rt2x00dev,
+                           "Firmware loading not required - NIC in AutoRun mode\n");
+       } else {
+               rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
+                                             data + offset, length);
+       }
 
        rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
        rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
@@ -735,11 +769,26 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
 /*
  * Device probe functions.
  */
+static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
+{
+       int retval;
+
+       retval = rt2800usb_autorun_detect(rt2x00dev);
+       if (retval < 0)
+               return retval;
+       if (retval)
+               return 1;
+       return rt2800_efuse_detect(rt2x00dev);
+}
+
 static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
 {
        int retval;
 
-       if (rt2800_efuse_detect(rt2x00dev))
+       retval = rt2800usb_efuse_detect(rt2x00dev);
+       if (retval < 0)
+               return retval;
+       if (retval)
                retval = rt2800_read_eeprom_efuse(rt2x00dev);
        else
                retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
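
rt2800usb_autorun_detect() above reads the firmware mode through a vendor request into a kmalloc'ed __le32 rather than a stack variable, because buffers handed to USB transfers may be DMA targets and must be heap memory. A generic sketch of the same rule using the core control-message API; the request constants are illustrative:

#include <linux/usb.h>
#include <linux/slab.h>

static int vendor_read_u32(struct usb_device *udev, u8 request,
                           u16 value, u32 *result)
{
        __le32 *buf;
        int ret;

        buf = kmalloc(sizeof(*buf), GFP_KERNEL);  /* never the stack */
        if (!buf)
                return -ENOMEM;

        ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request,
                              USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                              value, 0, buf, sizeof(*buf), 1000);
        if (ret >= 0)
                *result = le32_to_cpu(*buf);

        kfree(buf);
        return ret < 0 ? ret : 0;
}
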
index 010b765..d13f25c 100644 (file)
@@ -693,6 +693,7 @@ enum rt2x00_capability_flags {
        REQUIRE_SW_SEQNO,
        REQUIRE_HT_TX_DESC,
        REQUIRE_PS_AUTOWAKE,
+       REQUIRE_DELAYED_RFKILL,
 
        /*
         * Capabilities
index 2bde672..4fa43a2 100644 (file)
@@ -1126,9 +1126,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
                return;
 
        /*
-        * Unregister extra components.
+        * Stop rfkill polling.
         */
-       rt2x00rfkill_unregister(rt2x00dev);
+       if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+               rt2x00rfkill_unregister(rt2x00dev);
 
        /*
         * Allow the HW to uninitialize.
@@ -1166,6 +1167,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
 
        set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
 
+       /*
+        * Start rfkill polling.
+        */
+       if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+               rt2x00rfkill_register(rt2x00dev);
+
        return 0;
 }
 
@@ -1375,7 +1382,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
        rt2x00link_register(rt2x00dev);
        rt2x00leds_register(rt2x00dev);
        rt2x00debug_register(rt2x00dev);
-       rt2x00rfkill_register(rt2x00dev);
+
+       /*
+        * Start rfkill polling.
+        */
+       if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+               rt2x00rfkill_register(rt2x00dev);
 
        return 0;
 
@@ -1390,6 +1402,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
 {
        clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
 
+       /*
+        * Stop rfkill polling.
+        */
+       if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
+               rt2x00rfkill_unregister(rt2x00dev);
+
        /*
         * Disable radio.
         */
index 212ac48..004dff9 100644 (file)
@@ -487,6 +487,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
        crypto.cipher = rt2x00crypto_key_to_cipher(key);
        if (crypto.cipher == CIPHER_NONE)
                return -EOPNOTSUPP;
+       if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
+               return -EOPNOTSUPP;
 
        crypto.cmd = cmd;
 
index e7bcf62..831b65f 100644 (file)
@@ -93,6 +93,7 @@ enum rt2x00usb_mode_offset {
        USB_MODE_SLEEP = 7,     /* RT73USB */
        USB_MODE_FIRMWARE = 8,  /* RT73USB */
        USB_MODE_WAKEUP = 9,    /* RT73USB */
+       USB_MODE_AUTORUN = 17, /* RT2800USB */
 };
 
 /**
index 4dd7c4a..2532ce8 100644 (file)
@@ -222,6 +222,7 @@ struct xenvif {
 
        /* Queues */
        struct xenvif_queue *queues;
+       unsigned int num_queues; /* active queues, resource allocated */
 
        /* Miscellaneous private stuff. */
        struct net_device *dev;
index 852da34..9e97c7c 100644 (file)
@@ -137,32 +137,11 @@ static void xenvif_wake_queue_callback(unsigned long data)
        }
 }
 
-static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              void *accel_priv, select_queue_fallback_t fallback)
-{
-       unsigned int num_queues = dev->real_num_tx_queues;
-       u32 hash;
-       u16 queue_index;
-
-       /* First, check if there is only one queue to optimise the
-        * single-queue or old frontend scenario.
-        */
-       if (num_queues == 1) {
-               queue_index = 0;
-       } else {
-               /* Use skb_get_hash to obtain an L4 hash if available */
-               hash = skb_get_hash(skb);
-               queue_index = hash % num_queues;
-       }
-
-       return queue_index;
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        u16 index;
        int min_slots_needed;
 
@@ -225,7 +204,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
@@ -256,7 +235,7 @@ out:
 static void xenvif_up(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
 
        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +251,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
 
        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -379,7 +358,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 * data)
 {
        struct xenvif *vif = netdev_priv(dev);
-       unsigned int num_queues = dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;
        struct xenvif_stats *vif_stats;
@@ -424,7 +403,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
-       .ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,7 +416,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
-        * via netif_set_real_num_tx_queues().
+        * via netif_set_real_num_*_queues().
         */
        dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
                              xenvif_max_queues);
@@ -458,11 +436,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->dev = dev;
        vif->disabled = false;
 
-       /* Start out with no queues. The call below does not require
-        * rtnl_lock() as it happens before register_netdev().
-        */
+       /* Start out with no queues. */
        vif->queues = NULL;
-       netif_set_real_num_tx_queues(dev, 0);
+       vif->num_queues = 0;
 
        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
@@ -677,7 +653,7 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
 void xenvif_disconnect(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
 
        if (netif_carrier_ok(vif->dev))
@@ -724,7 +700,7 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
 void xenvif_free(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
        /* Here we want to avoid timeout messages if an skb can be legitimately
         * stuck somewhere else. Realistically this could be an another vif's
@@ -748,12 +724,9 @@ void xenvif_free(struct xenvif *vif)
                xenvif_deinit_queue(queue);
        }
 
-       /* Free the array of queues. The call below does not require
-        * rtnl_lock() because it happens after unregister_netdev().
-        */
-       netif_set_real_num_tx_queues(vif->dev, 0);
        vfree(vif->queues);
        vif->queues = NULL;
+       vif->num_queues = 0;
 
        free_netdev(vif->dev);
 
index 1844a47..c65b636 100644 (file)
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 {
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+       /* This always points to the shinfo of the skb being checked, which
+        * could be either the first or the one on the frag_list
+        */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
+       /* If this is non-NULL, we are currently checking the frag_list skb, and
+        * this points to the shinfo of the first one
+        */
+       struct skb_shared_info *first_shinfo = NULL;
        int nr_frags = shinfo->nr_frags;
+       const bool sharedslot = nr_frags &&
+                               frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
        int i, err;
-       struct sk_buff *first_skb = NULL;
 
        /* Check status of header. */
        err = (*gopp_copy)->status;
-       (*gopp_copy)++;
        if (unlikely(err)) {
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                                   (*gopp_copy)->status,
                                   pending_idx,
                                   (*gopp_copy)->source.u.ref);
-               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+               /* The first frag might still have this slot mapped */
+               if (!sharedslot)
+                       xenvif_idx_release(queue, pending_idx,
+                                          XEN_NETIF_RSP_ERROR);
        }
+       (*gopp_copy)++;
 
 check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
                                                pending_idx,
                                                gop_map->handle);
                        /* Had a previous error? Invalidate this fragment. */
-                       if (unlikely(err))
+                       if (unlikely(err)) {
                                xenvif_idx_unmap(queue, pending_idx);
+                               /* If the mapping of the first frag was OK, but
+                                * the header's copy failed, and they are
+                                * sharing a slot, send an error
+                                */
+                               if (i == 0 && sharedslot)
+                                       xenvif_idx_release(queue, pending_idx,
+                                                          XEN_NETIF_RSP_ERROR);
+                               else
+                                       xenvif_idx_release(queue, pending_idx,
+                                                          XEN_NETIF_RSP_OKAY);
+                       }
                        continue;
                }
 
@@ -1075,42 +1097,53 @@ check_frags:
                                   gop_map->status,
                                   pending_idx,
                                   gop_map->ref);
+
                xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;
-               /* First error: invalidate preceding fragments. */
+
+               /* First error: if the header hasn't shared a slot with the
+                * first frag, release it as well.
+                */
+               if (!sharedslot)
+                       xenvif_idx_release(queue,
+                                          XENVIF_TX_CB(skb)->pending_idx,
+                                          XEN_NETIF_RSP_OKAY);
+
+               /* Invalidate preceding fragments of this skb. */
                for (j = 0; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xenvif_idx_unmap(queue, pending_idx);
+                       xenvif_idx_release(queue, pending_idx,
+                                          XEN_NETIF_RSP_OKAY);
+               }
+
+               /* And if we found the error while checking the frag_list, unmap
+                * the first skb's frags
+                */
+               if (first_shinfo) {
+                       for (j = 0; j < first_shinfo->nr_frags; j++) {
+                               pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+                               xenvif_idx_unmap(queue, pending_idx);
+                               xenvif_idx_release(queue, pending_idx,
+                                                  XEN_NETIF_RSP_OKAY);
+                       }
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }
 
-       if (skb_has_frag_list(skb)) {
-               first_skb = skb;
-               skb = shinfo->frag_list;
-               shinfo = skb_shinfo(skb);
+       if (skb_has_frag_list(skb) && !first_shinfo) {
+               first_shinfo = skb_shinfo(skb);
+               shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
                nr_frags = shinfo->nr_frags;
 
                goto check_frags;
        }
 
-       /* There was a mapping error in the frag_list skb. We have to unmap
-        * the first skb's frags
-        */
-       if (first_skb && err) {
-               int j;
-               shinfo = skb_shinfo(first_skb);
-               for (j = 0; j < shinfo->nr_frags; j++) {
-                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xenvif_idx_unmap(queue, pending_idx);
-               }
-       }
-
        *gopp_map = gop_map;
        return err;
 }
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 
                /* Check the remap error code. */
                if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+                       /* If there was an error, xenvif_tx_check_gop is
+                        * expected to release all the frags which were mapped,
+                        * so kfree_skb shouldn't do it again
+                        */
                        skb_shinfo(skb)->nr_frags = 0;
+                       if (skb_has_frag_list(skb)) {
+                               struct sk_buff *nskb =
+                                               skb_shinfo(skb)->frag_list;
+                               skb_shinfo(nskb)->nr_frags = 0;
+                       }
                        kfree_skb(skb);
                        continue;
                }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
                           tx_unmap_op.status);
                BUG();
        }
-
-       xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
 }
 
 static inline int rx_work_todo(struct xenvif_queue *queue)
index 96c63dc..3d85acd 100644
@@ -527,9 +527,7 @@ static void connect(struct backend_info *be)
        /* Use the number of queues requested by the frontend */
        be->vif->queues = vzalloc(requested_num_queues *
                                  sizeof(struct xenvif_queue));
-       rtnl_lock();
-       netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
-       rtnl_unlock();
+       be->vif->num_queues = requested_num_queues;
 
        for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
                queue = &be->vif->queues[queue_index];
@@ -546,9 +544,7 @@ static void connect(struct backend_info *be)
                         * earlier queues can be destroyed using the regular
                         * disconnect logic.
                         */
-                       rtnl_lock();
-                       netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-                       rtnl_unlock();
+                       be->vif->num_queues = queue_index;
                        goto err;
                }
 
@@ -561,13 +557,19 @@ static void connect(struct backend_info *be)
                         * and also clean up any previously initialised queues.
                         */
                        xenvif_deinit_queue(queue);
-                       rtnl_lock();
-                       netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-                       rtnl_unlock();
+                       be->vif->num_queues = queue_index;
                        goto err;
                }
        }
 
+       /* Initialisation completed, tell the core driver the number of
+        * active queues.
+        */
+       rtnl_lock();
+       netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
+       netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
+       rtnl_unlock();
+
        xenvif_carrier_on(be->vif);
 
        unregister_hotplug_status_watch(be);
@@ -582,13 +584,11 @@ static void connect(struct backend_info *be)
        return;
 
 err:
-       if (be->vif->dev->real_num_tx_queues > 0)
+       if (be->vif->num_queues > 0)
                xenvif_disconnect(be->vif); /* Clean up existing queues */
        vfree(be->vif->queues);
        be->vif->queues = NULL;
-       rtnl_lock();
-       netif_set_real_num_tx_queues(be->vif->dev, 0);
-       rtnl_unlock();
+       be->vif->num_queues = 0;
        return;
 }
 
@@ -596,7 +596,7 @@ err:
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
 {
        struct xenbus_device *dev = be->dev;
-       unsigned int num_queues = queue->vif->dev->real_num_tx_queues;
+       unsigned int num_queues = queue->vif->num_queues;
        unsigned long tx_ring_ref, rx_ring_ref;
        unsigned int tx_evtchn, rx_evtchn;
        int err;
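
The pattern behind this whole hunk: the backend keeps its own num_queues count while queues are coming up and publishes it to the core (netif_set_real_num_tx_queues needs the rtnl lock) only once every queue is ready. A minimal sketch under those assumptions, with invented names:

    #include <stdio.h>

    struct vif { unsigned int num_queues; };

    /* stand-in for netif_set_real_num_tx_queues(); needs rtnl in the kernel */
    static void publish(unsigned int n) { printf("core now sees %u queues\n", n); }

    static int init_queue(unsigned int i) { return i < 3 ? 0 : -1; }

    int main(void)
    {
        struct vif vif = { 0 };
        unsigned int requested = 4, i;

        for (i = 0; i < requested; i++) {
            if (init_queue(i) < 0) {
                vif.num_queues = i;  /* only this many really exist */
                goto err;
            }
        }
        vif.num_queues = requested;
        publish(vif.num_queues);     /* all queues up: tell the core once */
        return 0;
    err:
        printf("disconnect: tearing down %u queues\n", vif.num_queues);
        vif.num_queues = 0;
        return 1;
    }
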
index 5a7872a..055222b 100644
@@ -1287,7 +1287,7 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 
        if (likely(netif_carrier_ok(dev) &&
                   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
-                       napi_schedule(&queue->napi);
+               napi_schedule(&queue->napi);
 
        return IRQ_HANDLED;
 }
@@ -1437,16 +1437,12 @@ static void xennet_end_access(int ref, void *page)
 static void xennet_disconnect_backend(struct netfront_info *info)
 {
        unsigned int i = 0;
-       struct netfront_queue *queue = NULL;
        unsigned int num_queues = info->netdev->real_num_tx_queues;
 
+       netif_carrier_off(info->netdev);
+
        for (i = 0; i < num_queues; ++i) {
-               /* Stop old i/f to prevent errors whilst we rebuild the state. */
-               spin_lock_bh(&queue->rx_lock);
-               spin_lock_irq(&queue->tx_lock);
-               netif_carrier_off(queue->info->netdev);
-               spin_unlock_irq(&queue->tx_lock);
-               spin_unlock_bh(&queue->rx_lock);
+               struct netfront_queue *queue = &info->queues[i];
 
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
                        unbind_from_irqhandler(queue->tx_irq, queue);
@@ -1457,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
                queue->tx_evtchn = queue->rx_evtchn = 0;
                queue->tx_irq = queue->rx_irq = 0;
 
+               napi_synchronize(&queue->napi);
+
                /* End access and free the pages */
                xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
                xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
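
The napi_synchronize() added above guarantees that no poll is still running when the rings are freed. A rough pthread analogy of that ordering, purely illustrative:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_bool carrier = true;
    static atomic_bool in_poll;

    static void *poller(void *arg)
    {
        (void)arg;
        while (atomic_load(&carrier)) {
            atomic_store(&in_poll, true);
            /* ...consume ring entries... */
            atomic_store(&in_poll, false);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_create(&t, NULL, poller, NULL);
        usleep(1000);
        atomic_store(&carrier, false);   /* netif_carrier_off() analogue */
        while (atomic_load(&in_poll))    /* napi_synchronize() analogue */
            ;
        pthread_join(t, NULL);
        puts("poller finished: safe to free the rings");
        return 0;
    }
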
@@ -1698,8 +1696,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
                goto exit_free_tx;
        }
 
-       netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
-
        return 0;
 
  exit_free_tx:
@@ -1790,6 +1786,70 @@ error:
        return err;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+       unsigned int i;
+
+       rtnl_lock();
+
+       for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+               struct netfront_queue *queue = &info->queues[i];
+
+               if (netif_running(info->netdev))
+                       napi_disable(&queue->napi);
+               netif_napi_del(&queue->napi);
+       }
+
+       rtnl_unlock();
+
+       kfree(info->queues);
+       info->queues = NULL;
+}
+
+static int xennet_create_queues(struct netfront_info *info,
+                               unsigned int num_queues)
+{
+       unsigned int i;
+       int ret;
+
+       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+                              GFP_KERNEL);
+       if (!info->queues)
+               return -ENOMEM;
+
+       rtnl_lock();
+
+       for (i = 0; i < num_queues; i++) {
+               struct netfront_queue *queue = &info->queues[i];
+
+               queue->id = i;
+               queue->info = info;
+
+               ret = xennet_init_queue(queue);
+               if (ret < 0) {
+                       dev_warn(&info->netdev->dev, "only created %d queues\n",
+                                i);
+                       num_queues = i;
+                       break;
+               }
+
+               netif_napi_add(queue->info->netdev, &queue->napi,
+                              xennet_poll, 64);
+               if (netif_running(info->netdev))
+                       napi_enable(&queue->napi);
+       }
+
+       netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+       rtnl_unlock();
+
+       if (num_queues == 0) {
+               dev_err(&info->netdev->dev, "no queues\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
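
xennet_destroy_queues()/xennet_create_queues() above follow a common create-with-partial-rollback shape. A stripped-down sketch of that shape (hypothetical names, napi handling elided):

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int id; };
    struct info  { struct queue *queues; unsigned int n; };

    static void destroy_queues(struct info *info)
    {
        free(info->queues);          /* napi_disable()/netif_napi_del() elided */
        info->queues = NULL;
        info->n = 0;
    }

    static int create_queues(struct info *info, unsigned int n)
    {
        info->queues = calloc(n, sizeof(*info->queues));
        if (!info->queues)
            return -1;
        for (unsigned int i = 0; i < n; i++)
            info->queues[i].id = (int)i;
        info->n = n;
        return 0;
    }

    int main(void)
    {
        struct info info = { 0 };

        create_queues(&info, 2);
        if (info.queues)             /* resume path: stale queues exist */
            destroy_queues(&info);
        if (create_queues(&info, 4) == 0)
            printf("%u fresh queues\n", info.n);
        destroy_queues(&info);
        return 0;
    }
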
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
                           struct netfront_info *info)
@@ -1826,42 +1886,20 @@ static int talk_to_netback(struct xenbus_device *dev,
                goto out;
        }
 
-       /* Allocate array of queues */
-       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
-       if (!info->queues) {
-               err = -ENOMEM;
-               goto out;
-       }
-       rtnl_lock();
-       netif_set_real_num_tx_queues(info->netdev, num_queues);
-       rtnl_unlock();
+       if (info->queues)
+               xennet_destroy_queues(info);
+
+       err = xennet_create_queues(info, num_queues);
+       if (err < 0)
+               goto destroy_ring;
 
        /* Create shared ring, alloc event channel -- for each queue */
        for (i = 0; i < num_queues; ++i) {
                queue = &info->queues[i];
-               queue->id = i;
-               queue->info = info;
-               err = xennet_init_queue(queue);
-               if (err) {
-                       /* xennet_init_queue() cleans up after itself on failure,
-                        * but we still have to clean up any previously initialised
-                        * queues. If i > 0, set num_queues to i, then goto
-                        * destroy_ring, which calls xennet_disconnect_backend()
-                        * to tidy up.
-                        */
-                       if (i > 0) {
-                               rtnl_lock();
-                               netif_set_real_num_tx_queues(info->netdev, i);
-                               rtnl_unlock();
-                               goto destroy_ring;
-                       } else {
-                               goto out;
-                       }
-               }
                err = setup_netfront(dev, queue, feature_split_evtchn);
                if (err) {
-                       /* As for xennet_init_queue(), setup_netfront() will tidy
-                        * up the current queue on error, but we need to clean up
+                       /* setup_netfront() will tidy up the current
+                        * queue on error, but we need to clean up
                         * those already allocated.
                         */
                        if (i > 0) {
@@ -2005,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
        /* By now, the queue structures have been set up */
        for (j = 0; j < num_queues; ++j) {
                queue = &np->queues[j];
-               spin_lock_bh(&queue->rx_lock);
-               spin_lock_irq(&queue->tx_lock);
 
                /* Step 1: Discard all pending TX packet fragments. */
+               spin_lock_irq(&queue->tx_lock);
                xennet_release_tx_bufs(queue);
+               spin_unlock_irq(&queue->tx_lock);
 
                /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+               spin_lock_bh(&queue->rx_lock);
+
                for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
                        skb_frag_t *frag;
                        const struct page *page;
@@ -2035,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
                }
 
                queue->rx.req_prod_pvt = requeue_idx;
+
+               spin_unlock_bh(&queue->rx_lock);
        }
 
        /*
@@ -2046,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
        netif_carrier_on(np->netdev);
        for (j = 0; j < num_queues; ++j) {
                queue = &np->queues[j];
+
                notify_remote_via_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        notify_remote_via_irq(queue->rx_irq);
-               xennet_tx_buf_gc(queue);
-               xennet_alloc_rx_buffers(queue);
 
+               spin_lock_irq(&queue->tx_lock);
+               xennet_tx_buf_gc(queue);
                spin_unlock_irq(&queue->tx_lock);
+
+               spin_lock_bh(&queue->rx_lock);
+               xennet_alloc_rx_buffers(queue);
                spin_unlock_bh(&queue->rx_lock);
        }
 
index 8368d96..b986480 100644
@@ -227,7 +227,8 @@ static int __of_node_add(struct device_node *np)
        np->kobj.kset = of_kset;
        if (!np->parent) {
                /* Nodes without parents are new top level trees */
-               rc = kobject_add(&np->kobj, NULL, safe_name(&of_kset->kobj, "base"));
+               rc = kobject_add(&np->kobj, NULL, "%s",
+                                safe_name(&of_kset->kobj, "base"));
        } else {
                name = safe_name(&np->parent->kobj, kbasename(np->full_name));
                if (!name || !name[0])
@@ -1960,9 +1961,9 @@ int of_attach_node(struct device_node *np)
 
        raw_spin_lock_irqsave(&devtree_lock, flags);
        np->sibling = np->parent->child;
-       np->allnext = of_allnodes;
+       np->allnext = np->parent->allnext;
+       np->parent->allnext = np;
        np->parent->child = np;
-       of_allnodes = np;
        of_node_clear_flag(np, OF_DETACHED);
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
index c4cddf0..9aa012e 100644
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
 
+/*
+ * of_fdt_limit_memory - limit the number of regions in the /memory node
+ * @limit: maximum entries
+ *
+ * Adjust the flattened device tree so that the /memory node contains
+ * at most 'limit' memory entries. This function may be called any time
+ * after initial_boot_params is set.
+ */
+void of_fdt_limit_memory(int limit)
+{
+       int memory;
+       int len;
+       const void *val;
+       int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+       int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+       const uint32_t *addr_prop;
+       const uint32_t *size_prop;
+       int root_offset;
+       int cell_size;
+
+       root_offset = fdt_path_offset(initial_boot_params, "/");
+       if (root_offset < 0)
+               return;
+
+       addr_prop = fdt_getprop(initial_boot_params, root_offset,
+                               "#address-cells", NULL);
+       if (addr_prop)
+               nr_address_cells = fdt32_to_cpu(*addr_prop);
+
+       size_prop = fdt_getprop(initial_boot_params, root_offset,
+                               "#size-cells", NULL);
+       if (size_prop)
+               nr_size_cells = fdt32_to_cpu(*size_prop);
+
+       cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
+
+       memory = fdt_path_offset(initial_boot_params, "/memory");
+       if (memory > 0) {
+               val = fdt_getprop(initial_boot_params, memory, "reg", &len);
+               if (len > limit*cell_size) {
+                       len = limit*cell_size;
+                       pr_debug("Limiting number of entries to %d\n", limit);
+                       fdt_setprop(initial_boot_params, memory, "reg", val,
+                                       len);
+               }
+       }
+}
+
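
A worked example of the arithmetic in of_fdt_limit_memory(), under assumed cell counts:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int nr_address_cells = 2, nr_size_cells = 2;  /* typical 64-bit DT */
        int limit = 4;                                /* keep 4 entries */
        int cell_size = sizeof(uint32_t) * (nr_address_cells + nr_size_cells);
        int len = 7 * cell_size;                      /* say /memory has 7 */

        if (len > limit * cell_size)
            len = limit * cell_size;
        printf("cell_size=%d bytes, capped len=%d bytes (%d entries)\n",
               cell_size, len, len / cell_size);      /* 16, 64, 4 */
        return 0;
    }
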
 /**
  * of_fdt_is_compatible - Return true if given node from the given blob has
  * compat in its compatible list
@@ -880,6 +928,21 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
        const u64 phys_offset = __pa(PAGE_OFFSET);
        base &= PAGE_MASK;
        size &= PAGE_MASK;
+
+       if (sizeof(phys_addr_t) < sizeof(u64)) {
+               if (base > ULONG_MAX) {
+                       pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+                                       base, base + size);
+                       return;
+               }
+
+               if (base + size > ULONG_MAX) {
+                       pr_warning("Ignoring memory range 0x%lx - 0x%llx\n",
+                                       ULONG_MAX, base + size);
+                       size = ULONG_MAX - base;
+               }
+       }
+
        if (base + size < phys_offset) {
                pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
                           base, base + size);
@@ -922,7 +985,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
 }
 #endif
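
The clamp added to early_init_dt_add_memory_arch() above trims blocks that a 32-bit phys_addr_t cannot address instead of letting them wrap. A small standalone demonstration with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t ulong_max = 0xffffffffULL;   /* pretend sizeof(long) == 4 */
        uint64_t base = 0x80000000ULL, size = 0x100000000ULL;

        if (base > ulong_max) {
            puts("ignore: block starts above the addressable range");
            return 0;
        }
        if (base + size > ulong_max) {
            printf("trim: 0x%llx bytes -> 0x%llx bytes\n",
                   (unsigned long long)size,
                   (unsigned long long)(ulong_max - base));
            size = ulong_max - base;
        }
        return 0;
    }
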
 
-bool __init early_init_dt_scan(void *params)
+bool __init early_init_dt_verify(void *params)
 {
        if (!params)
                return false;
@@ -936,6 +999,12 @@ bool __init early_init_dt_scan(void *params)
                return false;
        }
 
+       return true;
+}
+
+void __init early_init_dt_scan_nodes(void)
+{
        /* Retrieve various information from the /chosen node */
        of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
 
@@ -944,7 +1013,17 @@ bool __init early_init_dt_scan(void *params)
 
        /* Setup memory, calling early_init_dt_add_memory_arch */
        of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+bool __init early_init_dt_scan(void *params)
+{
+       bool status;
+
+       status = early_init_dt_verify(params);
+       if (!status)
+               return false;
 
+       early_init_dt_scan_nodes();
        return true;
 }
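
The point of splitting early_init_dt_scan() is that platform code can now edit the verified blob, for example with of_fdt_limit_memory(), before the nodes are parsed. A hypothetical caller, with stubs standing in for the kernel functions:

    #include <stdbool.h>
    #include <stdio.h>

    static bool early_init_dt_verify(void *p)  { return p != NULL; }
    static void of_fdt_limit_memory(int n)     { printf("cap /memory to %d\n", n); }
    static void early_init_dt_scan_nodes(void) { puts("scan chosen/root/memory"); }

    int main(void)
    {
        char blob[1];                 /* placeholder for the fdt */

        if (!early_init_dt_verify(blob))
            return 1;
        of_fdt_limit_memory(4);       /* edit between verify and scan */
        early_init_dt_scan_nodes();
        return 0;
    }
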
 
index fb4a598..401b245 100644
@@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 }
 EXPORT_SYMBOL(of_mdiobus_register);
 
-/**
- * of_mdiobus_link_phydev - Find a device node for a phy
- * @mdio: pointer to mii_bus structure
- * @phydev: phydev for which the of_node pointer should be set
- *
- * Walk the list of subnodes of a mdio bus and look for a node that matches the
- * phy's address with its 'reg' property. If found, set the of_node pointer for
- * the phy. This allows auto-probed pyh devices to be supplied with information
- * passed in via DT.
- */
-void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                           struct phy_device *phydev)
-{
-       struct device *dev = &phydev->dev;
-       struct device_node *child;
-
-       if (dev->of_node || !mdio->dev.of_node)
-               return;
-
-       for_each_available_child_of_node(mdio->dev.of_node, child) {
-               int addr;
-
-               addr = of_mdio_parse_addr(&mdio->dev, child);
-               if (addr < 0)
-                       continue;
-
-               if (addr == phydev->addr) {
-                       dev->of_node = child;
-                       return;
-               }
-       }
-}
-EXPORT_SYMBOL(of_mdiobus_link_phydev);
-
 /* Helper function for of_phy_find_device */
 static int of_phy_match(struct device *dev, void *phy_np)
 {
@@ -323,11 +289,13 @@ int of_phy_register_fixed_link(struct device_node *np)
        fixed_link_node = of_get_child_by_name(np, "fixed-link");
        if (fixed_link_node) {
                status.link = 1;
-               status.duplex = of_property_read_bool(np, "full-duplex");
+               status.duplex = of_property_read_bool(fixed_link_node,
+                                                     "full-duplex");
                if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
                        return -EINVAL;
-               status.pause = of_property_read_bool(np, "pause");
-               status.asym_pause = of_property_read_bool(np, "asym-pause");
+               status.pause = of_property_read_bool(fixed_link_node, "pause");
+               status.asym_pause = of_property_read_bool(fixed_link_node,
+                                                         "asym-pause");
                of_node_put(fixed_link_node);
                return fixed_phy_register(PHY_POLL, &status, np);
        }
index 6c48d73..500436f 100644
@@ -166,10 +166,6 @@ static void of_dma_configure(struct platform_device *pdev)
        int ret;
        struct device *dev = &pdev->dev;
 
-#if defined(CONFIG_MICROBLAZE)
-       pdev->archdata.dma_mask = 0xffffffffUL;
-#endif
-
        /*
         * Set default dma-mask to 32 bit. Drivers are expected to setup
         * the correct supported dma_mask.
index 2872ece..44333bd 100644
@@ -5,6 +5,12 @@
 # Parport configuration.
 #
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+       bool
+       help
+         Select this config option from the architecture Kconfig if
+         the architecture might have PC parallel port hardware.
+
 menuconfig PARPORT
        tristate "Parallel port support"
        depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
 
          If unsure, say Y.
 
-config ARCH_MIGHT_HAVE_PC_PARPORT
-       bool
-       help
-         Select this config option from the architecture Kconfig if
-         the architecture might have PC parallel port hardware.
-
 if PARPORT
 
 config PARPORT_PC
index 44fe6aa..3d2076f 100644
@@ -385,4 +385,4 @@ module_platform_driver(gen_pci_driver);
 
 MODULE_DESCRIPTION("Generic PCI host driver");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
index ce23e0f..a8c6f1a 100644
@@ -1094,4 +1094,4 @@ module_platform_driver(mvebu_pcie_driver);
 
 MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
 MODULE_DESCRIPTION("Marvell EBU PCIe driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
index 083cf37..c284e84 100644
@@ -1716,4 +1716,4 @@ module_platform_driver(tegra_pcie_driver);
 
 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
 MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
index f7d3de3..4884ee5 100644
 #define  PCIE_CONF_DEV(d)      (((d) & 0x1f) << 19)
 #define  PCIE_CONF_FUNC(f)     (((f) & 0x7) << 16)
 
-#define PCI_MAX_RESOURCES 4
+#define RCAR_PCI_MAX_RESOURCES 4
 #define MAX_NR_INBOUND_MAPS 6
 
 struct rcar_msi {
@@ -127,7 +127,7 @@ static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip)
 struct rcar_pcie {
        struct device           *dev;
        void __iomem            *base;
-       struct resource         res[PCI_MAX_RESOURCES];
+       struct resource         res[RCAR_PCI_MAX_RESOURCES];
        struct resource         busn;
        int                     root_bus_nr;
        struct clk              *clk;
@@ -140,36 +140,37 @@ static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys)
        return sys->private_data;
 }
 
-static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
-                         unsigned long reg)
+static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
+                              unsigned long reg)
 {
        writel(val, pcie->base + reg);
 }
 
-static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg)
+static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
+                                      unsigned long reg)
 {
        return readl(pcie->base + reg);
 }
 
 enum {
-       PCI_ACCESS_READ,
-       PCI_ACCESS_WRITE,
+       RCAR_PCI_ACCESS_READ,
+       RCAR_PCI_ACCESS_WRITE,
 };
 
 static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
 {
        int shift = 8 * (where & 3);
-       u32 val = pci_read_reg(pcie, where & ~3);
+       u32 val = rcar_pci_read_reg(pcie, where & ~3);
 
        val &= ~(mask << shift);
        val |= data << shift;
-       pci_write_reg(pcie, val, where & ~3);
+       rcar_pci_write_reg(pcie, val, where & ~3);
 }
 
 static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
 {
        int shift = 8 * (where & 3);
-       u32 val = pci_read_reg(pcie, where & ~3);
+       u32 val = rcar_pci_read_reg(pcie, where & ~3);
 
        return val >> shift;
 }
@@ -205,14 +206,14 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
                if (dev != 0)
                        return PCIBIOS_DEVICE_NOT_FOUND;
 
-               if (access_type == PCI_ACCESS_READ) {
-                       *data = pci_read_reg(pcie, PCICONF(index));
+               if (access_type == RCAR_PCI_ACCESS_READ) {
+                       *data = rcar_pci_read_reg(pcie, PCICONF(index));
                } else {
                        /* Keep an eye out for changes to the root bus number */
                        if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
                                pcie->root_bus_nr = *data & 0xff;
 
-                       pci_write_reg(pcie, *data, PCICONF(index));
+                       rcar_pci_write_reg(pcie, *data, PCICONF(index));
                }
 
                return PCIBIOS_SUCCESSFUL;
@@ -222,20 +223,20 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        /* Clear errors */
-       pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
+       rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);
 
        /* Set the PIO address */
-       pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) |
-                               PCIE_CONF_FUNC(func) | reg, PCIECAR);
+       rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
+               PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);
 
        /* Enable the configuration access */
        if (bus->parent->number == pcie->root_bus_nr)
-               pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
+               rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
        else
-               pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
+               rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);
 
        /* Check for errors */
-       if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
+       if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
                return PCIBIOS_DEVICE_NOT_FOUND;
 
        /* Check for master and target aborts */
@@ -243,13 +244,13 @@ static int rcar_pcie_config_access(struct rcar_pcie *pcie,
                (PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
                return PCIBIOS_DEVICE_NOT_FOUND;
 
-       if (access_type == PCI_ACCESS_READ)
-               *data = pci_read_reg(pcie, PCIECDR);
+       if (access_type == RCAR_PCI_ACCESS_READ)
+               *data = rcar_pci_read_reg(pcie, PCIECDR);
        else
-               pci_write_reg(pcie, *data, PCIECDR);
+               rcar_pci_write_reg(pcie, *data, PCIECDR);
 
        /* Disable the configuration access */
-       pci_write_reg(pcie, 0, PCIECCTLR);
+       rcar_pci_write_reg(pcie, 0, PCIECCTLR);
 
        return PCIBIOS_SUCCESSFUL;
 }
@@ -260,12 +261,7 @@ static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
        struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
        int ret;
 
-       if ((size == 2) && (where & 1))
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-       else if ((size == 4) && (where & 3))
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+       ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
                                      bus, devfn, where, val);
        if (ret != PCIBIOS_SUCCESSFUL) {
                *val = 0xffffffff;
@@ -291,12 +287,7 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
        int shift, ret;
        u32 data;
 
-       if ((size == 2) && (where & 1))
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-       else if ((size == 4) && (where & 3))
-               return PCIBIOS_BAD_REGISTER_NUMBER;
-
-       ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
+       ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
                                      bus, devfn, where, &data);
        if (ret != PCIBIOS_SUCCESSFUL)
                return ret;
@@ -315,7 +306,7 @@ static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
        } else
                data = val;
 
-       ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE,
+       ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
                                      bus, devfn, where, &data);
 
        return ret;
@@ -326,14 +317,15 @@ static struct pci_ops rcar_pcie_ops = {
        .write  = rcar_pcie_write_conf,
 };
 
-static void rcar_pcie_setup_window(int win, struct resource *res,
-                                  struct rcar_pcie *pcie)
+static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie)
 {
+       struct resource *res = &pcie->res[win];
+
        /* Setup PCIe address space mappings for each resource */
        resource_size_t size;
        u32 mask;
 
-       pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
+       rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));
 
        /*
         * The PAMR mask is calculated in units of 128Bytes, which
@@ -341,17 +333,17 @@ static void rcar_pcie_setup_window(int win, struct resource *res,
         */
        size = resource_size(res);
        mask = (roundup_pow_of_two(size) / SZ_128) - 1;
-       pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
+       rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));
 
-       pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
-       pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
+       rcar_pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
+       rcar_pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));
 
        /* First resource is for IO */
        mask = PAR_ENABLE;
        if (res->flags & IORESOURCE_IO)
                mask |= IO_SPACE;
 
-       pci_write_reg(pcie, mask, PCIEPTCTLR(win));
+       rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
 }
 
 static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
@@ -363,13 +355,13 @@ static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
        pcie->root_bus_nr = -1;
 
        /* Setup PCI resources */
-       for (i = 0; i < PCI_MAX_RESOURCES; i++) {
+       for (i = 0; i < RCAR_PCI_MAX_RESOURCES; i++) {
 
                res = &pcie->res[i];
                if (!res->flags)
                        continue;
 
-               rcar_pcie_setup_window(i, res, pcie);
+               rcar_pcie_setup_window(i, pcie);
 
                if (res->flags & IORESOURCE_IO)
                        pci_ioremap_io(nr * SZ_64K, res->start);
@@ -415,7 +407,7 @@ static int phy_wait_for_ack(struct rcar_pcie *pcie)
        unsigned int timeout = 100;
 
        while (timeout--) {
-               if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
+               if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
                        return 0;
 
                udelay(100);
@@ -438,15 +430,15 @@ static void phy_write_reg(struct rcar_pcie *pcie,
                ((addr & 0xff) << ADR_POS);
 
        /* Set write data */
-       pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
-       pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
+       rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
+       rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);
 
        /* Ignore errors as they will be dealt with if the data link is down */
        phy_wait_for_ack(pcie);
 
        /* Clear command */
-       pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
-       pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
+       rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
+       rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);
 
        /* Ignore errors as they will be dealt with if the data link is down */
        phy_wait_for_ack(pcie);
@@ -457,7 +449,7 @@ static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
        unsigned int timeout = 10;
 
        while (timeout--) {
-               if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
+               if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
                        return 0;
 
                msleep(5);
@@ -471,17 +463,17 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
        int err;
 
        /* Begin initialization */
-       pci_write_reg(pcie, 0, PCIETCTLR);
+       rcar_pci_write_reg(pcie, 0, PCIETCTLR);
 
        /* Set mode */
-       pci_write_reg(pcie, 1, PCIEMSR);
+       rcar_pci_write_reg(pcie, 1, PCIEMSR);
 
        /*
         * Initial header for port config space is type 1, set the device
         * class to match. Hardware takes care of propagating the IDSETR
         * settings, so there is no need to bother with a quirk.
         */
-       pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
+       rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);
 
        /*
         * Setup Secondary Bus Number & Subordinate Bus Number, even though
@@ -491,33 +483,31 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
        rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);
 
        /* Initialize default capabilities. */
-       rcar_rmw32(pcie, REXPCAP(0), 0, PCI_CAP_ID_EXP);
+       rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
        rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
                PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
        rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
                PCI_HEADER_TYPE_BRIDGE);
 
        /* Enable data link layer active state reporting */
-       rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), 0, PCI_EXP_LNKCAP_DLLLARC);
+       rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
+               PCI_EXP_LNKCAP_DLLLARC);
 
        /* Write out the physical slot number = 0 */
        rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);
 
        /* Set the completion timer timeout to the maximum 50ms. */
-       rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50);
+       rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);
 
        /* Terminate list of capabilities (Next Capability Offset=0) */
-       rcar_rmw32(pcie, RVCCAP(0), 0xfff0, 0);
-
-       /* Enable MAC data scrambling. */
-       rcar_rmw32(pcie, MACCTLR, SCRAMBLE_DISABLE, 0);
+       rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);
 
        /* Enable MSI */
        if (IS_ENABLED(CONFIG_PCI_MSI))
-               pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);
+               rcar_pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);
 
        /* Finish initialization - establish a PCI Express link */
-       pci_write_reg(pcie, CFINIT, PCIETCTLR);
+       rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);
 
        /* This will timeout if we don't have a link. */
        err = rcar_pcie_wait_for_dl(pcie);
@@ -527,11 +517,6 @@ static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
        /* Enable INTx interrupts */
        rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);
 
-       /* Enable slave Bus Mastering */
-       rcar_rmw32(pcie, RCONF(PCI_STATUS), PCI_STATUS_DEVSEL_MASK,
-               PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
-               PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST);
-
        wmb();
 
        return 0;
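
Several rcar_rmw32() masks in this hunk were widened (e.g. 0 became 0xff for the capability ID). The helper clears mask << shift before OR-ing in data, so a too-narrow mask leaves stale bits behind, as this toy example shows:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg = 0x000000a5;   /* pretend capability-ID byte is 0xa5 */

    static void rmw32(int where, uint32_t mask, uint32_t data)
    {
        int shift = 8 * (where & 3);
        uint32_t val = reg;

        val &= ~(mask << shift);
        val |= data << shift;
        reg = val;
    }

    int main(void)
    {
        rmw32(0, 0, 0x10);      /* old, mask 0: 0xa5 | 0x10 = 0xb5 (wrong) */
        printf("mask 0x00: %#04x\n", reg & 0xff);
        reg = 0x000000a5;
        rmw32(0, 0xff, 0x10);   /* fixed, mask 0xff: byte becomes 0x10 */
        printf("mask 0xff: %#04x\n", reg & 0xff);
        return 0;
    }
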
@@ -560,7 +545,7 @@ static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
        phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);
 
        while (timeout--) {
-               if (pci_read_reg(pcie, H1_PCIEPHYSR))
+               if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
                        return rcar_pcie_hw_init(pcie);
 
                msleep(5);
@@ -599,7 +584,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
        struct rcar_msi *msi = &pcie->msi;
        unsigned long reg;
 
-       reg = pci_read_reg(pcie, PCIEMSIFR);
+       reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
 
        /* MSI & INTx share an interrupt - we only handle MSI here */
        if (!reg)
@@ -610,7 +595,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
                unsigned int irq;
 
                /* clear the interrupt */
-               pci_write_reg(pcie, 1 << index, PCIEMSIFR);
+               rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);
 
                irq = irq_find_mapping(msi->domain, index);
                if (irq) {
@@ -624,7 +609,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
                }
 
                /* see if there's any more pending in this vector */
-               reg = pci_read_reg(pcie, PCIEMSIFR);
+               reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
        }
 
        return IRQ_HANDLED;
@@ -651,8 +636,8 @@ static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
 
        irq_set_msi_desc(irq, desc);
 
-       msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
-       msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR);
+       msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
+       msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
        msg.data = hwirq;
 
        write_msi_msg(irq, &msg);
@@ -729,11 +714,11 @@ static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
        msi->pages = __get_free_pages(GFP_KERNEL, 0);
        base = virt_to_phys((void *)msi->pages);
 
-       pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
-       pci_write_reg(pcie, 0, PCIEMSIAUR);
+       rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
+       rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);
 
        /* enable all MSI interrupts */
-       pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
+       rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);
 
        return 0;
 
@@ -826,6 +811,7 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
        if (cpu_addr > 0) {
                unsigned long nr_zeros = __ffs64(cpu_addr);
                u64 alignment = 1ULL << nr_zeros;
+
                size = min(range->size, alignment);
        } else {
                size = range->size;
@@ -841,13 +827,13 @@ static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
                 * Set up 64-bit inbound regions as the range parser doesn't
                 * distinguish between 32 and 64-bit types.
                 */
-               pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
-               pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
-               pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
+               rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
+               rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
+               rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));
 
-               pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
-               pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
-               pci_write_reg(pcie, 0, PCIELAMR(idx+1));
+               rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
+               rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
+               rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));
 
                pci_addr += size;
                cpu_addr += size;
@@ -952,7 +938,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
                of_pci_range_to_resource(&range, pdev->dev.of_node,
                                                &pcie->res[win++]);
 
-               if (win > PCI_MAX_RESOURCES)
+               if (win > RCAR_PCI_MAX_RESOURCES)
                        break;
        }
 
@@ -982,7 +968,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
                return 0;
        }
 
-       data = pci_read_reg(pcie, MACSR);
+       data = rcar_pci_read_reg(pcie, MACSR);
        dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);
 
        rcar_pcie_enable(pcie);
@@ -1003,4 +989,4 @@ module_platform_driver(rcar_pcie_driver);
 
 MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
 MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
index 4a392c4..d81648f 100644
@@ -216,8 +216,7 @@ void cpqhp_create_debugfs_files(struct controller *ctrl)
 
 void cpqhp_remove_debugfs_files(struct controller *ctrl)
 {
-       if (ctrl->dentry)
-               debugfs_remove(ctrl->dentry);
+       debugfs_remove(ctrl->dentry);
        ctrl->dentry = NULL;
 }
 
index 8e9012d..9e5a9fb 100644
@@ -92,9 +92,10 @@ struct controller {
        struct slot *slot;
        wait_queue_head_t queue;        /* sleep & wake process */
        u32 slot_cap;
+       u32 slot_ctrl;
        struct timer_list poll_timer;
+       unsigned long cmd_started;      /* jiffies */
        unsigned int cmd_busy:1;
-       unsigned int no_cmd_complete:1;
        unsigned int link_active_reporting:1;
        unsigned int notification_enabled:1;
        unsigned int power_fault_detected;
index a2297db..07aa722 100644
@@ -255,6 +255,13 @@ static int pciehp_probe(struct pcie_device *dev)
        else if (pciehp_acpi_slot_detection_check(dev->port))
                goto err_out_none;
 
+       if (!dev->port->subordinate) {
+               /* Can happen if we run out of bus numbers during probe */
+               dev_err(&dev->device,
+                       "Hotplug bridge without secondary bus, ignoring\n");
+               goto err_out_none;
+       }
+
        ctrl = pcie_init(dev);
        if (!ctrl) {
                dev_err(&dev->device, "Controller initialization failed\n");
index 42914e0..9da84b8 100644
@@ -104,11 +104,10 @@ static inline void pciehp_free_irq(struct controller *ctrl)
                free_irq(ctrl->pcie->irq, ctrl);
 }
 
-static int pcie_poll_cmd(struct controller *ctrl)
+static int pcie_poll_cmd(struct controller *ctrl, int timeout)
 {
        struct pci_dev *pdev = ctrl_dev(ctrl);
        u16 slot_status;
-       int timeout = 1000;
 
        pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
        if (slot_status & PCI_EXP_SLTSTA_CC) {
@@ -129,18 +128,52 @@ static int pcie_poll_cmd(struct controller *ctrl)
        return 0;       /* timeout */
 }
 
-static void pcie_wait_cmd(struct controller *ctrl, int poll)
+static void pcie_wait_cmd(struct controller *ctrl)
 {
        unsigned int msecs = pciehp_poll_mode ? 2500 : 1000;
-       unsigned long timeout = msecs_to_jiffies(msecs);
+       unsigned long duration = msecs_to_jiffies(msecs);
+       unsigned long cmd_timeout = ctrl->cmd_started + duration;
+       unsigned long now, timeout;
        int rc;
 
-       if (poll)
-               rc = pcie_poll_cmd(ctrl);
+       /*
+        * If the controller does not generate notifications for command
+        * completions, we never need to wait between writes.
+        */
+       if (NO_CMD_CMPL(ctrl))
+               return;
+
+       if (!ctrl->cmd_busy)
+               return;
+
+       /*
+        * Even if the command has already timed out, we want to call
+        * pcie_poll_cmd() so it can clear PCI_EXP_SLTSTA_CC.
+        */
+       now = jiffies;
+       if (time_before_eq(cmd_timeout, now))
+               timeout = 1;
        else
+               timeout = cmd_timeout - now;
+
+       if (ctrl->slot_ctrl & PCI_EXP_SLTCTL_HPIE &&
+           ctrl->slot_ctrl & PCI_EXP_SLTCTL_CCIE)
                rc = wait_event_timeout(ctrl->queue, !ctrl->cmd_busy, timeout);
+       else
+               rc = pcie_poll_cmd(ctrl, timeout);
+
+       /*
+        * Controllers with errata like Intel CF118 don't generate
+        * completion notifications unless the power/indicator/interlock
+        * control bits are changed.  On such controllers, we'll emit this
+        * timeout message when we wait for completion of commands that
+        * don't change those bits, e.g., commands that merely enable
+        * interrupts.
+        */
        if (!rc)
-               ctrl_dbg(ctrl, "Command not completed in 1000 msec\n");
+               ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n",
+                         ctrl->slot_ctrl,
+                         jiffies_to_msecs(now - ctrl->cmd_started));
 }
 
 /**
@@ -152,34 +185,12 @@ static void pcie_wait_cmd(struct controller *ctrl, int poll)
 static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
 {
        struct pci_dev *pdev = ctrl_dev(ctrl);
-       u16 slot_status;
        u16 slot_ctrl;
 
        mutex_lock(&ctrl->ctrl_lock);
 
-       pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &slot_status);
-       if (slot_status & PCI_EXP_SLTSTA_CC) {
-               pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
-                                          PCI_EXP_SLTSTA_CC);
-               if (!ctrl->no_cmd_complete) {
-                       /*
-                        * After 1 sec and CMD_COMPLETED still not set, just
-                        * proceed forward to issue the next command according
-                        * to spec. Just print out the error message.
-                        */
-                       ctrl_dbg(ctrl, "CMD_COMPLETED not clear after 1 sec\n");
-               } else if (!NO_CMD_CMPL(ctrl)) {
-                       /*
-                        * This controller seems to notify of command completed
-                        * event even though it supports none of power
-                        * controller, attention led, power led and EMI.
-                        */
-                       ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Need to wait for command completed event\n");
-                       ctrl->no_cmd_complete = 0;
-               } else {
-                       ctrl_dbg(ctrl, "Unexpected CMD_COMPLETED. Maybe the controller is broken\n");
-               }
-       }
+       /* Wait for any previous command that might still be in progress */
+       pcie_wait_cmd(ctrl);
 
        pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &slot_ctrl);
        slot_ctrl &= ~mask;
@@ -187,22 +198,9 @@ static void pcie_write_cmd(struct controller *ctrl, u16 cmd, u16 mask)
        ctrl->cmd_busy = 1;
        smp_mb();
        pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
+       ctrl->cmd_started = jiffies;
+       ctrl->slot_ctrl = slot_ctrl;
 
-       /*
-        * Wait for command completion.
-        */
-       if (!ctrl->no_cmd_complete) {
-               int poll = 0;
-               /*
-                * if hotplug interrupt is not enabled or command
-                * completed interrupt is not enabled, we need to poll
-                * command completed event.
-                */
-               if (!(slot_ctrl & PCI_EXP_SLTCTL_HPIE) ||
-                   !(slot_ctrl & PCI_EXP_SLTCTL_CCIE))
-                       poll = 1;
-               pcie_wait_cmd(ctrl, poll);
-       }
        mutex_unlock(&ctrl->ctrl_lock);
 }
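
The rewritten pcie_write_cmd()/pcie_wait_cmd() pair records when a command was issued and waits, for at most the remainder of the budget, only when the next command is about to be written. A userspace sketch of that timing scheme, with wall-clock time standing in for jiffies:

    #include <stdio.h>
    #include <time.h>

    static double cmd_started;
    static int    cmd_busy;

    static double now(void)
    {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return ts.tv_sec + ts.tv_nsec / 1e9;
    }

    static void wait_prev_cmd(void)
    {
        double deadline = cmd_started + 1.0;   /* 1000 ms, as in the patch */

        if (!cmd_busy)
            return;
        while (cmd_busy && now() < deadline)
            ;                                  /* pcie_poll_cmd() analogue */
        if (cmd_busy)
            printf("timeout on command issued %.0f ms ago\n",
                   (now() - cmd_started) * 1000);
        cmd_busy = 0;
    }

    static void write_cmd(unsigned int cmd)
    {
        wait_prev_cmd();                       /* wait only when needed */
        printf("issue %#x\n", cmd);
        cmd_busy = 1;
        cmd_started = now();
    }

    int main(void)
    {
        write_cmd(0x1028);
        cmd_busy = 0;                          /* pretend completion irq fired */
        write_cmd(0x1030);                     /* returns without waiting */
        return 0;
    }
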
 
@@ -773,15 +771,6 @@ struct controller *pcie_init(struct pcie_device *dev)
        mutex_init(&ctrl->ctrl_lock);
        init_waitqueue_head(&ctrl->queue);
        dbg_ctrl(ctrl);
-       /*
-        * Controller doesn't notify of command completion if the "No
-        * Command Completed Support" bit is set in Slot Capability
-        * register or the controller supports none of power
-        * controller, attention led, power led and EMI.
-        */
-       if (NO_CMD_CMPL(ctrl) ||
-           !(POWER_CTRL(ctrl) | ATTN_LED(ctrl) | PWR_LED(ctrl) | EMI(ctrl)))
-               ctrl->no_cmd_complete = 1;
 
        /* Check if Data Link Layer Link Active Reporting is implemented */
        pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &link_cap);
@@ -794,7 +783,7 @@ struct controller *pcie_init(struct pcie_device *dev)
        pcie_capability_write_word(pdev, PCI_EXP_SLTSTA,
                PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
                PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
-               PCI_EXP_SLTSTA_CC);
+               PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
 
        /* Disable software notification */
        pcie_disable_notification(ctrl);
index 13f3d30..5a40516 100644
@@ -149,15 +149,14 @@ static void msi_set_enable(struct pci_dev *dev, int enable)
        pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
 }
 
-static void msix_set_enable(struct pci_dev *dev, int enable)
+static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
 {
-       u16 control;
+       u16 ctrl;
 
-       pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
-       control &= ~PCI_MSIX_FLAGS_ENABLE;
-       if (enable)
-               control |= PCI_MSIX_FLAGS_ENABLE;
-       pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+       pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+       ctrl &= ~clear;
+       ctrl |= set;
+       pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
 }
 
 static inline __attribute_const__ u32 msi_mask(unsigned x)
@@ -168,16 +167,6 @@ static inline __attribute_const__ u32 msi_mask(unsigned x)
        return (1 << (1 << x)) - 1;
 }
 
-static inline __attribute_const__ u32 msi_capable_mask(u16 control)
-{
-       return msi_mask((control >> 1) & 7);
-}
-
-static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
-{
-       return msi_mask((control >> 4) & 7);
-}
-
 /*
  * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
  * mask all MSI interrupts by clearing the MSI enable bit does not work
@@ -246,7 +235,7 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag)
                msix_mask_irq(desc, flag);
                readl(desc->mask_base);         /* Flush write to device */
        } else {
-               unsigned offset = data->irq - desc->dev->irq;
+               unsigned offset = data->irq - desc->irq;
                msi_mask_irq(desc, 1 << offset, flag << offset);
        }
 }
@@ -460,7 +449,8 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
        arch_restore_msi_irqs(dev);
 
        pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-       msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
+       msi_mask_irq(entry, msi_mask(entry->msi_attrib.multi_cap),
+                    entry->masked);
        control &= ~PCI_MSI_FLAGS_QSIZE;
        control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
        pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
@@ -469,26 +459,22 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
 static void __pci_restore_msix_state(struct pci_dev *dev)
 {
        struct msi_desc *entry;
-       u16 control;
 
        if (!dev->msix_enabled)
                return;
        BUG_ON(list_empty(&dev->msi_list));
-       entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
-       pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
 
        /* route the table */
        pci_intx_for_msi(dev, 0);
-       control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
-       pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+       msix_clear_and_set_ctrl(dev, 0,
+                               PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
 
        arch_restore_msi_irqs(dev);
        list_for_each_entry(entry, &dev->msi_list, list) {
                msix_mask_irq(entry, entry->masked);
        }
 
-       control &= ~PCI_MSIX_FLAGS_MASKALL;
-       pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+       msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 }
 
 void pci_restore_msi_state(struct pci_dev *dev)
@@ -501,7 +487,6 @@ EXPORT_SYMBOL_GPL(pci_restore_msi_state);
 static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
 {
-       struct pci_dev *pdev = to_pci_dev(dev);
        struct msi_desc *entry;
        unsigned long irq;
        int retval;
@@ -510,12 +495,11 @@ static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr,
        if (retval)
                return retval;
 
-       list_for_each_entry(entry, &pdev->msi_list, list) {
-               if (entry->irq == irq) {
-                       return sprintf(buf, "%s\n",
-                                      entry->msi_attrib.is_msix ? "msix" : "msi");
-               }
-       }
+       entry = irq_get_msi_desc(irq);
+       if (entry)
+               return sprintf(buf, "%s\n",
+                               entry->msi_attrib.is_msix ? "msix" : "msi");
+
        return -ENODEV;
 }
 
@@ -594,6 +578,38 @@ error_attrs:
        return ret;
 }
 
+static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
+{
+       u16 control;
+       struct msi_desc *entry;
+
+       /* MSI Entry Initialization */
+       entry = alloc_msi_entry(dev);
+       if (!entry)
+               return NULL;
+
+       pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+       entry->msi_attrib.is_msix       = 0;
+       entry->msi_attrib.is_64         = !!(control & PCI_MSI_FLAGS_64BIT);
+       entry->msi_attrib.entry_nr      = 0;
+       entry->msi_attrib.maskbit       = !!(control & PCI_MSI_FLAGS_MASKBIT);
+       entry->msi_attrib.default_irq   = dev->irq;     /* Save IOAPIC IRQ */
+       entry->msi_attrib.pos           = dev->msi_cap;
+       entry->msi_attrib.multi_cap     = (control & PCI_MSI_FLAGS_QMASK) >> 1;
+
+       if (control & PCI_MSI_FLAGS_64BIT)
+               entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+       else
+               entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+
+       /* Save the initial mask status */
+       if (entry->msi_attrib.maskbit)
+               pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
+
+       return entry;
+}
+
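
With multi_cap now cached in the descriptor, the mask is recomputed as msi_mask(multi_cap) instead of re-reading the control word. A worked example of that arithmetic (the guard mirrors the kernel's, since 1 << 32 would overflow):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t msi_mask(unsigned x)
    {
        if (x >= 5)                   /* 32 vectors: all mask bits set */
            return 0xffffffff;
        return (1u << (1u << x)) - 1;
    }

    int main(void)
    {
        for (unsigned cap = 0; cap <= 5; cap++)
            printf("multi_cap=%u -> %2u vectors, mask=%#010x\n",
                   cap, 1u << cap, (unsigned)msi_mask(cap));
        return 0;
    }
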
 /**
  * msi_capability_init - configure device's MSI capability structure
  * @dev: pointer to the pci_dev data structure of MSI device function
@@ -609,32 +625,16 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
 {
        struct msi_desc *entry;
        int ret;
-       u16 control;
        unsigned mask;
 
        msi_set_enable(dev, 0); /* Disable MSI during set up */
 
-       pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
-       /* MSI Entry Initialization */
-       entry = alloc_msi_entry(dev);
+       entry = msi_setup_entry(dev);
        if (!entry)
                return -ENOMEM;
 
-       entry->msi_attrib.is_msix       = 0;
-       entry->msi_attrib.is_64         = !!(control & PCI_MSI_FLAGS_64BIT);
-       entry->msi_attrib.entry_nr      = 0;
-       entry->msi_attrib.maskbit       = !!(control & PCI_MSI_FLAGS_MASKBIT);
-       entry->msi_attrib.default_irq   = dev->irq;     /* Save IOAPIC IRQ */
-       entry->msi_attrib.pos           = dev->msi_cap;
-
-       if (control & PCI_MSI_FLAGS_64BIT)
-               entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
-       else
-               entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
        /* All MSIs are unmasked by default, Mask them all */
-       if (entry->msi_attrib.maskbit)
-               pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
-       mask = msi_capable_mask(control);
+       mask = msi_mask(entry->msi_attrib.multi_cap);
        msi_mask_irq(entry, mask, mask);
 
        list_add_tail(&entry->list, &dev->msi_list);
@@ -743,12 +743,10 @@ static int msix_capability_init(struct pci_dev *dev,
        u16 control;
        void __iomem *base;
 
-       pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
-
        /* Ensure MSI-X is disabled while it is set up */
-       control &= ~PCI_MSIX_FLAGS_ENABLE;
-       pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+       msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 
+       pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
        /* Request & Map MSI-X table region */
        base = msix_map_region(dev, msix_table_size(control));
        if (!base)
@@ -767,8 +765,8 @@ static int msix_capability_init(struct pci_dev *dev,
         * MSI-X registers.  We need to mask all the vectors to prevent
         * interrupts coming in before they're fully set up.
         */
-       control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
-       pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+       msix_clear_and_set_ctrl(dev, 0,
+                               PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE);
 
        msix_program_entries(dev, entries);
 
@@ -780,8 +778,7 @@ static int msix_capability_init(struct pci_dev *dev,
        pci_intx_for_msi(dev, 0);
        dev->msix_enabled = 1;
 
-       control &= ~PCI_MSIX_FLAGS_MASKALL;
-       pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
+       msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
 
        return 0;
 
@@ -882,7 +879,6 @@ void pci_msi_shutdown(struct pci_dev *dev)
 {
        struct msi_desc *desc;
        u32 mask;
-       u16 ctrl;
 
        if (!pci_msi_enable || !dev || !dev->msi_enabled)
                return;
@@ -895,8 +891,7 @@ void pci_msi_shutdown(struct pci_dev *dev)
        dev->msi_enabled = 0;
 
        /* Return the device with MSI unmasked as initial states */
-       pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
-       mask = msi_capable_mask(ctrl);
+       mask = msi_mask(desc->msi_attrib.multi_cap);
        /* Keep cached state to be restored */
        arch_msi_mask_irq(desc, mask, ~mask);
 
@@ -1001,7 +996,7 @@ void pci_msix_shutdown(struct pci_dev *dev)
                arch_msix_mask_irq(entry, 1);
        }
 
-       msix_set_enable(dev, 0);
+       msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
        pci_intx_for_msi(dev, 1);
        dev->msix_enabled = 0;
 }
@@ -1016,24 +1011,6 @@ void pci_disable_msix(struct pci_dev *dev)
 }
 EXPORT_SYMBOL(pci_disable_msix);
 
-/**
- * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state
- * @dev: pointer to the pci_dev data structure of MSI(X) device function
- *
- * Being called during hotplug remove, from which the device function
- * is hot-removed. All previous assigned MSI/MSI-X irqs, if
- * allocated for this device function, are reclaimed to unused state,
- * which may be used later on.
- **/
-void msi_remove_pci_irq_vectors(struct pci_dev *dev)
-{
-       if (!pci_msi_enable || !dev)
-               return;
-
-       if (dev->msi_enabled || dev->msix_enabled)
-               free_msi_irqs(dev);
-}
-
 void pci_no_msi(void)
 {
        pci_msi_enable = 0;
@@ -1065,7 +1042,7 @@ void pci_msi_init_pci_dev(struct pci_dev *dev)
 
        dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
        if (dev->msix_cap)
-               msix_set_enable(dev, 0);
+               msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
 }
 
 /**
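The hunks above route every MSI-X Message Control update through
msix_clear_and_set_ctrl(). The helper itself is not part of this excerpt; a
plausible sketch of the read-modify-write it encapsulates:

        static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
        {
                u16 ctrl;

                pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
                ctrl &= ~clear;
                ctrl |= set;
                pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
        }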
index a3fbe20..2ab1b47 100644 (file)
@@ -161,8 +161,8 @@ enum acpi_attr_enum {
 static void dsm_label_utf16s_to_utf8s(union acpi_object *obj, char *buf)
 {
        int len;
-       len = utf16s_to_utf8s((const wchar_t *)obj->string.pointer,
-                             obj->string.length,
+       len = utf16s_to_utf8s((const wchar_t *)obj->buffer.pointer,
+                             obj->buffer.length,
                              UTF16_LITTLE_ENDIAN,
                              buf, PAGE_SIZE);
        buf[len] = '\n';
@@ -187,16 +187,22 @@ static int dsm_get_label(struct device *dev, char *buf,
        tmp = obj->package.elements;
        if (obj->type == ACPI_TYPE_PACKAGE && obj->package.count == 2 &&
            tmp[0].type == ACPI_TYPE_INTEGER &&
-           tmp[1].type == ACPI_TYPE_STRING) {
+           (tmp[1].type == ACPI_TYPE_STRING ||
+            tmp[1].type == ACPI_TYPE_BUFFER)) {
                /*
                 * The second string element is optional even when
                 * this _DSM is implemented; when not implemented,
                 * this entry must return a null string.
                 */
-               if (attr == ACPI_ATTR_INDEX_SHOW)
+               if (attr == ACPI_ATTR_INDEX_SHOW) {
                        scnprintf(buf, PAGE_SIZE, "%llu\n", tmp->integer.value);
-               else if (attr == ACPI_ATTR_LABEL_SHOW)
-                       dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+               } else if (attr == ACPI_ATTR_LABEL_SHOW) {
+                       if (tmp[1].type == ACPI_TYPE_STRING)
+                               scnprintf(buf, PAGE_SIZE, "%s\n",
+                                         tmp[1].string.pointer);
+                       else if (tmp[1].type == ACPI_TYPE_BUFFER)
+                               dsm_label_utf16s_to_utf8s(tmp + 1, buf);
+               }
                len = strlen(buf) > 0 ? strlen(buf) : -1;
        }
 
index 63a54a3..2c9ac70 100644 (file)
@@ -839,12 +839,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 
        if (!__pci_complete_power_transition(dev, state))
                error = 0;
-       /*
-        * When aspm_policy is "powersave" this call ensures
-        * that ASPM is configured.
-        */
-       if (!error && dev->bus->self)
-               pcie_aspm_powersave_config_link(dev->bus->self);
 
        return error;
 }
@@ -1195,12 +1189,18 @@ int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
 static int do_pci_enable_device(struct pci_dev *dev, int bars)
 {
        int err;
+       struct pci_dev *bridge;
        u16 cmd;
        u8 pin;
 
        err = pci_set_power_state(dev, PCI_D0);
        if (err < 0 && err != -EIO)
                return err;
+
+       bridge = pci_upstream_bridge(dev);
+       if (bridge)
+               pcie_aspm_powersave_config_link(bridge);
+
        err = pcibios_enable_device(dev, bars);
        if (err < 0)
                return err;
@@ -3135,8 +3135,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
        if (probe)
                return 0;
 
-       /* Wait for Transaction Pending bit clean */
-       if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
+       /*
+        * Wait for Transaction Pending bit to clear.  A word-aligned test
+        * is used, so we use the control offset rather than status and shift
+        * the test bit to match.
+        */
+       if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
+                                PCI_AF_STATUS_TP << 8))
                goto clear;
 
        dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
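The shift is needed because PCI_AF_STATUS sits one byte above PCI_AF_CTRL in
the AF capability (offsets pos + 5 and pos + 4 in the standard layout), so a
word-aligned 16-bit read at the control offset returns the status byte in
bits 15:8. Illustrative sketch, assuming that layout:

        u16 word;
        bool pending;

        pci_read_config_word(dev, pos + PCI_AF_CTRL, &word);
        /* the STATUS byte lands in bits 15:8 of the word, hence: */
        pending = word & (PCI_AF_STATUS_TP << 8);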
@@ -3193,7 +3198,7 @@ static int pci_pm_reset(struct pci_dev *dev, int probe)
        return 0;
 }
 
-void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
+void pci_reset_secondary_bus(struct pci_dev *dev)
 {
        u16 ctrl;
 
@@ -3219,6 +3224,11 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
        ssleep(1);
 }
 
+void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
+{
+       pci_reset_secondary_bus(dev);
+}
+
 /**
  * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
  * @dev: Bridge device
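The body of pci_reset_secondary_bus() is largely elided above; what remains
(the ctrl variable and the trailing ssleep()) frames the usual Secondary Bus
Reset toggle. A sketch of the assumed sequence:

        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
        ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
        msleep(2);                      /* keep reset asserted (Trst >= 1 ms) */

        ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
        ssleep(1);                      /* let devices recover before config access */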
index 80887ea..2ccc9b9 100644 (file)
@@ -203,10 +203,6 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
             (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM)))
                return -ENODEV;
 
-       if (!dev->irq && dev->pin) {
-               dev_warn(&dev->dev, "device [%04x:%04x] has invalid IRQ; check vendor BIOS\n",
-                        dev->vendor, dev->device);
-       }
        status = pcie_port_device_register(dev);
        if (status)
                return status;
index d0f6926..ad56682 100644 (file)
@@ -3405,6 +3405,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ASMEDIA, 0x1080,
 DECLARE_PCI_FIXUP_HEADER(0x10e3, 0x8113, quirk_use_pcie_bridge_dma_alias);
 /* ITE 8892, https://bugzilla.kernel.org/show_bug.cgi?id=73551 */
 DECLARE_PCI_FIXUP_HEADER(0x1283, 0x8892, quirk_use_pcie_bridge_dma_alias);
+/* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
+DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
 
 static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
 {
index a5a63ec..6373985 100644 (file)
@@ -925,7 +925,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
 {
        struct pci_dev *dev;
        resource_size_t min_align, align, size, size0, size1;
-       resource_size_t aligns[14];     /* Alignments from 1Mb to 8Gb */
+       resource_size_t aligns[18];     /* Alignments from 1Mb to 128Gb */
        int order, max_order;
        struct resource *b_res = find_free_bus_resource(bus,
                                        mask | IORESOURCE_PREFETCH, type);
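Each aligns[] slot tracks one power-of-two allocation order starting at 2^20
bytes, so the array bounds follow directly:

        /* order 0  -> 2^20 bytes = 1 MB
         * order 13 -> 2^33 bytes = 8 GB    (old aligns[14] upper bound)
         * order 17 -> 2^37 bytes = 128 GB  (new aligns[18] upper bound)
         */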
index caed1ce..b7c3a5e 100644 (file)
@@ -166,11 +166,10 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
 {
        struct resource *root, *conflict;
        resource_size_t fw_addr, start, end;
-       int ret = 0;
 
        fw_addr = pcibios_retrieve_fw_addr(dev, resno);
        if (!fw_addr)
-               return 1;
+               return -ENOMEM;
 
        start = res->start;
        end = res->end;
@@ -189,14 +188,13 @@ static int pci_revert_fw_address(struct resource *res, struct pci_dev *dev,
                 resno, res);
        conflict = request_resource_conflict(root, res);
        if (conflict) {
-               dev_info(&dev->dev,
-                        "BAR %d: %pR conflicts with %s %pR\n", resno,
-                        res, conflict->name, conflict);
+               dev_info(&dev->dev, "BAR %d: %pR conflicts with %s %pR\n",
+                        resno, res, conflict->name, conflict);
                res->start = start;
                res->end = end;
-               ret = 1;
+               return -EBUSY;
        }
-       return ret;
+       return 0;
 }
 
 static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
@@ -250,10 +248,8 @@ static int __pci_assign_resource(struct pci_bus *bus, struct pci_dev *dev,
 static int _pci_assign_resource(struct pci_dev *dev, int resno,
                                resource_size_t size, resource_size_t min_align)
 {
-       struct resource *res = dev->resource + resno;
        struct pci_bus *bus;
        int ret;
-       char *type;
 
        bus = dev->bus;
        while ((ret = __pci_assign_resource(bus, dev, resno, size, min_align))) {
@@ -262,21 +258,6 @@ static int _pci_assign_resource(struct pci_dev *dev, int resno,
                bus = bus->parent;
        }
 
-       if (ret) {
-               if (res->flags & IORESOURCE_MEM)
-                       if (res->flags & IORESOURCE_PREFETCH)
-                               type = "mem pref";
-                       else
-                               type = "mem";
-               else if (res->flags & IORESOURCE_IO)
-                       type = "io";
-               else
-                       type = "unknown";
-               dev_info(&dev->dev,
-                        "BAR %d: can't assign %s (size %#llx)\n",
-                        resno, type, (unsigned long long) resource_size(res));
-       }
-
        return ret;
 }
 
@@ -302,17 +283,24 @@ int pci_assign_resource(struct pci_dev *dev, int resno)
         * where firmware left it.  That at least has a chance of
         * working, which is better than just leaving it disabled.
         */
-       if (ret < 0)
+       if (ret < 0) {
+               dev_info(&dev->dev, "BAR %d: no space for %pR\n", resno, res);
                ret = pci_revert_fw_address(res, dev, resno, size);
+       }
 
-       if (!ret) {
-               res->flags &= ~IORESOURCE_UNSET;
-               res->flags &= ~IORESOURCE_STARTALIGN;
-               dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
-               if (resno < PCI_BRIDGE_RESOURCES)
-                       pci_update_resource(dev, resno);
+       if (ret < 0) {
+               dev_info(&dev->dev, "BAR %d: failed to assign %pR\n", resno,
+                        res);
+               return ret;
        }
-       return ret;
+
+       res->flags &= ~IORESOURCE_UNSET;
+       res->flags &= ~IORESOURCE_STARTALIGN;
+       dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res);
+       if (resno < PCI_BRIDGE_RESOURCES)
+               pci_update_resource(dev, resno);
+
+       return 0;
 }
 EXPORT_SYMBOL(pci_assign_resource);
 
@@ -320,9 +308,11 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
                        resource_size_t min_align)
 {
        struct resource *res = dev->resource + resno;
+       unsigned long flags;
        resource_size_t new_size;
        int ret;
 
+       flags = res->flags;
        res->flags |= IORESOURCE_UNSET;
        if (!res->parent) {
                dev_info(&dev->dev, "BAR %d: can't reassign an unassigned resource %pR\n",
@@ -333,14 +323,21 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz
        /* already aligned with min_align */
        new_size = resource_size(res) + addsize;
        ret = _pci_assign_resource(dev, resno, new_size, min_align);
-       if (!ret) {
-               res->flags &= ~IORESOURCE_UNSET;
-               res->flags &= ~IORESOURCE_STARTALIGN;
-               dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res);
-               if (resno < PCI_BRIDGE_RESOURCES)
-                       pci_update_resource(dev, resno);
+       if (ret) {
+               res->flags = flags;
+               dev_info(&dev->dev, "BAR %d: %pR (failed to expand by %#llx)\n",
+                        resno, res, (unsigned long long) addsize);
+               return ret;
        }
-       return ret;
+
+       res->flags &= ~IORESOURCE_UNSET;
+       res->flags &= ~IORESOURCE_STARTALIGN;
+       dev_info(&dev->dev, "BAR %d: reassigned %pR (expanded by %#llx)\n",
+                resno, res, (unsigned long long) addsize);
+       if (resno < PCI_BRIDGE_RESOURCES)
+               pci_update_resource(dev, resno);
+
+       return 0;
 }
 
 int pci_enable_resources(struct pci_dev *dev, int mask)
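With pci_revert_fw_address() now returning 0 or a negative errno instead of a
bare 0/1, callers can distinguish the failure modes. A hypothetical
caller-side sketch (names as in the hunks above):

        ret = pci_revert_fw_address(res, dev, resno, size);
        switch (ret) {
        case 0:         /* BAR restored to its firmware-assigned address */
                break;
        case -ENOMEM:   /* firmware never recorded an address for this BAR */
                break;
        case -EBUSY:    /* the firmware address conflicts with another resource */
                break;
        }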
index 16a2f06..64b98d2 100644 (file)
@@ -112,6 +112,7 @@ config PHY_EXYNOS5250_SATA
 config PHY_SUN4I_USB
        tristate "Allwinner sunxi SoC USB PHY driver"
        depends on ARCH_SUNXI && HAS_IOMEM && OF
+       depends on RESET_CONTROLLER
        select GENERIC_PHY
        help
          Enable this to support the transceiver that is part of Allwinner
@@ -122,6 +123,7 @@ config PHY_SUN4I_USB
 
 config PHY_SAMSUNG_USB2
        tristate "Samsung USB 2.0 PHY driver"
+       depends on HAS_IOMEM
        select GENERIC_PHY
        select MFD_SYSCON
        help
index c64a2f3..49c4465 100644 (file)
@@ -614,8 +614,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
        return phy;
 
 put_dev:
-       put_device(&phy->dev);
-       ida_remove(&phy_ida, phy->id);
+       put_device(&phy->dev);  /* calls phy_release() which frees resources */
+       return ERR_PTR(ret);
+
 free_phy:
        kfree(phy);
        return ERR_PTR(ret);
@@ -799,7 +800,7 @@ static void phy_release(struct device *dev)
 
        phy = to_phy(dev);
        dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
-       ida_remove(&phy_ida, phy->id);
+       ida_simple_remove(&phy_ida, phy->id);
        kfree(phy);
 }
 
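After this change each resource in the error path has exactly one owner: the
final put_device() ends up in phy_release(), which frees the id via
ida_simple_remove(), so the old explicit ida_remove() in phy_create() would
have released it a second time. Pairing rule, assuming the id was obtained
with ida_simple_get() (the allocation is not shown in this excerpt):

        int id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL); /* at create */
        /* ... device setup ... */
        ida_simple_remove(&phy_ida, id);  /* once, in the release callback */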
index 7007c11..34b3961 100644 (file)
@@ -233,8 +233,8 @@ static int omap_usb2_probe(struct platform_device *pdev)
        if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
-               if (!phy->phy_base)
-                       return -ENOMEM;
+               if (IS_ERR(phy->phy_base))
+                       return PTR_ERR(phy->phy_base);
                phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
        }
 
@@ -262,7 +262,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
        otg->phy                = &phy->phy;
 
        platform_set_drvdata(pdev, phy);
-       pm_runtime_enable(phy->dev);
 
        generic_phy = devm_phy_create(phy->dev, &ops, NULL);
        if (IS_ERR(generic_phy))
@@ -270,10 +269,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
 
        phy_set_drvdata(generic_phy, phy);
 
+       pm_runtime_enable(phy->dev);
        phy_provider = devm_of_phy_provider_register(phy->dev,
                        of_phy_simple_xlate);
-       if (IS_ERR(phy_provider))
+       if (IS_ERR(phy_provider)) {
+               pm_runtime_disable(phy->dev);
                return PTR_ERR(phy_provider);
+       }
 
        phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
        if (IS_ERR(phy->wkupclk)) {
@@ -317,6 +319,7 @@ static int omap_usb2_remove(struct platform_device *pdev)
        if (!IS_ERR(phy->optclk))
                clk_unprepare(phy->optclk);
        usb_remove_phy(&phy->phy);
+       pm_runtime_disable(phy->dev);
 
        return 0;
 }
index 8a8c6bc..1e69a32 100644 (file)
@@ -107,6 +107,7 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = {
 #endif
        { },
 };
+MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match);
 
 static int samsung_usb2_phy_probe(struct platform_device *pdev)
 {
index edf5d2f..86db223 100644 (file)
@@ -320,7 +320,7 @@ int berlin_pinctrl_probe(struct platform_device *pdev,
 
        regmap = dev_get_regmap(&pdev->dev, NULL);
        if (!regmap)
-               return PTR_ERR(regmap);
+               return -ENODEV;
 
        pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
        if (!pctrl)
index 1bd6363..9f43916 100644 (file)
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
 
        status = readl(info->irqmux_base);
 
-       for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+       for_each_set_bit(n, &status, info->nbanks)
                __gpio_irq_handler(&info->banks[n]);
 
        chained_irq_exit(chip, desc);
index f1ca75e..5f38c7f 100644 (file)
@@ -211,6 +211,10 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
                        configlen++;
 
                pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
+               if (!pinconfig) {
+                       kfree(*map);
+                       return -ENOMEM;
+               }
 
                if (!of_property_read_u32(node, "allwinner,drive", &val)) {
                        u16 strength = (val + 1) * 10;
index b81448b..a5c6cb7 100644 (file)
@@ -319,8 +319,7 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
        struct pnp_dev *pnp = _pnp;
 
        /* true means it matched */
-       return !acpi->physical_node_count
-           && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
+       return pnp->data == acpi;
 }
 
 static struct acpi_device * __init acpi_pnp_find_companion(struct device *dev)
index 6aea373..ee3de34 100644 (file)
@@ -74,7 +74,7 @@ config DP83640_PHY
 
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
-       depends on X86 || COMPILE_TEST
+       depends on X86_32 || COMPILE_TEST
        depends on HAS_IOMEM && NET
        select PTP_1588_CLOCK
        help
index 9b60b1f..44341dc 100644 (file)
@@ -287,6 +287,12 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
                        "desc %p not ACKed\n", tx_desc);
        }
 
+       if (ret == NULL) {
+               dev_dbg(bdma_chan->dchan.device->dev,
+                       "%s: unable to obtain tx descriptor\n", __func__);
+               goto err_out;
+       }
+
        i = bdma_chan->wr_count_next % bdma_chan->bd_num;
        if (i == bdma_chan->bd_num - 1) {
                i = 0;
@@ -297,7 +303,7 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
        tx_desc->txd.phys = bdma_chan->bd_phys +
                                i * sizeof(struct tsi721_dma_desc);
        tx_desc->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
-
+err_out:
        spin_unlock_bh(&bdma_chan->lock);
 
        return ret;
index 8558521..ad9e0c9 100644 (file)
@@ -433,6 +433,7 @@ static struct regulator_ops as3722_ldo3_extcntrl_ops = {
 };
 
 static const struct regulator_linear_range as3722_ldo_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
        REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
        REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
 };
@@ -609,6 +610,7 @@ static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
 }
 
 static const struct regulator_linear_range as3722_sd2345_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
        REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
        REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
        REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
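The extra zero-width range gives selector 0 a defined mapping. The regulator
core resolves a linear range as:

        /* uV = min_uV + (selector - min_sel) * uV_step */

so REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0) maps selector 0 to 0 µV (the
register's "off" encoding) instead of leaving it outside every range.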
index 57544e2..58ece59 100644 (file)
@@ -119,6 +119,10 @@ static const unsigned int ldo_c_table[] = {
        2900000, 3000000, 3300000,
 };
 
+static const unsigned int ldo_vbus[] = {
+       5000000,
+};
+
 /* DCDC group CSR: supported voltages in microvolts */
 static const struct regulator_linear_range dcdc_csr_ranges[] = {
        REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
@@ -192,6 +196,7 @@ static struct bcm590xx_info bcm590xx_regs[] = {
        BCM590XX_REG_TABLE(gpldo4, ldo_a_table),
        BCM590XX_REG_TABLE(gpldo5, ldo_a_table),
        BCM590XX_REG_TABLE(gpldo6, ldo_a_table),
+       BCM590XX_REG_TABLE(vbus, ldo_vbus),
 };
 
 struct bcm590xx_reg {
index 110a99e..c810518 100644 (file)
@@ -255,7 +255,7 @@ static int ltc3589_parse_regulators_dt(struct ltc3589 *ltc3589)
        struct device_node *node;
        int i, ret;
 
-       node = of_find_node_by_name(dev->of_node, "regulators");
+       node = of_get_child_by_name(dev->of_node, "regulators");
        if (!node) {
                dev_err(dev, "regulators node not found\n");
                return -EINVAL;
index 864ed02..93b4ad8 100644 (file)
@@ -37,12 +37,14 @@ struct regs_info {
 };
 
 static const struct regulator_linear_range smps_low_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
        REGULATOR_LINEAR_RANGE(500000, 0x1, 0x6, 0),
        REGULATOR_LINEAR_RANGE(510000, 0x7, 0x79, 10000),
        REGULATOR_LINEAR_RANGE(1650000, 0x7A, 0x7f, 0),
 };
 
 static const struct regulator_linear_range smps_high_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
        REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x6, 0),
        REGULATOR_LINEAR_RANGE(1020000, 0x7, 0x79, 20000),
        REGULATOR_LINEAR_RANGE(3300000, 0x7A, 0x7f, 0),
@@ -323,6 +325,10 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
        if (rail_enable)
                palmas_smps_write(pmic->palmas,
                        palmas_regs_info[id].ctrl_addr, reg);
+
+       /* Switch the enable value to ensure this is used for enable */
+       pmic->desc[id].enable_val = pmic->current_reg_mode[id];
+
        return 0;
 }
 
@@ -962,6 +968,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                                return ret;
                        pmic->current_reg_mode[id] = reg &
                                        PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+
+                       pmic->desc[id].enable_reg =
+                                       PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+                                               palmas_regs_info[id].ctrl_addr);
+                       pmic->desc[id].enable_mask =
+                                       PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+                       /* set_mode overrides this value */
+                       pmic->desc[id].enable_val = SMPS_CTRL_MODE_ON;
                }
 
                pmic->desc[id].type = REGULATOR_VOLTAGE;
index 69b4b77..9effe48 100644 (file)
@@ -209,7 +209,7 @@ static const struct regulator_desc regulators[] = {
                           1, -1, -1, TPS65218_REG_ENABLE1,
                           TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0, 0),
        TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64,
-                          TPS65218_REG_CONTROL_DCDC4,
+                          TPS65218_REG_CONTROL_LDO1,
                           TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
                           TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges,
                           2, 0),
@@ -240,6 +240,7 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
        config.init_data = init_data;
        config.driver_data = tps;
        config.regmap = tps->regmap;
+       config.of_node = pdev->dev.of_node;
 
        rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
        if (IS_ERR(rdev)) {
index ce1743d..5e343ba 100644 (file)
@@ -44,7 +44,7 @@ config STE_MODEM_RPROC
 config DA8XX_REMOTEPROC
        tristate "DA8xx/OMAP-L13x remoteproc support"
        depends on ARCH_DAVINCI_DA8XX
-       select CMA
+       select CMA if MMU
        select REMOTEPROC
        select RPMSG
        help
index 1ecfe3b..1cff2a2 100644 (file)
@@ -71,7 +71,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
 {
        unsigned int tmp;
 
-       dev_debug(dev, "%s: pie=%d\n", __func__, enabled);
+       dev_dbg(dev, "%s: pie=%d\n", __func__, enabled);
 
        spin_lock_irq(&puv3_rtc_pie_lock);
        tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
@@ -140,7 +140,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        rtc_tm_to_time(tm, &rtcalarm_count);
        writel(rtcalarm_count, RTC_RTAR);
 
-       puv3_rtc_setaie(&dev->dev, alrm->enabled);
+       puv3_rtc_setaie(dev, alrm->enabled);
 
        if (alrm->enabled)
                enable_irq_wake(puv3_rtc_alarmno);
index ee0e85a..0f47175 100644 (file)
@@ -593,7 +593,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
        dev_info->start = dcssblk_find_lowest_addr(dev_info);
        dev_info->end = dcssblk_find_highest_addr(dev_info);
 
-       dev_set_name(&dev_info->dev, dev_info->segment_name);
+       dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
        dev_info->dev.release = dcssblk_release_segment;
        dev_info->dev.groups = dcssblk_dev_attr_groups;
        INIT_LIST_HEAD(&dev_info->lh);
index 629fcc2..78b6ace 100644 (file)
@@ -19,7 +19,6 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
 obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
 obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
 
-obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
 obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
 obj-$(CONFIG_VMCP) += vmcp.o
 
index 15b3459..220acb4 100644 (file)
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
        } else
                raw3270_writesf_readpart(rp);
        memset(&rp->init_reset, 0, sizeof(rp->init_reset));
-       memset(&rp->init_data, 0, sizeof(rp->init_data));
 }
 
 static int
index cd9c919..b9a9f72 100644 (file)
@@ -838,8 +838,6 @@ sclp_vt220_con_init(void)
 {
        int rc;
 
-       if (!CONSOLE_IS_SCLP)
-               return 0;
        rc = __sclp_vt220_init(sclp_console_pages);
        if (rc)
                return rc;
index cf31d33..a8848db 100644 (file)
@@ -761,7 +761,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
 
        dev = kzalloc(sizeof(struct device), GFP_KERNEL);
        if (dev) {
-               dev_set_name(dev, priv->internal_name);
+               dev_set_name(dev, "%s", priv->internal_name);
                dev->bus = &iucv_bus;
                dev->parent = iucv_root;
                dev->driver = &vmlogrdr_driver;
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
deleted file mode 100644 (file)
index d5eac98..0000000
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Watchdog implementation based on z/VM Watchdog Timer API
- *
- * Copyright IBM Corp. 2004, 2009
- *
- * The user space watchdog daemon can use this driver as
- * /dev/vmwatchdog to have z/VM execute the specified CP
- * command when the timeout expires. The default command is
- * "IPL", which which cause an immediate reboot.
- */
-#define KMSG_COMPONENT "vmwatchdog"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/watchdog.h>
-
-#include <asm/ebcdic.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-#define MAX_CMDLEN 240
-#define MIN_INTERVAL 15
-static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
-static bool vmwdt_conceal;
-
-static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT;
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_DESCRIPTION("z/VM Watchdog Timer");
-module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
-MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
-module_param_named(conceal, vmwdt_conceal, bool, 0644);
-MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog "
-               " is active");
-module_param_named(nowayout, vmwdt_nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
-               " (default=CONFIG_WATCHDOG_NOWAYOUT)");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
-static unsigned int vmwdt_interval = 60;
-static unsigned long vmwdt_is_open;
-static int vmwdt_expect_close;
-
-static DEFINE_MUTEX(vmwdt_mutex);
-
-#define VMWDT_OPEN     0       /* devnode is open or suspend in progress */
-#define VMWDT_RUNNING  1       /* The watchdog is armed */
-
-enum vmwdt_func {
-       /* function codes */
-       wdt_init   = 0,
-       wdt_change = 1,
-       wdt_cancel = 2,
-       /* flags */
-       wdt_conceal = 0x80000000,
-};
-
-static int __diag288(enum vmwdt_func func, unsigned int timeout,
-                           char *cmd, size_t len)
-{
-       register unsigned long __func asm("2") = func;
-       register unsigned long __timeout asm("3") = timeout;
-       register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
-       register unsigned long __cmdl asm("5") = len;
-       int err;
-
-       err = -EINVAL;
-       asm volatile(
-               "       diag    %1,%3,0x288\n"
-               "0:     la      %0,0\n"
-               "1:\n"
-               EX_TABLE(0b,1b)
-               : "+d" (err) : "d"(__func), "d"(__timeout),
-                 "d"(__cmdp), "d"(__cmdl) : "1", "cc");
-       return err;
-}
-
-static int vmwdt_keepalive(void)
-{
-       /* we allocate new memory every time to avoid having
-        * to track the state. static allocation is not an
-        * option since that might not be contiguous in real
-        * storage in case of a modular build */
-       static char *ebc_cmd;
-       size_t len;
-       int ret;
-       unsigned int func;
-
-       ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
-       if (!ebc_cmd)
-               return -ENOMEM;
-
-       len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
-       ASCEBC(ebc_cmd, MAX_CMDLEN);
-       EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
-
-       func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
-       set_bit(VMWDT_RUNNING, &vmwdt_is_open);
-       ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
-       WARN_ON(ret != 0);
-       kfree(ebc_cmd);
-       return ret;
-}
-
-static int vmwdt_disable(void)
-{
-       char cmd[] = {'\0'};
-       int ret = __diag288(wdt_cancel, 0, cmd, 0);
-       WARN_ON(ret != 0);
-       clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
-       return ret;
-}
-
-static int __init vmwdt_probe(void)
-{
-       /* there is no real way to see if the watchdog is supported,
-        * so we try initializing it with a NOP command ("BEGIN")
-        * that won't cause any harm even if the following disable
-        * fails for some reason */
-       char ebc_begin[] = {
-               194, 197, 199, 201, 213
-       };
-       if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
-               return -EINVAL;
-       return vmwdt_disable();
-}
-
-static int vmwdt_open(struct inode *i, struct file *f)
-{
-       int ret;
-       if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
-               return -EBUSY;
-       ret = vmwdt_keepalive();
-       if (ret)
-               clear_bit(VMWDT_OPEN, &vmwdt_is_open);
-       return ret ? ret : nonseekable_open(i, f);
-}
-
-static int vmwdt_close(struct inode *i, struct file *f)
-{
-       if (vmwdt_expect_close == 42)
-               vmwdt_disable();
-       vmwdt_expect_close = 0;
-       clear_bit(VMWDT_OPEN, &vmwdt_is_open);
-       return 0;
-}
-
-static struct watchdog_info vmwdt_info = {
-       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
-       .firmware_version = 0,
-       .identity = "z/VM Watchdog Timer",
-};
-
-static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
-{
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               if (copy_to_user((void __user *)arg, &vmwdt_info,
-                                       sizeof(vmwdt_info)))
-                       return -EFAULT;
-               return 0;
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               return put_user(0, (int __user *)arg);
-       case WDIOC_GETTEMP:
-               return -EINVAL;
-       case WDIOC_SETOPTIONS:
-               {
-                       int options, ret;
-                       if (get_user(options, (int __user *)arg))
-                               return -EFAULT;
-                       ret = -EINVAL;
-                       if (options & WDIOS_DISABLECARD) {
-                               ret = vmwdt_disable();
-                               if (ret)
-                                       return ret;
-                       }
-                       if (options & WDIOS_ENABLECARD) {
-                               ret = vmwdt_keepalive();
-                       }
-                       return ret;
-               }
-       case WDIOC_GETTIMEOUT:
-               return put_user(vmwdt_interval, (int __user *)arg);
-       case WDIOC_SETTIMEOUT:
-               {
-                       int interval;
-                       if (get_user(interval, (int __user *)arg))
-                               return -EFAULT;
-                       if (interval < MIN_INTERVAL)
-                               return -EINVAL;
-                       vmwdt_interval = interval;
-               }
-               return vmwdt_keepalive();
-       case WDIOC_KEEPALIVE:
-               return vmwdt_keepalive();
-       }
-       return -EINVAL;
-}
-
-static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
-       int rc;
-
-       mutex_lock(&vmwdt_mutex);
-       rc = __vmwdt_ioctl(cmd, arg);
-       mutex_unlock(&vmwdt_mutex);
-       return (long) rc;
-}
-
-static ssize_t vmwdt_write(struct file *f, const char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-       if(count) {
-               if (!vmwdt_nowayout) {
-                       size_t i;
-
-                       /* note: just in case someone wrote the magic character
-                        * five months ago... */
-                       vmwdt_expect_close = 0;
-
-                       for (i = 0; i != count; i++) {
-                               char c;
-                               if (get_user(c, buf+i))
-                                       return -EFAULT;
-                               if (c == 'V')
-                                       vmwdt_expect_close = 42;
-                       }
-               }
-               /* someone wrote to us, we should restart timer */
-               vmwdt_keepalive();
-       }
-       return count;
-}
-
-static int vmwdt_resume(void)
-{
-       clear_bit(VMWDT_OPEN, &vmwdt_is_open);
-       return NOTIFY_DONE;
-}
-
-/*
- * It makes no sense to go into suspend while the watchdog is running.
- * Depending on the memory size, the watchdog might trigger, while we
- * are still saving the memory.
- * We reuse the open flag to ensure that suspend and watchdog open are
- * exclusive operations
- */
-static int vmwdt_suspend(void)
-{
-       if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
-               pr_err("The system cannot be suspended while the watchdog"
-                       " is in use\n");
-               return notifier_from_errno(-EBUSY);
-       }
-       if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
-               clear_bit(VMWDT_OPEN, &vmwdt_is_open);
-               pr_err("The system cannot be suspended while the watchdog"
-                       " is running\n");
-               return notifier_from_errno(-EBUSY);
-       }
-       return NOTIFY_DONE;
-}
-
-/*
- * This function is called for suspend and resume.
- */
-static int vmwdt_power_event(struct notifier_block *this, unsigned long event,
-                            void *ptr)
-{
-       switch (event) {
-       case PM_POST_HIBERNATION:
-       case PM_POST_SUSPEND:
-               return vmwdt_resume();
-       case PM_HIBERNATION_PREPARE:
-       case PM_SUSPEND_PREPARE:
-               return vmwdt_suspend();
-       default:
-               return NOTIFY_DONE;
-       }
-}
-
-static struct notifier_block vmwdt_power_notifier = {
-       .notifier_call = vmwdt_power_event,
-};
-
-static const struct file_operations vmwdt_fops = {
-       .open    = &vmwdt_open,
-       .release = &vmwdt_close,
-       .unlocked_ioctl = &vmwdt_ioctl,
-       .write   = &vmwdt_write,
-       .owner   = THIS_MODULE,
-       .llseek  = noop_llseek,
-};
-
-static struct miscdevice vmwdt_dev = {
-       .minor      = WATCHDOG_MINOR,
-       .name       = "watchdog",
-       .fops       = &vmwdt_fops,
-};
-
-static int __init vmwdt_init(void)
-{
-       int ret;
-
-       ret = vmwdt_probe();
-       if (ret)
-               return ret;
-       ret = register_pm_notifier(&vmwdt_power_notifier);
-       if (ret)
-               return ret;
-       /*
-        * misc_register() has to be the last action in module_init(), because
-        * file operations will be available right after this.
-        */
-       ret = misc_register(&vmwdt_dev);
-       if (ret) {
-               unregister_pm_notifier(&vmwdt_power_notifier);
-               return ret;
-       }
-       return 0;
-}
-module_init(vmwdt_init);
-
-static void __exit vmwdt_exit(void)
-{
-       unregister_pm_notifier(&vmwdt_power_notifier);
-       misc_deregister(&vmwdt_dev);
-}
-module_exit(vmwdt_exit);
index 445564c..00bfbee 100644 (file)
@@ -196,11 +196,11 @@ EXPORT_SYMBOL(airq_iv_release);
  */
 unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
 {
-       unsigned long bit, i;
+       unsigned long bit, i, flags;
 
        if (!iv->avail || num == 0)
                return -1UL;
-       spin_lock(&iv->lock);
+       spin_lock_irqsave(&iv->lock, flags);
        bit = find_first_bit_inv(iv->avail, iv->bits);
        while (bit + num <= iv->bits) {
                for (i = 1; i < num; i++)
@@ -218,9 +218,8 @@ unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
        }
        if (bit + num > iv->bits)
                bit = -1UL;
-       spin_unlock(&iv->lock);
+       spin_unlock_irqrestore(&iv->lock, flags);
        return bit;
-
 }
 EXPORT_SYMBOL(airq_iv_alloc);
 
@@ -232,11 +231,11 @@ EXPORT_SYMBOL(airq_iv_alloc);
  */
 void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
 {
-       unsigned long i;
+       unsigned long i, flags;
 
        if (!iv->avail || num == 0)
                return;
-       spin_lock(&iv->lock);
+       spin_lock_irqsave(&iv->lock, flags);
        for (i = 0; i < num; i++) {
                /* Clear (possibly left over) interrupt bit */
                clear_bit_inv(bit + i, iv->vector);
@@ -248,7 +247,7 @@ void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
                while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
                        iv->end--;
        }
-       spin_unlock(&iv->lock);
+       spin_unlock_irqrestore(&iv->lock, flags);
 }
 EXPORT_SYMBOL(airq_iv_free);
 
index dfd7bc6..e443b0d 100644 (file)
@@ -184,7 +184,7 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
                                      const char *buf, size_t count)
 {
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-       int rc;
+       int rc = 0;
 
        /* Prevent concurrent online/offline processing and ungrouping. */
        if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
@@ -196,11 +196,12 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
 
        if (device_remove_file_self(dev, attr))
                ccwgroup_ungroup(gdev);
+       else
+               rc = -ENODEV;
 out:
        if (rc) {
-               if (rc != -EAGAIN)
-                       /* Release onoff "lock" when ungrouping failed. */
-                       atomic_set(&gdev->onoff, 0);
+               /* Release onoff "lock" when ungrouping failed. */
+               atomic_set(&gdev->onoff, 0);
                return rc;
        }
        return count;
@@ -227,6 +228,7 @@ static void ccwgroup_ungroup_workfn(struct work_struct *work)
                container_of(work, struct ccwgroup_device, ungroup_work);
 
        ccwgroup_ungroup(gdev);
+       put_device(&gdev->dev);
 }
 
 static void ccwgroup_release(struct device *dev)
@@ -412,8 +414,10 @@ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
 {
        struct ccwgroup_device *gdev = to_ccwgroupdev(data);
 
-       if (action == BUS_NOTIFY_UNBIND_DRIVER)
+       if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+               get_device(&gdev->dev);
                schedule_work(&gdev->ungroup_work);
+       }
 
        return NOTIFY_OK;
 }
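The get_device()/put_device() pair pins the group device across the
asynchronous work: the reference taken before schedule_work() is dropped by
ccwgroup_ungroup_workfn() once the ungroup completes, so the device cannot be
released while the work is still queued. In outline:

        /* notifier (producer)             workfn (consumer)
         *   get_device(&gdev->dev);         ccwgroup_ungroup(gdev);
         *   schedule_work(&ungroup_work);   put_device(&gdev->dev);
         */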
@@ -582,11 +586,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
                                         __ccwgroup_match_all))) {
                struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
 
-               mutex_lock(&gdev->reg_mutex);
-               __ccwgroup_remove_symlinks(gdev);
-               device_unregister(dev);
-               __ccwgroup_remove_cdev_refs(gdev);
-               mutex_unlock(&gdev->reg_mutex);
+               ccwgroup_ungroup(gdev);
                put_device(dev);
        }
        driver_unregister(&cdriver->driver);
@@ -633,13 +633,7 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
        get_device(&gdev->dev);
        spin_unlock_irq(cdev->ccwlock);
        /* Unregister group device. */
-       mutex_lock(&gdev->reg_mutex);
-       if (device_is_registered(&gdev->dev)) {
-               __ccwgroup_remove_symlinks(gdev);
-               device_unregister(&gdev->dev);
-               __ccwgroup_remove_cdev_refs(gdev);
-       }
-       mutex_unlock(&gdev->reg_mutex);
+       ccwgroup_ungroup(gdev);
        /* Release ccwgroup device reference for local processing. */
        put_device(&gdev->dev);
 }
index 77f9c92..2905d8b 100644 (file)
@@ -602,6 +602,7 @@ void __init init_cio_interrupts(void)
 
 #ifdef CONFIG_CCW_CONSOLE
 static struct subchannel *console_sch;
+static struct lock_class_key console_sch_key;
 
 /*
  * Use cio_tsch to update the subchannel status and call the interrupt handler
@@ -686,6 +687,7 @@ struct subchannel *cio_probe_console(void)
        if (IS_ERR(sch))
                return sch;
 
+       lockdep_set_class(sch->lock, &console_sch_key);
        isc_register(CONSOLE_ISC);
        sch->config.isc = CONSOLE_ISC;
        sch->config.intparm = (u32)(addr_t)sch;
index d8d9b5b..dfef5e6 100644 (file)
@@ -678,18 +678,11 @@ static const struct attribute_group *ccwdev_attr_groups[] = {
        NULL,
 };
 
-/* this is a simple abstraction for device_register that sets the
- * correct bus type and adds the bus specific files */
-static int ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_add(struct ccw_device *cdev)
 {
        struct device *dev = &cdev->dev;
-       int ret;
 
        dev->bus = &ccw_bus_type;
-       ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
-                          cdev->private->dev_id.devno);
-       if (ret)
-               return ret;
        return device_add(dev);
 }
 
@@ -764,22 +757,46 @@ static void ccw_device_todo(struct work_struct *work);
 static int io_subchannel_initialize_dev(struct subchannel *sch,
                                        struct ccw_device *cdev)
 {
-       cdev->private->cdev = cdev;
-       cdev->private->int_class = IRQIO_CIO;
-       atomic_set(&cdev->private->onoff, 0);
+       struct ccw_device_private *priv = cdev->private;
+       int ret;
+
+       priv->cdev = cdev;
+       priv->int_class = IRQIO_CIO;
+       priv->state = DEV_STATE_NOT_OPER;
+       priv->dev_id.devno = sch->schib.pmcw.dev;
+       priv->dev_id.ssid = sch->schid.ssid;
+       priv->schid = sch->schid;
+
+       INIT_WORK(&priv->todo_work, ccw_device_todo);
+       INIT_LIST_HEAD(&priv->cmb_list);
+       init_waitqueue_head(&priv->wait_q);
+       init_timer(&priv->timer);
+
+       atomic_set(&priv->onoff, 0);
+       cdev->ccwlock = sch->lock;
        cdev->dev.parent = &sch->dev;
        cdev->dev.release = ccw_device_release;
-       INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
        cdev->dev.groups = ccwdev_attr_groups;
        /* Do first half of device_register. */
        device_initialize(&cdev->dev);
+       ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+                          cdev->private->dev_id.devno);
+       if (ret)
+               goto out_put;
        if (!get_device(&sch->dev)) {
-               /* Release reference from device_initialize(). */
-               put_device(&cdev->dev);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto out_put;
        }
-       cdev->private->flags.initialized = 1;
+       priv->flags.initialized = 1;
+       spin_lock_irq(sch->lock);
+       sch_set_cdev(sch, cdev);
+       spin_unlock_irq(sch->lock);
        return 0;
+
+out_put:
+       /* Release reference from device_initialize(). */
+       put_device(&cdev->dev);
+       return ret;
 }
 
 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
@@ -858,7 +875,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
        dev_set_uevent_suppress(&sch->dev, 0);
        kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        /* make it known to the system */
-       ret = ccw_device_register(cdev);
+       ret = ccw_device_add(cdev);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
                              cdev->private->dev_id.ssid,
@@ -923,26 +940,11 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 
 static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 {
-       struct ccw_device_private *priv;
-
-       cdev->ccwlock = sch->lock;
-
-       /* Init private data. */
-       priv = cdev->private;
-       priv->dev_id.devno = sch->schib.pmcw.dev;
-       priv->dev_id.ssid = sch->schid.ssid;
-       priv->schid = sch->schid;
-       priv->state = DEV_STATE_NOT_OPER;
-       INIT_LIST_HEAD(&priv->cmb_list);
-       init_waitqueue_head(&priv->wait_q);
-       init_timer(&priv->timer);
-
        /* Increase counter of devices currently in recognition. */
        atomic_inc(&ccw_device_init_count);
 
        /* Start async. device sensing. */
        spin_lock_irq(sch->lock);
-       sch_set_cdev(sch, cdev);
        ccw_device_recognition(cdev);
        spin_unlock_irq(sch->lock);
 }
@@ -1083,7 +1085,7 @@ static int io_subchannel_probe(struct subchannel *sch)
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
                cdev = sch_get_cdev(sch);
-               rc = ccw_device_register(cdev);
+               rc = ccw_device_add(cdev);
                if (rc) {
                        /* Release online reference. */
                        put_device(&cdev->dev);
@@ -1597,7 +1599,6 @@ int __init ccw_device_enable_console(struct ccw_device *cdev)
        if (rc)
                return rc;
        sch->driver = &io_subchannel_driver;
-       sch_set_cdev(sch, cdev);
        io_subchannel_recog(cdev, sch);
        /* Now wait for the async. recognition to come to an end. */
        spin_lock_irq(cdev->ccwlock);
@@ -1639,6 +1640,7 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
                put_device(&sch->dev);
                return ERR_PTR(-ENOMEM);
        }
+       set_io_private(sch, io_priv);
        cdev = io_subchannel_create_ccwdev(sch);
        if (IS_ERR(cdev)) {
                put_device(&sch->dev);
@@ -1646,7 +1648,6 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
                return cdev;
        }
        cdev->drv = drv;
-       set_io_private(sch, io_priv);
        ccw_device_set_int_class(cdev);
        return cdev;
 }
index 4221b02..f1f3baa 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 #include <asm/debug.h>
 #include "qdio_debug.h"
 #include "qdio.h"
@@ -16,11 +17,51 @@ debug_info_t *qdio_dbf_error;
 
 static struct dentry *debugfs_root;
 #define QDIO_DEBUGFS_NAME_LEN  10
+#define QDIO_DBF_NAME_LEN      20
 
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+struct qdio_dbf_entry {
+       char dbf_name[QDIO_DBF_NAME_LEN];
+       debug_info_t *dbf_info;
+       struct list_head dbf_list;
+};
+
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+       struct qdio_dbf_entry *entry;
+       debug_info_t *rc = NULL;
+
+       mutex_lock(&qdio_dbf_list_mutex);
+       list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+               if (strcmp(entry->dbf_name, name) == 0) {
+                       rc = entry->dbf_info;
+                       break;
+               }
+       }
+       mutex_unlock(&qdio_dbf_list_mutex);
+       return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+       struct qdio_dbf_entry *entry, *tmp;
+
+       mutex_lock(&qdio_dbf_list_mutex);
+       list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+               list_del(&entry->dbf_list);
+               debug_unregister(entry->dbf_info);
+               kfree(entry);
+       }
+       mutex_unlock(&qdio_dbf_list_mutex);
+}
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
                       struct qdio_irq *irq_ptr)
 {
-       char text[20];
+       char text[QDIO_DBF_NAME_LEN];
+       struct qdio_dbf_entry *new_entry;
 
        DBF_EVENT("qfmt:%1d", init_data->q_format);
        DBF_HEX(init_data->adapter_name, 8);
@@ -38,11 +79,34 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
        DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
 
        /* allocate trace view for the interface */
-       snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
-       irq_ptr->debug_area = debug_register(text, 2, 1, 16);
-       debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
-       debug_set_level(irq_ptr->debug_area, DBF_WARN);
-       DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+       snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+                                       dev_name(&init_data->cdev->dev));
+       irq_ptr->debug_area = qdio_get_dbf_entry(text);
+       if (irq_ptr->debug_area)
+               DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+       else {
+               irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+               if (!irq_ptr->debug_area)
+                       return -ENOMEM;
+               if (debug_register_view(irq_ptr->debug_area,
+                                               &debug_hex_ascii_view)) {
+                       debug_unregister(irq_ptr->debug_area);
+                       return -ENOMEM;
+               }
+               debug_set_level(irq_ptr->debug_area, DBF_WARN);
+               DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+               new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+               if (!new_entry) {
+                       debug_unregister(irq_ptr->debug_area);
+                       return -ENOMEM;
+               }
+               strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+               new_entry->dbf_info = irq_ptr->debug_area;
+               mutex_lock(&qdio_dbf_list_mutex);
+               list_add(&new_entry->dbf_list, &qdio_dbf_list);
+               mutex_unlock(&qdio_dbf_list_mutex);
+       }
+       return 0;
 }
 
 static int qstat_show(struct seq_file *m, void *v)
@@ -300,6 +364,7 @@ int __init qdio_debug_init(void)
 
 void qdio_debug_exit(void)
 {
+       qdio_clear_dbf_list();
        debugfs_remove(debugfs_root);
        if (qdio_dbf_setup)
                debug_unregister(qdio_dbf_setup);
index dfac9bf..f33ce85 100644 (file)
@@ -75,7 +75,7 @@ static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
        }
 }
 
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
                       struct qdio_irq *irq_ptr);
 void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
                              struct ccw_device *cdev);
index 77466c4..848e3b6 100644 (file)
@@ -409,17 +409,16 @@ static inline void qdio_stop_polling(struct qdio_q *q)
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
 }
 
-static inline void account_sbals(struct qdio_q *q, int count)
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
 {
-       int pos = 0;
+       int pos;
 
        q->q_stats.nr_sbal_total += count;
        if (count == QDIO_MAX_BUFFERS_MASK) {
                q->q_stats.nr_sbals[7]++;
                return;
        }
-       while (count >>= 1)
-               pos++;
+       pos = ilog2(count);
        q->q_stats.nr_sbals[pos]++;
 }
 
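ilog2(count) computes floor(log2(count)) in constant time, which is exactly
what the removed shift loop produced for any count >= 1:

        /* pos = 0; while (count >>= 1) pos++;   ==   pos = ilog2(count);
         * e.g. count = 12 (0b1100): the loop shifts three times and
         * ilog2(12) = 3, so both forms select nr_sbals[3]. */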
@@ -1234,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev)
                return -ENODEV;
 
        DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+       DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
        mutex_lock(&irq_ptr->setup_mutex);
 
-       if (irq_ptr->debug_area != NULL) {
-               debug_unregister(irq_ptr->debug_area);
-               irq_ptr->debug_area = NULL;
-       }
+       irq_ptr->debug_area = NULL;
        cdev->private->qdio_data = NULL;
        mutex_unlock(&irq_ptr->setup_mutex);
 
@@ -1276,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data)
                goto out_err;
 
        mutex_init(&irq_ptr->setup_mutex);
-       qdio_allocate_dbf(init_data, irq_ptr);
+       if (qdio_allocate_dbf(init_data, irq_ptr))
+               goto out_rel;
 
        /*
         * Allocate a page for the chsc calls in qdio_establish.
index 8eec165..4038437 100644 (file)
@@ -77,12 +77,12 @@ MODULE_ALIAS("z90crypt");
  * Module parameter
  */
 int ap_domain_index = -1;      /* Adjunct Processor Domain Index */
-module_param_named(domain, ap_domain_index, int, 0000);
+module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(domain, "domain index for ap devices");
 EXPORT_SYMBOL(ap_domain_index);
 
 static int ap_thread_flag = 0;
-module_param_named(poll_thread, ap_thread_flag, int, 0000);
+module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
 
 static struct device *ap_root_device = NULL;
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
        int rc;
 
        ap_dev->drv = ap_drv;
+
+       spin_lock_bh(&ap_device_list_lock);
+       list_add(&ap_dev->list, &ap_device_list);
+       spin_unlock_bh(&ap_device_list_lock);
+
        rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
-       if (!rc) {
+       if (rc) {
                spin_lock_bh(&ap_device_list_lock);
-               list_add(&ap_dev->list, &ap_device_list);
+               list_del_init(&ap_dev->list);
                spin_unlock_bh(&ap_device_list_lock);
        }
        return rc;
index 5222ebe..0e18c5d 100644 (file)
@@ -356,7 +356,7 @@ struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
 
        zops = __ops_lookup(name, variant);
        if (!zops) {
-               request_module(name);
+               request_module("%s", name);
                zops = __ops_lookup(name, variant);
        }
        if ((!zops) || (!try_module_get(zops->owner)))
index 5543490..56467df 100644 (file)
@@ -4198,6 +4198,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                kfree(phba->ep_array);
                phba->ep_array = NULL;
                ret = -ENOMEM;
+
+               goto free_memory;
        }
 
        for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
index 6045aa7..07934b0 100644 (file)
@@ -1008,10 +1008,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
                BE2_IPV6 : BE2_IPV4 ;
 
        rc = mgmt_get_if_info(phba, ip_type, &if_info);
-       if (rc) {
-               kfree(if_info);
+       if (rc)
                return rc;
-       }
 
        if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
                if (if_info->dhcp_state) {
index f548430..785d0d7 100644 (file)
@@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
        skb_pull(skb, sizeof(struct fcoe_hdr));
        fr_len = skb->len - sizeof(struct fcoe_crc_eof);
 
-       stats = per_cpu_ptr(lport->stats, get_cpu());
-       stats->RxFrames++;
-       stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
-
        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
        fr_dev(fp) = lport;
        fr_sof(fp) = hp->fcoe_sof;
        if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
-               put_cpu();
                kfree_skb(skb);
                return;
        }
        fr_eof(fp) = crc_eof.fcoe_eof;
        fr_crc(fp) = crc_eof.fcoe_crc32;
        if (pskb_trim(skb, fr_len)) {
-               put_cpu();
                kfree_skb(skb);
                return;
        }
@@ -544,7 +538,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
                port = lport_priv(vn_port);
                if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
                        BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
-                       put_cpu();
                        kfree_skb(skb);
                        return;
                }
@@ -552,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
        if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
            fh->fh_type == FC_TYPE_FCP) {
                /* Drop FCP data. We don't do this in the L2 path */
-               put_cpu();
                kfree_skb(skb);
                return;
        }
@@ -562,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
                case ELS_LOGO:
                        if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
                                /* drop non-FIP LOGO */
-                               put_cpu();
                                kfree_skb(skb);
                                return;
                        }
@@ -572,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 
        if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
                /* Drop incoming ABTS */
-               put_cpu();
                kfree_skb(skb);
                return;
        }
 
+       stats = per_cpu_ptr(lport->stats, smp_processor_id());
+       stats->RxFrames++;
+       stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
        if (le32_to_cpu(fr_crc(fp)) !=
                        ~crc32(~0, skb->data, fr_len)) {
                if (stats->InvalidCRCCount < 5)
                        printk(KERN_WARNING PFX "dropping frame with "
                               "CRC error\n");
                stats->InvalidCRCCount++;
-               put_cpu();
                kfree_skb(skb);
                return;
        }
-       put_cpu();
        fc_exch_recv(lport, fp);
 }
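This hunk exists because get_cpu() disables preemption and every early return then needs a matching put_cpu(); the original function had half a dozen such exits and kept sprouting more. The canonical pairing looks like this:

	stats = per_cpu_ptr(lport->stats, get_cpu());	/* disables preemption */
	stats->RxFrames++;
	put_cpu();					/* must run on *every* exit path */

The patch sidesteps the problem by doing all validation first and touching the per-CPU counters exactly once, via smp_processor_id(). Note the assumption baked in: smp_processor_id() by itself does not disable preemption, so this is only correct if the receive path already runs in a non-migratable context; on a preemptible path the update could race (and warn under DEBUG_PREEMPT).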
 
index 32a5e0a..7bc47fc 100644 (file)
@@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
                                       arr_sz, GFP_KERNEL);
        if (!cmgr->free_list_lock) {
                printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+               kfree(cmgr->free_list);
+               cmgr->free_list = NULL;
                goto mem_err;
        }
 
index 2ebfb2b..7b23f21 100644 (file)
@@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
        if (crq->valid & 0x80) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
+
+               /* Ensure the read of the valid bit occurs before reading any
+                * other bits of the CRQ entry
+                */
+               rmb();
        } else
                crq = NULL;
        spin_unlock_irqrestore(&queue->lock, flags);
@@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
 {
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 
+       /*
+        * Ensure the command buffer is flushed to memory before handing it
+        * over to the VIOS to prevent it from fetching any stale data.
+        */
+       mb();
        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
 }
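Both barriers follow the standard producer/consumer discipline for a queue shared with the hypervisor; handle() below is a stand-in for the driver's processing. Condensed:

	/* Consumer side: only trust the payload after the valid bit. */
	if (crq->valid & 0x80) {
		rmb();			/* order the valid-bit read before reading the rest */
		handle(crq);
	}

	/* Producer side: make the buffer globally visible, then signal. */
	buf->opcode = op;
	mb();				/* order the stores before the hypercall */
	plpar_hcall_norets(H_SEND_CRQ, unit, word1, word2);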
 
@@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
                                       evt->hostdata->dev);
                        if (evt->cmnd_done)
                                evt->cmnd_done(evt->cmnd);
-               } else if (evt->done)
+               } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
+                          evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
                        evt->done(evt);
                free_event_struct(&evt->hostdata->pool, evt);
                spin_lock_irqsave(hostdata->host->host_lock, flags);
index 1e4479f..9270d15 100644 (file)
@@ -564,7 +564,7 @@ static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
        u32 tmp;
 
        tmp = mr32(MVS_GBL_CTL);
-       tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+       tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
        mw32(MVS_GBL_INT_STAT, tmp);
        writel(tmp, regs + 0x0C);
        writel(tmp, regs + 0x10);
@@ -580,7 +580,7 @@ static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
 
        tmp = mr32(MVS_GBL_CTL);
 
-       tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+       tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
        mw32(MVS_GBL_INT_STAT, tmp);
        writel(tmp, regs + 0x0C);
        writel(tmp, regs + 0x10);
@@ -596,7 +596,7 @@ static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
        if (!(mvi->flags & MVF_FLAG_SOC)) {
                stat = mr32(MVS_GBL_INT_STAT);
 
-               if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
+               if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B)))
                        return 0;
        }
        return stat;
@@ -606,8 +606,8 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
 {
        void __iomem *regs = mvi->regs;
 
-       if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
-                       ((stat & IRQ_SAS_B) && mvi->id == 1)) {
+       if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) ||
+                       ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) {
                mw32_f(MVS_INT_STAT, CINT_DONE);
 
                spin_lock(&mvi->lock);
index 487aa6f..14e1974 100644 (file)
@@ -150,35 +150,35 @@ enum chip_register_bits {
 
 enum pci_interrupt_cause {
        /*  MAIN_IRQ_CAUSE (R10200) Bits*/
-       IRQ_COM_IN_I2O_IOP0            = (1 << 0),
-       IRQ_COM_IN_I2O_IOP1            = (1 << 1),
-       IRQ_COM_IN_I2O_IOP2            = (1 << 2),
-       IRQ_COM_IN_I2O_IOP3            = (1 << 3),
-       IRQ_COM_OUT_I2O_HOS0           = (1 << 4),
-       IRQ_COM_OUT_I2O_HOS1           = (1 << 5),
-       IRQ_COM_OUT_I2O_HOS2           = (1 << 6),
-       IRQ_COM_OUT_I2O_HOS3           = (1 << 7),
-       IRQ_PCIF_TO_CPU_DRBL0          = (1 << 8),
-       IRQ_PCIF_TO_CPU_DRBL1          = (1 << 9),
-       IRQ_PCIF_TO_CPU_DRBL2          = (1 << 10),
-       IRQ_PCIF_TO_CPU_DRBL3          = (1 << 11),
-       IRQ_PCIF_DRBL0                 = (1 << 12),
-       IRQ_PCIF_DRBL1                 = (1 << 13),
-       IRQ_PCIF_DRBL2                 = (1 << 14),
-       IRQ_PCIF_DRBL3                 = (1 << 15),
-       IRQ_XOR_A                      = (1 << 16),
-       IRQ_XOR_B                      = (1 << 17),
-       IRQ_SAS_A                      = (1 << 18),
-       IRQ_SAS_B                      = (1 << 19),
-       IRQ_CPU_CNTRL                  = (1 << 20),
-       IRQ_GPIO                       = (1 << 21),
-       IRQ_UART                       = (1 << 22),
-       IRQ_SPI                        = (1 << 23),
-       IRQ_I2C                        = (1 << 24),
-       IRQ_SGPIO                      = (1 << 25),
-       IRQ_COM_ERR                    = (1 << 29),
-       IRQ_I2O_ERR                    = (1 << 30),
-       IRQ_PCIE_ERR                   = (1 << 31),
+       MVS_IRQ_COM_IN_I2O_IOP0        = (1 << 0),
+       MVS_IRQ_COM_IN_I2O_IOP1        = (1 << 1),
+       MVS_IRQ_COM_IN_I2O_IOP2        = (1 << 2),
+       MVS_IRQ_COM_IN_I2O_IOP3        = (1 << 3),
+       MVS_IRQ_COM_OUT_I2O_HOS0       = (1 << 4),
+       MVS_IRQ_COM_OUT_I2O_HOS1       = (1 << 5),
+       MVS_IRQ_COM_OUT_I2O_HOS2       = (1 << 6),
+       MVS_IRQ_COM_OUT_I2O_HOS3       = (1 << 7),
+       MVS_IRQ_PCIF_TO_CPU_DRBL0      = (1 << 8),
+       MVS_IRQ_PCIF_TO_CPU_DRBL1      = (1 << 9),
+       MVS_IRQ_PCIF_TO_CPU_DRBL2      = (1 << 10),
+       MVS_IRQ_PCIF_TO_CPU_DRBL3      = (1 << 11),
+       MVS_IRQ_PCIF_DRBL0             = (1 << 12),
+       MVS_IRQ_PCIF_DRBL1             = (1 << 13),
+       MVS_IRQ_PCIF_DRBL2             = (1 << 14),
+       MVS_IRQ_PCIF_DRBL3             = (1 << 15),
+       MVS_IRQ_XOR_A                  = (1 << 16),
+       MVS_IRQ_XOR_B                  = (1 << 17),
+       MVS_IRQ_SAS_A                  = (1 << 18),
+       MVS_IRQ_SAS_B                  = (1 << 19),
+       MVS_IRQ_CPU_CNTRL              = (1 << 20),
+       MVS_IRQ_GPIO                   = (1 << 21),
+       MVS_IRQ_UART                   = (1 << 22),
+       MVS_IRQ_SPI                    = (1 << 23),
+       MVS_IRQ_I2C                    = (1 << 24),
+       MVS_IRQ_SGPIO                  = (1 << 25),
+       MVS_IRQ_COM_ERR                = (1 << 29),
+       MVS_IRQ_I2O_ERR                = (1 << 30),
+       MVS_IRQ_PCIE_ERR               = (1 << 31),
 };
 
 union reg_phy_cfg {
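The mechanical rename prefixes every driver-local interrupt-cause constant with MVS_, keeping them out of the generic IRQ_* namespace that arch headers already populate. Nothing behavioural changes; for example:

	enum pci_interrupt_cause {
		MVS_IRQ_SAS_A = (1 << 18),	/* was IRQ_SAS_A: risked clashing with arch IRQ_* names */
		MVS_IRQ_SAS_B = (1 << 19),
	};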
index c4f31b2..e90c89f 100644 (file)
@@ -677,7 +677,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
  * pm8001_get_phy_settings_info : Read phy setting values.
  * @pm8001_ha : our hba.
  */
-void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
 {
 
 #ifdef PM8001_READ_VPD
@@ -691,11 +691,15 @@ void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
        payload.offset = 0;
        payload.length = 4096;
        payload.func_specific = kzalloc(4096, GFP_KERNEL);
+       if (!payload.func_specific)
+               return -ENOMEM;
        /* Read phy setting values from flash */
        PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
        wait_for_completion(&completion);
        pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+       kfree(payload.func_specific);
 #endif
+       return 0;
 }
 
 #ifdef PM8001_USE_MSIX
@@ -879,8 +883,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
        pm8001_init_sas_add(pm8001_ha);
        /* phy setting support for motherboard controller */
        if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
-               pdev->subsystem_vendor != 0)
-               pm8001_get_phy_settings_info(pm8001_ha);
+               pdev->subsystem_vendor != 0) {
+               rc = pm8001_get_phy_settings_info(pm8001_ha);
+               if (rc)
+                       goto err_out_shost;
+       }
        pm8001_post_sas_ha_init(shost, chip);
        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
        if (rc)
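Two fixes travel together in the pm8001 change: the VPD buffer allocation is now checked, and the new return path also frees it, plugging a 4 KiB leak per probe; the caller then unwinds instead of letting a NULL buffer reach the firmware request. The shape of the fix:

	payload.func_specific = kzalloc(4096, GFP_KERNEL);
	if (!payload.func_specific)
		return -ENOMEM;			/* propagate instead of oopsing later */
	/* ... issue the NVMD request, wait, apply the profile ... */
	kfree(payload.func_specific);		/* was leaked before this patch */
	return 0;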
index 4b188b0..e632e14 100644 (file)
@@ -1128,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
        ctio->u.status1.flags =
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_TERMINATE);
-       ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+       ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
 
        qla2x00_start_iocbs(vha, vha->req);
 
@@ -1262,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
 {
        struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
        struct ctio7_to_24xx *ctio;
+       uint16_t temp;
 
        ql_dbg(ql_dbg_tgt, ha, 0xe008,
            "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
@@ -1292,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
        ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_SEND_STATUS);
-       ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+       ctio->u.status1.ox_id = cpu_to_le16(temp);
        ctio->u.status1.scsi_status =
            __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
        ctio->u.status1.response_len = __constant_cpu_to_le16(8);
@@ -1513,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
        struct ctio7_to_24xx *pkt;
        struct qla_hw_data *ha = vha->hw;
        struct atio_from_isp *atio = &prm->cmd->atio;
+       uint16_t temp;
 
        pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
        prm->pkt = pkt;
@@ -1541,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
        pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
        pkt->exchange_addr = atio->u.isp24.exchange_addr;
        pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
-       pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+       pkt->u.status0.ox_id = cpu_to_le16(temp);
        pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
 
        ql_dbg(ql_dbg_tgt, vha, 0xe00c,
            "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
-           vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
-           le16_to_cpu(pkt->u.status0.ox_id));
+           vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
        return 0;
 }
 
@@ -2619,6 +2622,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
        struct qla_hw_data *ha = vha->hw;
        request_t *pkt;
        int ret = 0;
+       uint16_t temp;
 
        ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
 
@@ -2655,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
        ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_TERMINATE);
-       ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+       ctio24->u.status1.ox_id = cpu_to_le16(temp);
 
        /* Most likely, it isn't needed */
        ctio24->u.status1.residual = get_unaligned((uint32_t *)
index e0a58fd..d1d24fb 100644 (file)
@@ -443,7 +443,7 @@ struct ctio7_to_24xx {
                        uint16_t reserved1;
                        __le16 flags;
                        uint32_t residual;
-                       uint16_t ox_id;
+                       __le16 ox_id;
                        uint16_t scsi_status;
                        uint32_t relative_offset;
                        uint32_t reserved2;
@@ -458,7 +458,7 @@ struct ctio7_to_24xx {
                        uint16_t sense_length;
                        uint16_t flags;
                        uint32_t residual;
-                       uint16_t ox_id;
+                       __le16 ox_id;
                        uint16_t scsi_status;
                        uint16_t response_len;
                        uint16_t reserved;
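The qla2xxx changes replace bare swab16() with an explicit two-step conversion. For a 16-bit field the result is byte-identical on any host (big-endian to little-endian is always a single swap), so this is a no-op at runtime; the point is documentation and static checking: with ox_id declared __le16, sparse can verify that every store goes through cpu_to_le16(). A condensed sketch:

	__be16 wire;	/* FC header field, big-endian on the wire */
	__le16 fw;	/* CTIO field, little-endian as the ISP expects */
	u16 cpu;

	cpu = be16_to_cpu(wire);	/* wire -> host order, explicit */
	fw  = cpu_to_le16(cpu);		/* host -> firmware order, checkable */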
index cbe38e5..7e95791 100644 (file)
@@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work)
                                    "aborting command %p\n", scmd));
                rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
                if (rtn == SUCCESS) {
-                       scmd->result |= DID_TIME_OUT << 16;
+                       set_host_byte(scmd, DID_TIME_OUT);
                        if (scsi_host_eh_past_deadline(sdev->host)) {
                                SCSI_LOG_ERROR_RECOVERY(3,
                                        scmd_printk(KERN_INFO, scmd,
@@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work)
                        scmd_printk(KERN_WARNING, scmd,
                                    "scmd %p terminate "
                                    "aborted command\n", scmd));
-               scmd->result |= DID_TIME_OUT << 16;
+               set_host_byte(scmd, DID_TIME_OUT);
                scsi_finish_command(scmd);
        }
 }
@@ -287,15 +287,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
        else if (host->hostt->eh_timed_out)
                rtn = host->hostt->eh_timed_out(scmd);
 
-       if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort)
-               if (scsi_abort_command(scmd) == SUCCESS)
+       if (rtn == BLK_EH_NOT_HANDLED) {
+               if (!host->hostt->no_async_abort &&
+                   scsi_abort_command(scmd) == SUCCESS)
                        return BLK_EH_NOT_HANDLED;
 
-       scmd->result |= DID_TIME_OUT << 16;
-
-       if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
-                    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
-               rtn = BLK_EH_HANDLED;
+               set_host_byte(scmd, DID_TIME_OUT);
+               if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
+                       rtn = BLK_EH_HANDLED;
+       }
 
        return rtn;
 }
@@ -1777,7 +1777,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
                break;
        case DID_ABORT:
                if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
-                       scmd->result |= DID_TIME_OUT << 16;
+                       set_host_byte(scmd, DID_TIME_OUT);
                        return SUCCESS;
                }
        case DID_NO_CONNECT:
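scmd->result packs the driver, host, message and status bytes into one word; OR-ing DID_TIME_OUT << 16 merges with whatever host byte is already set and can yield a nonsense code. set_host_byte() replaces the byte instead; its definition (as in include/scsi/scsi_cmnd.h around this release) is simply:

	static inline void set_host_byte(struct scsi_cmnd *cmd, char status)
	{
		cmd->result = (cmd->result & 0xff00ffff) | (status << 16);
	}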
index f7e3163..3f50dfc 100644 (file)
@@ -733,6 +733,14 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        scsi_next_command(cmd);
                        return;
                }
+       } else if (blk_rq_bytes(req) == 0 && result && !sense_deferred) {
+               /*
+                * Certain non BLOCK_PC requests are commands that don't
+                * actually transfer anything (FLUSH), so cannot use
+                * good_bytes != blk_rq_bytes(req) as the signal for an error.
+                * This sets the error explicitly for the problem case.
+                */
+               error = __scsi_error_from_host_byte(cmd, result);
        }
 
        /* no bidi support for !REQ_TYPE_BLOCK_PC yet */
index 2bea4f0..503594e 100644 (file)
@@ -28,7 +28,7 @@ scsi_trace_misc(struct trace_seq *, unsigned char *, int);
 static const char *
 scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
 {
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
        sector_t lba = 0, txlen = 0;
 
        lba |= ((cdb[1] & 0x1F) << 16);
@@ -46,7 +46,7 @@ scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len)
 static const char *
 scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
 {
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
        sector_t lba = 0, txlen = 0;
 
        lba |= (cdb[2] << 24);
@@ -71,7 +71,7 @@ scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len)
 static const char *
 scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
 {
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
        sector_t lba = 0, txlen = 0;
 
        lba |= (cdb[2] << 24);
@@ -94,7 +94,7 @@ scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len)
 static const char *
 scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
 {
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
        sector_t lba = 0, txlen = 0;
 
        lba |= ((u64)cdb[2] << 56);
@@ -125,7 +125,7 @@ scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len)
 static const char *
 scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len)
 {
-       const char *ret = p->buffer + p->len, *cmd;
+       const char *ret = trace_seq_buffer_ptr(p), *cmd;
        sector_t lba = 0, txlen = 0;
        u32 ei_lbrt = 0;
 
@@ -180,7 +180,7 @@ out:
 static const char *
 scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
 {
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
        unsigned int regions = cdb[7] << 8 | cdb[8];
 
        trace_seq_printf(p, "regions=%u", (regions - 8) / 16);
@@ -192,7 +192,7 @@ scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len)
 static const char *
 scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len)
 {
-       const char *ret = p->buffer + p->len, *cmd;
+       const char *ret = trace_seq_buffer_ptr(p), *cmd;
        sector_t lba = 0;
        u32 alloc_len = 0;
 
@@ -247,7 +247,7 @@ scsi_trace_varlen(struct trace_seq *p, unsigned char *cdb, int len)
 static const char *
 scsi_trace_misc(struct trace_seq *p, unsigned char *cdb, int len)
 {
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
 
        trace_seq_printf(p, "-");
        trace_seq_putc(p, 0);
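All of the scsi_trace decoders computed their return pointer by reaching into struct trace_seq directly; the tracing core was reworking those internals, so the callers now go through an accessor and survive future layout changes:

	const char *ret = p->buffer + p->len;		/* before: depends on struct layout */
	const char *ret = trace_seq_buffer_ptr(p);	/* after: opaque accessor */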
index f80908f..521f583 100644 (file)
@@ -2549,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work)
                        fc_flush_devloss(shost);
                if (!cancel_delayed_work(&rport->dev_loss_work))
                        fc_flush_devloss(shost);
+               cancel_work_sync(&rport->scan_work);
                spin_lock_irqsave(shost->host_lock, flags);
                rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
        }
index e9689d5..6825eda 100644 (file)
@@ -2441,7 +2441,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
                }
 
                sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
-               if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+               if (sdp->broken_fua) {
+                       sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
+                       sdkp->DPOFUA = 0;
+               } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
                        sd_first_printk(KERN_NOTICE, sdkp,
                                  "Uses READ/WRITE(6), disabling FUA\n");
                        sdkp->DPOFUA = 0;
index 89ee592..308256b 100644 (file)
@@ -237,6 +237,16 @@ static void virtscsi_req_done(struct virtqueue *vq)
        virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 };
 
+static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
+{
+       int i, num_vqs;
+
+       num_vqs = vscsi->num_queues;
+       for (i = 0; i < num_vqs; i++)
+               virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
+                                virtscsi_complete_cmd);
+}
+
 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
 {
        struct virtio_scsi_cmd *cmd = buf;
@@ -253,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
        virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
 };
 
+static void virtscsi_handle_event(struct work_struct *work);
+
 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
                               struct virtio_scsi_event_node *event_node)
 {
@@ -260,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
        struct scatterlist sg;
        unsigned long flags;
 
+       INIT_WORK(&event_node->work, virtscsi_handle_event);
        sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
 
        spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
@@ -377,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
 {
        struct virtio_scsi_event_node *event_node = buf;
 
-       INIT_WORK(&event_node->work, virtscsi_handle_event);
        schedule_work(&event_node->work);
 }
 
@@ -589,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
            cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
                ret = SUCCESS;
 
+       /*
+        * The spec guarantees that all requests related to the TMF have
+        * been completed, but the callback might not have run yet if
+        * we're using independent interrupts (e.g. MSI).  Poll the
+        * virtqueues once.
+        *
+        * In the abort case, sc->scsi_done will do nothing, because
+        * the block layer must have detected a timeout and as a result
+        * REQ_ATOM_COMPLETE has been set.
+        */
+       virtscsi_poll_requests(vscsi);
+
 out:
        mempool_free(cmd, virtscsi_cmd_pool);
        return ret;
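Two related virtio-scsi changes: the event node's work item is now initialized when the buffer is queued to the device rather than in the completion callback, so it is always valid by the time anything schedules it; and after a task-management function the driver polls every request virtqueue once, because with per-queue MSI vectors the TMF completion can arrive before the request completions it guarantees. The init-before-publish rule in miniature (handle_event and err stand in for the driver's names):

	INIT_WORK(&node->work, handle_event);	/* initialize before the device can complete it */
	err = virtqueue_add_inbuf(vq, &sg, 1, node, GFP_ATOMIC);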
index f6759dc..c41ff14 100644 (file)
@@ -368,7 +368,7 @@ int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
         * otherwise we use the default. Also we use the default FIFO
         * thresholds for now.
         */
-       *burst_code = chip_info ? chip_info->dma_burst_size : 16;
+       *burst_code = chip_info ? chip_info->dma_burst_size : 1;
        *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
                   | SSCR1_TxTresh(TX_THRESH_DFLT);
 
index a98df7e..fe79210 100644 (file)
@@ -118,6 +118,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
         */
        orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
 
+       /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
        value = orig | SPI_CS_CONTROL_SW_MODE;
        writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
        value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
@@ -126,10 +127,13 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
                goto detection_done;
        }
 
-       value &= ~SPI_CS_CONTROL_SW_MODE;
+       orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+
+       /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
+       value = orig & ~SPI_CS_CONTROL_SW_MODE;
        writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
        value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
-       if (value != orig) {
+       if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
                offset = 0x800;
                goto detection_done;
        }
index fc1de86..c08da38 100644 (file)
@@ -424,31 +424,6 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
        return 0;
 }
 
-static void spi_qup_set_cs(struct spi_device *spi, bool enable)
-{
-       struct spi_qup *controller = spi_master_get_devdata(spi->master);
-
-       u32 iocontol, mask;
-
-       iocontol = readl_relaxed(controller->base + SPI_IO_CONTROL);
-
-       /* Disable auto CS toggle and use manual */
-       iocontol &= ~SPI_IO_C_MX_CS_MODE;
-       iocontol |= SPI_IO_C_FORCE_CS;
-
-       iocontol &= ~SPI_IO_C_CS_SELECT_MASK;
-       iocontol |= SPI_IO_C_CS_SELECT(spi->chip_select);
-
-       mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
-
-       if (enable)
-               iocontol |= mask;
-       else
-               iocontol &= ~mask;
-
-       writel_relaxed(iocontol, controller->base + SPI_IO_CONTROL);
-}
-
 static int spi_qup_transfer_one(struct spi_master *master,
                              struct spi_device *spi,
                              struct spi_transfer *xfer)
@@ -571,12 +546,16 @@ static int spi_qup_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       /* use num-cs unless not present or out of range */
+       if (of_property_read_u16(dev->of_node, "num-cs",
+                       &master->num_chipselect) ||
+                       (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+               master->num_chipselect = SPI_NUM_CHIPSELECTS;
+
        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
-       master->num_chipselect = SPI_NUM_CHIPSELECTS;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        master->max_speed_hz = max_freq;
-       master->set_cs = spi_qup_set_cs;
        master->transfer_one = spi_qup_transfer_one;
        master->dev.of_node = pdev->dev.of_node;
        master->auto_runtime_pm = true;
@@ -640,16 +619,19 @@ static int spi_qup_probe(struct platform_device *pdev)
        if (ret)
                goto error;
 
-       ret = devm_spi_register_master(dev, master);
-       if (ret)
-               goto error;
-
        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
+
+       ret = devm_spi_register_master(dev, master);
+       if (ret)
+               goto disable_pm;
+
        return 0;
 
+disable_pm:
+       pm_runtime_disable(&pdev->dev);
 error:
        clk_disable_unprepare(cclk);
        clk_disable_unprepare(iclk);
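Registering the SPI master can bind child devices immediately, and those children may issue runtime-PM calls, so the controller's runtime PM must be fully set up first; the error path gains a disable_pm label so a late failure unwinds in reverse order of setup. The resulting probe tail, compressed:

	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	ret = devm_spi_register_master(dev, master);	/* children may probe right here */
	if (ret)
		goto disable_pm;			/* undo PM before the clock teardown */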
index 1f56ef6..b83dd73 100644 (file)
@@ -175,9 +175,9 @@ static int sh_sci_spi_remove(struct platform_device *dev)
 {
        struct sh_sci_spi *sp = platform_get_drvdata(dev);
 
-       iounmap(sp->membase);
-       setbits(sp, PIN_INIT, 0);
        spi_bitbang_stop(&sp->bitbang);
+       setbits(sp, PIN_INIT, 0);
+       iounmap(sp->membase);
        spi_master_put(sp->bitbang.master);
        return 0;
 }
index d4f9670..22aa41c 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/mutex.h>
 #include <linux/of_device.h>
 #include <linux/of_irq.h>
+#include <linux/clk/clk-conf.h>
 #include <linux/slab.h>
 #include <linux/mod_devicetable.h>
 #include <linux/spi/spi.h>
@@ -259,6 +260,10 @@ static int spi_drv_probe(struct device *dev)
        const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
        int ret;
 
+       ret = of_clk_set_defaults(dev->of_node, false);
+       if (ret)
+               return ret;
+
        acpi_dev_pm_attach(dev, true);
        ret = sdrv->probe(to_spi_device(dev));
        if (ret)
index 2c61783..c341ac1 100644 (file)
@@ -97,7 +97,6 @@ void timed_output_dev_unregister(struct timed_output_dev *tdev)
 {
        tdev->enable(tdev, 0);
        device_destroy(timed_output_class, MKDEV(0, tdev->index));
-       dev_set_drvdata(tdev->dev, NULL);
 }
 EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
 
index 5d56428..a2f6957 100644 (file)
@@ -651,6 +651,7 @@ config COMEDI_ADDI_APCI_1516
 
 config COMEDI_ADDI_APCI_1564
        tristate "ADDI-DATA APCI_1564 support"
+       select COMEDI_ADDI_WATCHDOG
        ---help---
          Enable support for ADDI-DATA APCI_1564 cards
 
index b36feb0..fa38be0 100644 (file)
@@ -36,10 +36,11 @@ config IIO_SIMPLE_DUMMY_EVENTS
          Add some dummy events to the simple dummy driver.
 
 config IIO_SIMPLE_DUMMY_BUFFER
-       boolean "Buffered capture support"
-       select IIO_KFIFO_BUF
-       help
-         Add buffered data capture to the simple dummy driver.
+       boolean "Buffered capture support"
+       select IIO_BUFFER
+       select IIO_KFIFO_BUF
+       help
+         Add buffered data capture to the simple dummy driver.
 
 endif # IIO_SIMPLE_DUMMY
 
index 357cef2..7194bd1 100644 (file)
@@ -465,7 +465,7 @@ static int ad7291_probe(struct i2c_client *client,
        struct ad7291_platform_data *pdata = client->dev.platform_data;
        struct ad7291_chip_info *chip;
        struct iio_dev *indio_dev;
-       int ret = 0;
+       int ret;
 
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
        if (!indio_dev)
@@ -475,7 +475,7 @@ static int ad7291_probe(struct i2c_client *client,
        if (pdata && pdata->use_external_ref) {
                chip->reg = devm_regulator_get(&client->dev, "vref");
                if (IS_ERR(chip->reg))
-                       return ret;
+                       return PTR_ERR(chip->reg);
 
                ret = regulator_enable(chip->reg);
                if (ret)
index dae8d1a..52d7517 100644 (file)
@@ -846,6 +846,14 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
                        LRADC_CTRL1);
        mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
 
+       /* Enable / disable the divider per requirement */
+       if (test_bit(chan, &lradc->is_divided))
+               mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
+                       LRADC_CTRL2);
+       else
+               mxs_lradc_reg_clear(lradc,
+                       1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2);
+
        /* Clean the slot's previous content, then set new one. */
        mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
                        LRADC_CTRL4);
@@ -961,15 +969,11 @@ static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
                if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
                    val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
                        /* divider by two disabled */
-                       writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
-                              lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR);
                        clear_bit(chan->channel, &lradc->is_divided);
                        ret = 0;
                } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
                           val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
                        /* divider by two enabled */
-                       writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
-                              lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET);
                        set_bit(chan->channel, &lradc->is_divided);
                        ret = 0;
                }
index 9e0f2a9..ab338e3 100644 (file)
@@ -667,9 +667,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
        chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
                        chip->tsl2x7x_settings.prox_pulse_count;
        chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
-       chip->tsl2x7x_settings.prox_thres_low;
+                       (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
+       chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
+                       (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
        chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
-                       chip->tsl2x7x_settings.prox_thres_high;
+                       (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
+       chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
+                       (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
 
        /* and make sure we're not already on */
        if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
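prox_thres_low/high are 16-bit settings but each TSL2X7X threshold register is 8 bits wide; the old code stored the raw value into the LO register only, truncating anything above 255. The fix splits each value across the LO/HI register pair (config stands in for chip->tsl2x7x_config):

	u16 thres = chip->tsl2x7x_settings.prox_thres_low;

	config[TSL2X7X_PRX_MINTHRESHLO] = thres & 0xFF;		/* low byte  */
	config[TSL2X7X_PRX_MINTHRESHHI] = (thres >> 8) & 0xFF;	/* high byte */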
index b567832..4ca61af 100644 (file)
@@ -173,6 +173,13 @@ static int imx_pd_register(struct drm_device *drm,
        if (ret)
                return ret;
 
+       /* set the connector's dpms to OFF so that
+        * drm_helper_connector_dpms() won't return
+        * immediately since the current state is ON
+        * at this point.
+        */
+       imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
+
        drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
        drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
                         DRM_MODE_ENCODER_NONE);
index 78b0fba..8afc6fe 100644 (file)
@@ -1,6 +1,6 @@
 config VIDEO_OMAP4
        bool "OMAP 4 Camera support"
-       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+       depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
        select VIDEOBUF2_DMA_CONTIG
        ---help---
          Driver for an OMAP 4 ISS controller.
index 0acacab..46f5abc 100644 (file)
@@ -298,7 +298,7 @@ int rtl8723a_FirmwareDownload(struct rtw_adapter *padapter)
        RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
 
        if (IS_8723A_A_CUT(pHalData->VersionID)) {
-               fw_name = "rtlwifi/rtl8723aufw.bin";
+               fw_name = "rtlwifi/rtl8723aufw_A.bin";
                RT_TRACE(_module_hal_init_c_, _drv_info_,
                         ("rtl8723a_FirmwareDownload: R8723FwImageArray_UMC "
                          "for RTL8723A A CUT\n"));
index 4e32003..1fb3438 100644 (file)
@@ -29,7 +29,9 @@ MODULE_AUTHOR("Realtek Semiconductor Corp.");
 MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
 MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
 MODULE_VERSION(DRIVERVERSION);
-MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
 
 /* module param defaults */
 static int rtw_chip_version = 0x00;
index 8b25c1a..ebb19b2 100644 (file)
@@ -530,8 +530,10 @@ int rtw_resume_process23a(struct rtw_adapter *padapter)
        pwrpriv->bkeepfwalive = false;
 
        DBG_8723A("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
-       if (pm_netdev_open23a(pnetdev, true) != 0)
+       if (pm_netdev_open23a(pnetdev, true) != 0) {
+               up(&pwrpriv->lock);
                goto exit;
+       }
 
        netif_device_attach(pnetdev);
        netif_carrier_on(pnetdev);
index 8945b4e..cb50120 100644 (file)
@@ -280,8 +280,10 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 
                /* Wait until the state has moved to ON */
-               while (*pdata->dsp_prm_read(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST)&
-                                       OMAP_INTRANSITION_MASK);
+               while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+                                             OMAP2_PM_PWSTST) &
+                                               OMAP_INTRANSITION_MASK)
+                       ;
                /* Disable Automatic transition */
                (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
index 59679cd..69b80e8 100644 (file)
@@ -981,7 +981,7 @@ start:
                pDevice->byERPFlag &= ~(WLAN_SET_ERP_USE_PROTECTION(1));
        }
 
-       {
+       if (pDevice->eCommandState == WLAN_ASSOCIATE_WAIT) {
                pDevice->byReAssocCount++;
                /* 10 sec timeout */
                if ((pDevice->byReAssocCount > 10) && (!pDevice->bLinkPass)) {
index 1d3908d..5a5fd93 100644 (file)
@@ -2318,6 +2318,7 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
        int             handled = 0;
        unsigned char byData = 0;
        int             ii = 0;
+       unsigned long flags;
 
        MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
 
@@ -2331,7 +2332,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
 
        handled = 1;
        MACvIntDisable(pDevice->PortOffset);
-       spin_lock_irq(&pDevice->lock);
+
+       spin_lock_irqsave(&pDevice->lock, flags);
 
        //Make sure current page is 0
        VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
@@ -2560,7 +2562,8 @@ static  irqreturn_t  device_intr(int irq,  void *dev_instance) {
        if (byOrgPageSel == 1)
                MACvSelectPage1(pDevice->PortOffset);
 
-       spin_unlock_irq(&pDevice->lock);
+       spin_unlock_irqrestore(&pDevice->lock, flags);
+
        MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
 
        return IRQ_RETVAL(handled);
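spin_unlock_irq() unconditionally re-enables interrupts, which is wrong in a handler that may be entered with them already disabled, and it silently corrupts the IRQ state of whatever the lock nests inside. The save/restore variants preserve the caller's state:

	unsigned long flags;

	spin_lock_irqsave(&pDevice->lock, flags);	/* remembers the current IRQ state */
	/* ... critical section ... */
	spin_unlock_irqrestore(&pDevice->lock, flags);	/* restores it exactly */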
index 5663f4d..1f4c794 100644 (file)
@@ -1309,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
        if (cmd->data_direction != DMA_TO_DEVICE) {
                pr_err("Command ITT: 0x%08x received DataOUT for a"
                        " NON-WRITE command.\n", cmd->init_task_tag);
-               return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
+               return iscsit_dump_data_payload(conn, payload_length, 1);
        }
        se_cmd = &cmd->se_cmd;
        iscsit_mod_dataout_timer(cmd);
index 19b842c..ab4915c 100644 (file)
@@ -174,7 +174,6 @@ static int chap_server_compute_md5(
        char *nr_out_ptr,
        unsigned int *nr_out_len)
 {
-       char *endptr;
        unsigned long id;
        unsigned char id_as_uchar;
        unsigned char digest[MD5_SIGNATURE_SIZE];
@@ -320,9 +319,14 @@ static int chap_server_compute_md5(
        }
 
        if (type == HEX)
-               id = simple_strtoul(&identifier[2], &endptr, 0);
+               ret = kstrtoul(&identifier[2], 0, &id);
        else
-               id = simple_strtoul(identifier, &endptr, 0);
+               ret = kstrtoul(identifier, 0, &id);
+
+       if (ret < 0) {
+               pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
+               goto out;
+       }
        if (id > 255) {
                pr_err("chap identifier: %lu greater than 255\n", id);
                goto out;
@@ -351,6 +355,10 @@ static int chap_server_compute_md5(
                pr_err("Unable to convert incoming challenge\n");
                goto out;
        }
+       if (challenge_len > 1024) {
+               pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+               goto out;
+       }
        /*
         * During mutual authentication, the CHAP_C generated by the
         * initiator must not match the original CHAP_C generated by
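simple_strtoul() cannot report failure: it parses as far as it can and leaves the caller to inspect endptr, which this code never did. kstrtoul() returns -EINVAL or -ERANGE for malformed or overflowing input, so CHAP identifiers from the initiator are now actually validated (the new 1024-byte bound on CHAP_C is a companion hardening fix). Condensed:

	unsigned long id;
	int ret = kstrtoul(identifier, 0, &id);	/* base 0: accepts decimal or 0x-prefixed hex */
	if (ret < 0)
		return ret;			/* reject garbage instead of guessing */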
index fecb695..5e71ac6 100644 (file)
@@ -1216,7 +1216,7 @@ old_sess_out:
 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
        u8 *buffer, zero_tsih = 0;
-       int ret = 0, rc, stop;
+       int ret = 0, rc;
        struct iscsi_conn *conn = NULL;
        struct iscsi_login *login;
        struct iscsi_portal_group *tpg = NULL;
@@ -1230,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
                np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
                complete(&np->np_restart_comp);
+       } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+               spin_unlock_bh(&np->np_thread_lock);
+               goto exit;
        } else {
                np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
        }
@@ -1422,10 +1425,8 @@ old_sess_out:
        }
 
 out:
-       stop = kthread_should_stop();
-       /* Wait for another socket.. */
-       if (!stop)
-               return 1;
+       return 1;
+
 exit:
        iscsi_stop_login_thread_timer(np);
        spin_lock_bh(&np->np_thread_lock);
@@ -1442,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
 
        allow_signal(SIGINT);
 
-       while (!kthread_should_stop()) {
+       while (1) {
                ret = __iscsi_target_login_thread(np);
                /*
                 * We break and exit here unless another sock_accept() call
index 53e157c..fd90b28 100644 (file)
@@ -1295,6 +1295,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
        login->login_failed = 1;
        iscsit_collect_login_stats(conn, status_class, status_detail);
 
+       memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
+
        hdr     = (struct iscsi_login_rsp *)&login->rsp[0];
        hdr->opcode             = ISCSI_OP_LOGIN_RSP;
        hdr->status_class       = status_class;
index 6d2f375..8c64b87 100644 (file)
@@ -239,6 +239,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
        return;
 
 out_done:
+       kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
        sc->scsi_done(sc);
        return;
 }
index 11d26fe..98da901 100644 (file)
@@ -616,6 +616,7 @@ void core_dev_unexport(
        dev->export_count--;
        spin_unlock(&hba->device_lock);
 
+       lun->lun_sep = NULL;
        lun->lun_se_dev = NULL;
 }
 
index c036595..fddfae6 100644 (file)
@@ -825,7 +825,7 @@ int core_tpg_add_lun(
 
        ret = core_dev_export(dev, tpg, lun);
        if (ret < 0) {
-               percpu_ref_cancel_init(&lun->lun_ref);
+               percpu_ref_exit(&lun->lun_ref);
                return ret;
        }
 
@@ -880,5 +880,7 @@ int core_tpg_post_dellun(
        lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
        spin_unlock(&tpg->tpg_lun_lock);
 
+       percpu_ref_exit(&lun->lun_ref);
+
        return 0;
 }
index a8aaf6a..9465623 100644 (file)
@@ -129,7 +129,10 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
 
                tc_device_get_irq(tdev);
 
-               device_register(&tdev->dev);
+               if (device_register(&tdev->dev)) {
+                       put_device(&tdev->dev);
+                       goto out_err;
+               }
                list_add_tail(&tdev->node, &tbus->devices);
 
 out_err:
@@ -148,7 +151,10 @@ static int __init tc_init(void)
 
        INIT_LIST_HEAD(&tc_bus.devices);
        dev_set_name(&tc_bus.dev, "tc");
-       device_register(&tc_bus.dev);
+       if (device_register(&tc_bus.dev)) {
+               put_device(&tc_bus.dev);
+               return 0;
+       }
 
        if (tc_bus.info.slot_size) {
                unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
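Once device_register() has been called, even unsuccessfully, the struct device has been through device_initialize(): the documented cleanup is put_device(), which lets the release callback free it, never a bare kfree(). Both TURBOchannel call sites now follow that rule:

	if (device_register(&tdev->dev)) {
		put_device(&tdev->dev);	/* drops the ref; ->release() does the freeing */
		goto out_err;		/* never kfree() a registered device directly */
	}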
index a99c631..2c516f2 100644 (file)
@@ -306,7 +306,7 @@ static int imx_get_sensor_data(struct platform_device *pdev)
 {
        struct imx_thermal_data *data = platform_get_drvdata(pdev);
        struct regmap *map;
-       int t1, t2, n1, n2;
+       int t1, n1;
        int ret;
        u32 val;
        u64 temp64;
@@ -333,14 +333,10 @@ static int imx_get_sensor_data(struct platform_device *pdev)
        /*
         * Sensor data layout:
         *   [31:20] - sensor value @ 25C
-        *    [19:8] - sensor value of hot
-        *     [7:0] - hot temperature value
         * Use universal formula now and only need sensor value @ 25C
         * slope = 0.4297157 - (0.0015976 * 25C fuse)
         */
        n1 = val >> 20;
-       n2 = (val & 0xfff00) >> 8;
-       t2 = val & 0xff;
        t1 = 25; /* t1 always 25C */
 
        /*
@@ -366,16 +362,16 @@ static int imx_get_sensor_data(struct platform_device *pdev)
        data->c2 = n1 * data->c1 + 1000 * t1;
 
        /*
-        * Set the default passive cooling trip point to 20 °C below the
-        * maximum die temperature. Can be changed from userspace.
+        * Set the default passive cooling trip point,
+        * can be changed from userspace.
         */
-       data->temp_passive = 1000 * (t2 - 20);
+       data->temp_passive = IMX_TEMP_PASSIVE;
 
        /*
-        * The maximum die temperature is t2, let's give 5 °C cushion
-        * for noise and possible temperature rise between measurements.
+        * The maximum die temperature set to 20 C higher than
+        * IMX_TEMP_PASSIVE.
         */
-       data->temp_critical = 1000 * (t2 - 5);
+       data->temp_critical = 1000 * 20 + data->temp_passive;
 
        return 0;
 }
index 04b1be7..4b2b999 100644 (file)
@@ -156,8 +156,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
 
                        ret = thermal_zone_bind_cooling_device(thermal,
                                                tbp->trip_id, cdev,
-                                               tbp->min,
-                                               tbp->max);
+                                               tbp->max,
+                                               tbp->min);
                        if (ret)
                                return ret;
                }
@@ -712,11 +712,12 @@ thermal_of_build_thermal_zone(struct device_node *np)
        }
 
        i = 0;
-       for_each_child_of_node(child, gchild)
+       for_each_child_of_node(child, gchild) {
                ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
                                                      tz->trips, tz->ntrips);
                if (ret)
                        goto free_tbps;
+       }
 
 finish:
        of_node_put(child);
index fdb0719..1967bee 100644 (file)
@@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
        return NULL;
 }
 
+static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
+{
+       unsigned long temp;
+       return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
+}
+
 int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 {
        struct thermal_hwmon_device *hwmon;
@@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
        if (result)
                goto free_temp_mem;
 
-       if (tz->ops->get_crit_temp) {
-               unsigned long temperature;
-               if (!tz->ops->get_crit_temp(tz, &temperature)) {
-                       snprintf(temp->temp_crit.name,
-                                sizeof(temp->temp_crit.name),
+       if (thermal_zone_crit_temp_valid(tz)) {
+               snprintf(temp->temp_crit.name,
+                               sizeof(temp->temp_crit.name),
                                "temp%d_crit", hwmon->count);
-                       temp->temp_crit.attr.attr.name = temp->temp_crit.name;
-                       temp->temp_crit.attr.attr.mode = 0444;
-                       temp->temp_crit.attr.show = temp_crit_show;
-                       sysfs_attr_init(&temp->temp_crit.attr.attr);
-                       result = device_create_file(hwmon->device,
-                                                   &temp->temp_crit.attr);
-                       if (result)
-                               goto unregister_input;
-               }
+               temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+               temp->temp_crit.attr.attr.mode = 0444;
+               temp->temp_crit.attr.show = temp_crit_show;
+               sysfs_attr_init(&temp->temp_crit.attr.attr);
+               result = device_create_file(hwmon->device,
+                                           &temp->temp_crit.attr);
+               if (result)
+                       goto unregister_input;
        }
 
        mutex_lock(&thermal_hwmon_list_lock);
@@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
        }
 
        device_remove_file(hwmon->device, &temp->temp_input.attr);
-       if (tz->ops->get_crit_temp)
+       if (thermal_zone_crit_temp_valid(tz))
                device_remove_file(hwmon->device, &temp->temp_crit.attr);
 
        mutex_lock(&thermal_hwmon_list_lock);
index a1271b5..634b6ce 100644 (file)
@@ -1155,7 +1155,7 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
        /* register shadow for context save and restore */
        bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
                                   bgp->conf->sensor_count, GFP_KERNEL);
-       if (!bgp) {
+       if (!bgp->regval) {
                dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
                return ERR_PTR(-ENOMEM);
        }
index f95569d..f44f1ba 100644 (file)
@@ -1214,15 +1214,16 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
 {
        struct n_tty_data *ldata = tty->disc_data;
 
-       if (I_IGNPAR(tty))
-               return;
-       if (I_PARMRK(tty)) {
-               put_tty_queue('\377', ldata);
-               put_tty_queue('\0', ldata);
-               put_tty_queue(c, ldata);
-       } else  if (I_INPCK(tty))
-               put_tty_queue('\0', ldata);
-       else
+       if (I_INPCK(tty)) {
+               if (I_IGNPAR(tty))
+                       return;
+               if (I_PARMRK(tty)) {
+                       put_tty_queue('\377', ldata);
+                       put_tty_queue('\0', ldata);
+                       put_tty_queue(c, ldata);
+               } else
+                       put_tty_queue('\0', ldata);
+       } else
                put_tty_queue(c, ldata);
        if (waitqueue_active(&tty->read_wait))
                wake_up_interruptible(&tty->read_wait);
index 27f7ad6..7a91c6d 100644 (file)
@@ -2357,7 +2357,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART_LSR_BI;
 
        /*
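This is the first of a long run of identical fixes below (pl010, pl011, atmel, bcm63xx, bfin, dz, efm32, fsl_lpuart, ip22zilog, m32r, max310x, mfd, mpsc, msm, and the FIXME notes for altera and mcf): read_status_mask decides which receive events the driver recognizes at all, and ignore_status_mask then filters the recognized ones. If IGNBRK was set without BRKINT or PARMRK, the break bit was missing from read_status_mask, so the ignore test could never match and the break leaked through to the line discipline as data. The intended pairing, in the 8250's terms:

	/* Recognize breaks whenever any break-related iflag cares about them... */
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= UART_LSR_BI;

	/* ...so that IGNBRK can actually swallow them. */
	if (termios->c_iflag & IGNBRK)
		port->ignore_status_mask |= UART_LSR_BI;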
index cfef801..4858b8a 100644 (file)
@@ -144,8 +144,11 @@ static int __init early_serial8250_setup(struct earlycon_device *device,
        if (!(device->port.membase || device->port.iobase))
                return 0;
 
-       if (!device->baud)
+       if (!device->baud) {
                device->baud = probe_baud(&device->port);
+               snprintf(device->options, sizeof(device->options), "%u",
+                        device->baud);
+       }
 
        init_port(device);
 
index 501667e..3233766 100644 (file)
@@ -185,6 +185,12 @@ static void altera_uart_set_termios(struct uart_port *port,
        uart_update_timeout(port, termios->c_cflag, baud);
        altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
        spin_unlock_irqrestore(&port->lock, flags);
+
+       /*
+        * FIXME: port->read_status_mask and port->ignore_status_mask
+        * need to be initialized based on termios settings for
+        * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
+        */
 }
 
 static void altera_uart_rx_chars(struct altera_uart *pp)
index 01c9e72..971af1e 100644 (file)
@@ -420,7 +420,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
        uap->port.read_status_mask = UART01x_RSR_OE;
        if (termios->c_iflag & INPCK)
                uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                uap->port.read_status_mask |= UART01x_RSR_BE;
 
        /*
index 908a6e3..0e26dcb 100644 (file)
@@ -1744,7 +1744,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = UART011_DR_OE | 255;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART011_DR_BE;
 
        /*
index c9f5c9d..008c223 100644 (file)
@@ -177,7 +177,7 @@ static void arc_serial_tx_chars(struct arc_uart_port *uart)
                uart->port.icount.tx++;
                uart->port.x_char = 0;
                sent = 1;
-       } else if (xmit->tail != xmit->head) {  /* TODO: uart_circ_empty */
+       } else if (!uart_circ_empty(xmit)) {
                ch = xmit->buf[xmit->tail];
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                uart->port.icount.tx++;
index 3fceae0..c4f7503 100644 (file)
@@ -1932,7 +1932,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = ATMEL_US_OVRE;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= ATMEL_US_RXBRK;
 
        if (atmel_use_pdc_rx(port))
index a47421e..2315190 100644 (file)
@@ -567,7 +567,7 @@ static void bcm_uart_set_termios(struct uart_port *port,
                port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
                port->read_status_mask |= UART_FIFO_PARERR_MASK;
        }
-       if (new->c_iflag & (BRKINT))
+       if (new->c_iflag & (IGNBRK | BRKINT))
                port->read_status_mask |= UART_FIFO_BRKDET_MASK;
 
        port->ignore_status_mask = 0;
index 869ceba..ac86a20 100644 (file)
@@ -833,7 +833,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = OE;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= (FE | PE);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= BI;
 
        /*
index 2f2b2e5..cdbbc78 100644 (file)
@@ -625,7 +625,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
        dport->port.read_status_mask = DZ_OERR;
        if (termios->c_iflag & INPCK)
                dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                dport->port.read_status_mask |= DZ_BREAK;
 
        /* characters to ignore */
index 5131b5e..a514ee6 100644 (file)
@@ -25,7 +25,7 @@
 #include <asm/serial.h>
 
 static struct console early_con = {
-       .name =         "earlycon",
+       .name =         "uart", /* 8250 console switch requires this name */
        .flags =        CON_PRINTBUFFER | CON_BOOT,
        .index =        -1,
 };
index b373f64..3b0ee9a 100644 (file)
@@ -407,7 +407,7 @@ static void efm32_uart_set_termios(struct uart_port *port,
        if (new->c_iflag & INPCK)
                port->read_status_mask |=
                        UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
-       if (new->c_iflag & (BRKINT | PARMRK))
+       if (new->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= SW_UARTn_RXDATAX_BERR;
 
        port->ignore_status_mask = 0;
index c5eb897..49385c8 100644 (file)
@@ -902,7 +902,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
        sport->port.read_status_mask = 0;
        if (termios->c_iflag & INPCK)
                sport->port.read_status_mask |= (UARTSR1_FE | UARTSR1_PE);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                sport->port.read_status_mask |= UARTSR1_FE;
 
        /* characters to ignore */
index e2f9387..044e86d 100644 (file)
@@ -567,6 +567,9 @@ static void imx_start_tx(struct uart_port *port)
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long temp;
 
+       if (uart_circ_empty(&port->state->xmit))
+               return;
+
        if (USE_IRDA(sport)) {
                /* half duplex in IrDA mode; have to disable receive mode */
                temp = readl(sport->port.membase + UCR4);
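imx here, and ip22zilog and m32r_sio below, all gain the same guard: ->start_tx() can legitimately be called when the transmit ring is empty (for instance to push an x_char), and unconditionally transmitting xmit->buf[xmit->tail] then sends a stale byte and advances the tail past the head. The check is one line:

	if (uart_circ_empty(&port->state->xmit))
		return;		/* nothing queued; don't emit stale buf[tail] */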
index 1d94205..99b7b86 100644 (file)
@@ -603,6 +603,8 @@ static void ip22zilog_start_tx(struct uart_port *port)
        } else {
                struct circ_buf *xmit = &port->state->xmit;
 
+               if (uart_circ_empty(xmit))
+                       return;
                writeb(xmit->buf[xmit->tail], &channel->data);
                ZSDELAY();
                ZS_WSYNC(channel);
@@ -850,7 +852,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
        up->port.read_status_mask = Rx_OVR;
        if (iflag & INPCK)
                up->port.read_status_mask |= CRC_ERR | PAR_ERR;
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= BRK_ABRT;
 
        up->port.ignore_status_mask = 0;
index 9cd9b4e..5702828 100644 (file)
@@ -266,9 +266,11 @@ static void m32r_sio_start_tx(struct uart_port *port)
        if (!(up->ier & UART_IER_THRI)) {
                up->ier |= UART_IER_THRI;
                serial_out(up, UART_IER, up->ier);
-               serial_out(up, UART_TX, xmit->buf[xmit->tail]);
-               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-               up->port.icount.tx++;
+               if (!uart_circ_empty(xmit)) {
+                       serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+                       xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+                       up->port.icount.tx++;
+               }
        }
        while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY);
 #else
@@ -737,7 +739,7 @@ static void m32r_sio_set_termios(struct uart_port *port,
        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;
 
        /*
index 2a99d0c..ba285cd 100644 (file)
@@ -835,7 +835,7 @@ static void max310x_set_termios(struct uart_port *port,
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= MAX310X_LSR_RXPAR_BIT |
                                          MAX310X_LSR_FRERR_BIT;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= MAX310X_LSR_RXBRK_BIT;
 
        /* Set status ignore mask */
index 0edfaf8..a6f0857 100644 (file)
@@ -248,6 +248,12 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
                mr1 |= MCFUART_MR1_PARITYNONE;
        }
 
+       /*
+        * FIXME: port->read_status_mask and port->ignore_status_mask
+        * need to be initialized based on termios settings for
+        * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
+        */
+
        if (termios->c_cflag & CSTOPB)
                mr2 |= MCFUART_MR2_STOP2;
        else
index 52c930f..445799d 100644 (file)
@@ -977,7 +977,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;
 
        /* Characters to ignore */
index e30a3ca..759c6a6 100644 (file)
@@ -1458,7 +1458,7 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
                pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
                        | SDMA_DESC_CMDSTAT_FR;
 
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
 
        /* Characters/events to ignore */
index 778e376..72000a6 100644 (file)
@@ -582,7 +582,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = 0;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART_SR_RX_BREAK;
 
        uart_update_timeout(port, termios->c_cflag, baud);
@@ -991,7 +991,7 @@ static const struct of_device_id msm_uartdm_table[] = {
        { }
 };
 
-static int __init msm_serial_probe(struct platform_device *pdev)
+static int msm_serial_probe(struct platform_device *pdev)
 {
        struct msm_port *msm_port;
        struct resource *resource;
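
Dropping __init from msm_serial_probe() is a correctness fix, not a cleanup: probe routines can run long after boot (deferred probe, a device created later, a manual bind through sysfs), but .init.text is freed once initcalls complete, so a late bind would jump into freed memory. A two-line sketch of the contrast, against the in-kernel platform-driver API:

#include <linux/init.h>
#include <linux/platform_device.h>

/* Wrong: the body is discarded with .init.text after boot, yet the
 * driver core may still call probe for a deferred or sysfs bind. */
static int __init bad_probe(struct platform_device *pdev);

/* Right: probe stays resident for the driver's whole lifetime. */
static int good_probe(struct platform_device *pdev);
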
index 4b5b3c2..86de447 100644 (file)
@@ -604,7 +604,7 @@ static void mxs_auart_settermios(struct uart_port *u,
 
        if (termios->c_iflag & INPCK)
                u->read_status_mask |= AUART_STAT_PERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                u->read_status_mask |= AUART_STAT_BERR;
 
        /*
index 0a4dd70..7a67456 100644 (file)
@@ -419,7 +419,7 @@ netx_set_termios(struct uart_port *port, struct ktermios *termios,
        }
 
        port->read_status_mask = 0;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= SR_BE;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= SR_PE | SR_FE;
index e9d420f..f7ad5b9 100644 (file)
@@ -653,6 +653,8 @@ static void pmz_start_tx(struct uart_port *port)
        } else {
                struct circ_buf *xmit = &port->state->xmit;
 
+               if (uart_circ_empty(xmit))
+                       goto out;
                write_zsdata(uap, xmit->buf[xmit->tail]);
                zssync(uap);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -661,6 +663,7 @@ static void pmz_start_tx(struct uart_port *port)
                if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                        uart_write_wakeup(&uap->port);
        }
+ out:
        pmz_debug("pmz: start_tx() done.\n");
 }
 
@@ -1092,7 +1095,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
        uap->port.read_status_mask = Rx_OVR;
        if (iflag & INPCK)
                uap->port.read_status_mask |= CRC_ERR | PAR_ERR;
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                uap->port.read_status_mask |= BRK_ABRT;
 
        uap->port.ignore_status_mask = 0;
index de6c05c..2ba24a4 100644 (file)
@@ -477,7 +477,7 @@ pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
                sport->port.read_status_mask |=
                        FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
                        FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                sport->port.read_status_mask |=
                        ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
 
index 9e7ee39..c638c53 100644 (file)
@@ -492,7 +492,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;
 
        /*
index 3293377..c1d3ebd 100644 (file)
@@ -66,7 +66,7 @@ static void dbg(const char *fmt, ...)
        char buff[256];
 
        va_start(va, fmt);
-       vscnprintf(buff, sizeof(buf), fmt, va);
+       vscnprintf(buff, sizeof(buff), fmt, va);
        va_end(va);
 
        printascii(buff);
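
The samsung debug fix above is the classic sizeof-on-the-wrong-identifier bug: it only compiled because some other object named buf was presumably in scope, so vscnprintf was silently handed the wrong capacity. A userspace reproduction of the failure mode:

#include <stdio.h>

static char buf[8];                     /* an unrelated object in scope */

static void dbg(const char *msg)
{
        char buff[256];

        /* the bug pattern: sizeof() silently measured the wrong object */
        snprintf(buff, sizeof(buf), "%s", msg);
        printf("wrong: '%s'\n", buff);

        snprintf(buff, sizeof(buff), "%s", msg);
        printf("right: '%s'\n", buff);
}

int main(void)
{
        dbg("a message much longer than eight bytes");
        return 0;
}
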
index a7cdec2..771f361 100644 (file)
@@ -596,7 +596,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
        if (termios->c_iflag & INPCK)
                uport->read_status_mask |= M_DUART_FRM_ERR |
                                           M_DUART_PARITY_ERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                uport->read_status_mask |= M_DUART_RCVD_BRK;
 
        uport->ignore_status_mask = 0;
index 5443b46..e84b6a3 100644 (file)
@@ -665,7 +665,7 @@ static void sccnxp_set_termios(struct uart_port *port,
        port->read_status_mask = SR_OVR;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= SR_PE | SR_FE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= SR_BRK;
 
        /* Set status ignore mask */
index e1caa99..5c79bda 100644 (file)
@@ -437,7 +437,7 @@ static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *term
        port->read_status_mask = URLS_URROE;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= (URLS_URFE | URLS_URPE);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= URLS_URBI;
 
        /*
index 60f49b9..ea85460 100644 (file)
@@ -697,7 +697,7 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
                TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= TXX9_SIDISR_UBRK;
 
        /*
index 1f2be48..9b4d71c 100644 (file)
@@ -896,7 +896,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
                if (termios->c_iflag & INPCK)
                        port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
        }
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                        port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (termios->c_iflag & IGNPAR)
index c7f61ac..f48b1cc 100644 (file)
@@ -547,7 +547,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
        ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
        if (termios->c_iflag & INPCK)
                ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
 
        /*
index 5faa8e9..2f57df9 100644 (file)
@@ -427,6 +427,9 @@ static void sunsab_start_tx(struct uart_port *port)
        struct circ_buf *xmit = &up->port.state->xmit;
        int i;
 
+       if (uart_circ_empty(xmit))
+               return;
+
        up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
        writeb(up->interrupt_mask1, &up->regs->w.imr1);
        
@@ -719,7 +722,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
        if (iflag & INPCK)
                up->port.read_status_mask |= (SAB82532_ISR0_PERR |
                                              SAB82532_ISR0_FERR);
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8);
 
        /*
index 9a0f24f..5326ae1 100644 (file)
@@ -834,7 +834,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;
 
        /*
index a2c40ed..02df394 100644 (file)
@@ -703,6 +703,8 @@ static void sunzilog_start_tx(struct uart_port *port)
        } else {
                struct circ_buf *xmit = &port->state->xmit;
 
+               if (uart_circ_empty(xmit))
+                       return;
                writeb(xmit->buf[xmit->tail], &channel->data);
                ZSDELAY();
                ZS_WSYNC(channel);
@@ -915,7 +917,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
        up->port.read_status_mask = Rx_OVR;
        if (iflag & INPCK)
                up->port.read_status_mask |= CRC_ERR | PAR_ERR;
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= BRK_ABRT;
 
        up->port.ignore_status_mask = 0;
index d569ca5..1c52074 100644 (file)
@@ -936,7 +936,7 @@ static void qe_uart_set_termios(struct uart_port *port,
        port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= BD_SC_FR | BD_SC_PR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= BD_SC_BR;
 
        /*
index a63c14b..db0c8a4 100644 (file)
@@ -559,7 +559,7 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new,
        port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
        if (c_iflag & INPCK)
                port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (c_iflag & (BRKINT | PARMRK))
+       if (c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART_LSR_BI;
 
        port->ignore_status_mask = 0;
index 6a16987..2b65bb7 100644 (file)
@@ -923,7 +923,7 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
        uport->read_status_mask = Rx_OVR;
        if (termios->c_iflag & INPCK)
                uport->read_status_mask |= FRM_ERR | PAR_ERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                uport->read_status_mask |= Rx_BRK;
 
        uport->ignore_status_mask = 0;
index 5e0f6ff..b33b00b 100644 (file)
@@ -3226,8 +3226,7 @@ int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt
        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
                con_back = &registered_con_driver[i];
 
-               if (con_back->con &&
-                   !(con_back->flag & CON_DRIVER_FLAG_MODULE)) {
+               if (con_back->con && con_back->con != csw) {
                        defcsw = con_back->con;
                        retval = 0;
                        break;
@@ -3332,6 +3331,7 @@ static int vt_unbind(struct con_driver *con)
 {
        const struct consw *csw = NULL;
        int i, more = 1, first = -1, last = -1, deflt = 0;
+       int ret;
 
        if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
            con_is_graphics(con->con, con->first, con->last))
@@ -3357,8 +3357,10 @@ static int vt_unbind(struct con_driver *con)
 
                if (first != -1) {
                        console_lock();
-                       do_unbind_con_driver(csw, first, last, deflt);
+                       ret = do_unbind_con_driver(csw, first, last, deflt);
                        console_unlock();
+                       if (ret != 0)
+                               return ret;
                }
 
                first = -1;
@@ -3645,17 +3647,20 @@ err:
  */
 int do_unregister_con_driver(const struct consw *csw)
 {
-       int i, retval = -ENODEV;
+       int i;
 
        /* cannot unregister a bound driver */
        if (con_is_bound(csw))
-               goto err;
+               return -EBUSY;
+
+       if (csw == conswitchp)
+               return -EINVAL;
 
        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
                struct con_driver *con_driver = &registered_con_driver[i];
 
                if (con_driver->con == csw &&
-                   con_driver->flag & CON_DRIVER_FLAG_MODULE) {
+                   con_driver->flag & CON_DRIVER_FLAG_INIT) {
                        vtconsole_deinit_device(con_driver);
                        device_destroy(vtconsole_class,
                                       MKDEV(0, con_driver->node));
@@ -3666,12 +3671,11 @@ int do_unregister_con_driver(const struct consw *csw)
                        con_driver->flag = 0;
                        con_driver->first = 0;
                        con_driver->last = 0;
-                       retval = 0;
-                       break;
+                       return 0;
                }
        }
-err:
-       return retval;
+
+       return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(do_unregister_con_driver);
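
The rework above replaces the single shared error label with early returns that give each failure its own code: -EBUSY for a driver that is still bound, -EINVAL for the active boot console (conswitchp), and -ENODEV when the driver was never registered; it also keys the search on CON_DRIVER_FLAG_INIT rather than CON_DRIVER_FLAG_MODULE. A compact userspace model of that early-return structure (the bound check sits inside the loop here purely for brevity):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_DRV 4

struct drv { const char *name; int bound; int registered; };

static struct drv table[MAX_DRV];
static const char *boot_drv = "vga";    /* stands in for conswitchp */

static int unregister_drv(const char *name)
{
        int i;

        if (strcmp(name, boot_drv) == 0)
                return -EINVAL;         /* never drop the boot console */

        for (i = 0; i < MAX_DRV; i++) {
                struct drv *d = &table[i];

                if (!d->registered || strcmp(d->name, name) != 0)
                        continue;
                if (d->bound)
                        return -EBUSY;  /* cannot unregister a bound driver */
                memset(d, 0, sizeof(*d));
                return 0;
        }
        return -ENODEV;                 /* not in the registry at all */
}

int main(void)
{
        table[0] = (struct drv){ "dummy", 0, 1 };
        printf("%d\n", unregister_drv("dummy"));        /* 0 */
        printf("%d\n", unregister_drv("dummy"));        /* -ENODEV */
        return 0;
}
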
 
index e371f5a..a673e5b 100644 (file)
@@ -655,7 +655,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
 
        if (mem->addr & ~PAGE_MASK)
                return -ENODEV;
-       if (vma->vm_end - vma->vm_start > PAGE_ALIGN(mem->size))
+       if (vma->vm_end - vma->vm_start > mem->size)
                return -EINVAL;
 
        vma->vm_ops = &uio_physical_vm_ops;
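
The uio check above is tightened from the page-aligned size to the exact size: with PAGE_ALIGN, a region of, say, 6 KiB rounds up to 8 KiB, letting user space map half a page of physical memory beyond the region. Worked through in a sketch:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long mem_size = 0x1800;        /* 6 KiB region */
        unsigned long req = 0x2000;             /* 8 KiB mapping request */

        /* Old check: 8 KiB <= PAGE_ALIGN(6 KiB) = 8 KiB, so the extra
         * half page beyond the region was mappable. */
        printf("old check rejects: %d\n", req > PAGE_ALIGN(mem_size));
        /* New check compares against the exact region size. */
        printf("new check rejects: %d\n", req > mem_size);
        return 0;
}
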
index 69425b3..b8125aa 100644 (file)
@@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep,
 
        if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
                cap |= QH_IOS;
-       if (hwep->num)
-               cap |= QH_ZLT;
+
+       cap |= QH_ZLT;
        cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
        /*
         * For ISO-TX, we set mult at QH as the largest value, and use
@@ -1321,6 +1321,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
        struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
        unsigned long flags;
+       struct td_node *node, *tmpnode;
 
        if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
                hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
@@ -1331,6 +1332,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 
        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
+       list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+               dma_pool_free(hwep->td_pool, node->ptr, node->dma);
+               list_del(&node->td);
+               kfree(node);
+       }
+
        /* pop request */
        list_del_init(&hwreq->queue);
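
The chipidea ep_dequeue() fix frees every TD node still attached to the request; without it the nodes leaked on each dequeue. Freeing while walking is exactly why the _safe iterator variant is used: the successor must be captured before the current node is released. The same discipline on a plain C list:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int dma;                /* stands in for the td_node payload */
        struct node *next;
};

/* list_for_each_entry_safe() does the same thing with its extra
 * cursor: grab the next pointer before the current node is freed. */
static void free_all(struct node **head)
{
        struct node *n, *tmp;

        for (n = *head; n; n = tmp) {
                tmp = n->next;  /* capture next before freeing n */
                free(n);
        }
        *head = NULL;
}

int main(void)
{
        struct node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                n->dma = i;
                n->next = head;
                head = n;
        }
        free_all(&head);
        printf("list drained without use-after-free\n");
        return 0;
}
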
 
index 879b66e..0e950ad 100644 (file)
@@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
        if (!hub_is_superspeed(hub->hdev))
                return -EINVAL;
 
+       ret = hub_port_status(hub, port1, &portstatus, &portchange);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * The AMD FCH USB XHCI Controller [1022:7814] reports a spurious
+        * result if a USB 3.0 port's link state is set to Disabled: the
+        * next USB 3.0 device hotplugged on that port gets routed to the
+        * 2.0 root hub and is recognized as a high-speed device. The port
+        * is already in the USB_SS_PORT_LS_RX_DETECT state, so check for
+        * that state here and skip the disable to avoid the bug.
+        */
+       if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                               USB_SS_PORT_LS_RX_DETECT) {
+               dev_dbg(&hub->ports[port1 - 1]->dev,
+                        "Not disabling port; link state is RxDetect\n");
+               return ret;
+       }
+
        ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
        if (ret)
                return ret;
@@ -1526,18 +1545,6 @@ static int hub_configure(struct usb_hub *hub,
                dev_dbg(hub_dev, "%umA bus power budget for each child\n",
                                hub->mA_per_port);
 
-       /* Update the HCD's internal representation of this hub before khubd
-        * starts getting port status changes for devices under the hub.
-        */
-       if (hcd->driver->update_hub_device) {
-               ret = hcd->driver->update_hub_device(hcd, hdev,
-                               &hub->tt, GFP_KERNEL);
-               if (ret < 0) {
-                       message = "can't update HCD hub info";
-                       goto fail;
-               }
-       }
-
        ret = hub_hub_status(hub, &hubstatus, &hubchange);
        if (ret < 0) {
                message = "can't get hub status";
@@ -1589,10 +1596,28 @@ static int hub_configure(struct usb_hub *hub,
                }
        }
        hdev->maxchild = i;
+       for (i = 0; i < hdev->maxchild; i++) {
+               struct usb_port *port_dev = hub->ports[i];
+
+               pm_runtime_put(&port_dev->dev);
+       }
+
        mutex_unlock(&usb_port_peer_mutex);
        if (ret < 0)
                goto fail;
 
+       /* Update the HCD's internal representation of this hub before khubd
+        * starts getting port status changes for devices under the hub.
+        */
+       if (hcd->driver->update_hub_device) {
+               ret = hcd->driver->update_hub_device(hcd, hdev,
+                               &hub->tt, GFP_KERNEL);
+               if (ret < 0) {
+                       message = "can't update HCD hub info";
+                       goto fail;
+               }
+       }
+
        usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
 
        hub_activate(hub, HUB_INIT);
@@ -3458,7 +3483,8 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
                struct usb_device *udev = port_dev->child;
 
                if (udev && udev->can_submit) {
-                       dev_warn(&port_dev->dev, "not suspended yet\n");
+                       dev_warn(&port_dev->dev, "device %s not suspended yet\n",
+                                       dev_name(&udev->dev));
                        if (PMSG_IS_AUTO(msg))
                                return -EBUSY;
                }
index 0a7cdc0..326308e 100644 (file)
@@ -84,6 +84,7 @@ struct usb_hub {
  * @dev: generic device interface
  * @port_owner: port's owner
  * @peer: related usb2 and usb3 ports (share the same connector)
+ * @req: default pm qos request for hubs without port power control
  * @connect_type: port's connect type
  * @location: opaque representation of platform connector location
  * @status_lock: synchronize port_event() vs usb_port_{suspend|resume}
@@ -95,6 +96,7 @@ struct usb_port {
        struct device dev;
        struct usb_dev_state *port_owner;
        struct usb_port *peer;
+       struct dev_pm_qos_request *req;
        enum usb_port_connect_type connect_type;
        usb_port_location_t location;
        struct mutex status_lock;
index 62036fa..fe1b6d0 100644 (file)
@@ -21,6 +21,8 @@
 
 #include "hub.h"
 
+static int usb_port_block_power_off;
+
 static const struct attribute_group *port_dev_group[];
 
 static ssize_t connect_type_show(struct device *dev,
@@ -66,6 +68,7 @@ static void usb_port_device_release(struct device *dev)
 {
        struct usb_port *port_dev = to_usb_port(dev);
 
+       kfree(port_dev->req);
        kfree(port_dev);
 }
 
@@ -142,6 +145,9 @@ static int usb_port_runtime_suspend(struct device *dev)
                        == PM_QOS_FLAGS_ALL)
                return -EAGAIN;
 
+       if (usb_port_block_power_off)
+               return -EBUSY;
+
        usb_autopm_get_interface(intf);
        retval = usb_hub_set_port_power(hdev, hub, port1, false);
        usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
@@ -190,11 +196,19 @@ static int link_peers(struct usb_port *left, struct usb_port *right)
        if (left->peer || right->peer) {
                struct usb_port *lpeer = left->peer;
                struct usb_port *rpeer = right->peer;
-
-               WARN(1, "failed to peer %s and %s (%s -> %p) (%s -> %p)\n",
-                       dev_name(&left->dev), dev_name(&right->dev),
-                       dev_name(&left->dev), lpeer,
-                       dev_name(&right->dev), rpeer);
+               char *method;
+
+               if (left->location && left->location == right->location)
+                       method = "location";
+               else
+                       method = "default";
+
+               pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
+                       dev_name(&left->dev), dev_name(&right->dev), method,
+                       dev_name(&left->dev),
+                       lpeer ? dev_name(&lpeer->dev) : "none",
+                       dev_name(&right->dev),
+                       rpeer ? dev_name(&rpeer->dev) : "none");
                return -EBUSY;
        }
 
@@ -251,6 +265,7 @@ static void link_peers_report(struct usb_port *left, struct usb_port *right)
                dev_warn(&left->dev, "failed to peer to %s (%d)\n",
                                dev_name(&right->dev), rc);
                pr_warn_once("usb: port power management may be unreliable\n");
+               usb_port_block_power_off = 1;
        }
 }
 
@@ -386,9 +401,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
        int retval;
 
        port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
-       if (!port_dev) {
-               retval = -ENOMEM;
-               goto exit;
+       if (!port_dev)
+               return -ENOMEM;
+
+       port_dev->req = kzalloc(sizeof(*(port_dev->req)), GFP_KERNEL);
+       if (!port_dev->req) {
+               kfree(port_dev);
+               return -ENOMEM;
        }
 
        hub->ports[port1 - 1] = port_dev;
@@ -404,31 +423,53 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
                        port1);
        mutex_init(&port_dev->status_lock);
        retval = device_register(&port_dev->dev);
-       if (retval)
-               goto error_register;
+       if (retval) {
+               put_device(&port_dev->dev);
+               return retval;
+       }
+
+       /* Set default policy of port-poweroff disabled. */
+       retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
+                       DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
+       if (retval < 0) {
+               device_unregister(&port_dev->dev);
+               return retval;
+       }
 
        find_and_link_peer(hub, port1);
 
+       /*
+        * Enable runtime pm and hold a reference that hub_configure()
+        * will drop once the PM_QOS_NO_POWER_OFF flag state has been set
+        * and the hub has been fully registered (hdev->maxchild set).
+        */
        pm_runtime_set_active(&port_dev->dev);
+       pm_runtime_get_noresume(&port_dev->dev);
+       pm_runtime_enable(&port_dev->dev);
+       device_enable_async_suspend(&port_dev->dev);
 
        /*
-        * Do not enable port runtime pm if the hub does not support
-        * power switching.  Also, userspace must have final say of
-        * whether a port is permitted to power-off.  Do not enable
-        * runtime pm if we fail to expose pm_qos_no_power_off.
+        * Keep hidden the ability to enable port-poweroff if the hub
+        * does not support power switching.
         */
-       if (hub_is_port_power_switchable(hub)
-                       && dev_pm_qos_expose_flags(&port_dev->dev,
-                       PM_QOS_FLAG_NO_POWER_OFF) == 0)
-               pm_runtime_enable(&port_dev->dev);
+       if (!hub_is_port_power_switchable(hub))
+               return 0;
 
-       device_enable_async_suspend(&port_dev->dev);
-       return 0;
+       /* Attempt to let userspace take over the policy. */
+       retval = dev_pm_qos_expose_flags(&port_dev->dev,
+                       PM_QOS_FLAG_NO_POWER_OFF);
+       if (retval < 0) {
+               dev_warn(&port_dev->dev, "failed to expose pm_qos_no_poweroff\n");
+               return 0;
+       }
 
-error_register:
-       put_device(&port_dev->dev);
-exit:
-       return retval;
+       /* Userspace owns the policy, drop the kernel 'no_poweroff' request. */
+       retval = dev_pm_qos_remove_request(port_dev->req);
+       if (retval >= 0) {
+               kfree(port_dev->req);
+               port_dev->req = NULL;
+       }
+       return 0;
 }
 
 void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
index 8eb996e..261c3b4 100644 (file)
@@ -45,6 +45,7 @@ comment "Platform Glue Driver Support"
 config USB_DWC3_OMAP
        tristate "Texas Instruments OMAP5 and similar Platforms"
        depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST)
+       depends on OF
        default USB_DWC3
        help
          Some platforms from Texas Instruments like OMAP5, DRA7xxx and
index 4af4c35..07a736a 100644 (file)
@@ -322,7 +322,7 @@ static int dwc3_omap_remove_core(struct device *dev, void *c)
 {
        struct platform_device *pdev = to_platform_device(dev);
 
-       platform_device_unregister(pdev);
+       of_device_unregister(pdev);
 
        return 0;
 }
@@ -599,7 +599,7 @@ static int dwc3_omap_prepare(struct device *dev)
 {
        struct dwc3_omap        *omap = dev_get_drvdata(dev);
 
-       dwc3_omap_disable_irqs(omap);
+       dwc3_omap_write_irqmisc_set(omap, 0x00);
 
        return 0;
 }
@@ -607,8 +607,19 @@ static int dwc3_omap_prepare(struct device *dev)
 static void dwc3_omap_complete(struct device *dev)
 {
        struct dwc3_omap        *omap = dev_get_drvdata(dev);
+       u32                     reg;
 
-       dwc3_omap_enable_irqs(omap);
+       reg = (USBOTGSS_IRQMISC_OEVT |
+                       USBOTGSS_IRQMISC_DRVVBUS_RISE |
+                       USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+                       USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+                       USBOTGSS_IRQMISC_IDPULLUP_RISE |
+                       USBOTGSS_IRQMISC_DRVVBUS_FALL |
+                       USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+                       USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+                       USBOTGSS_IRQMISC_IDPULLUP_FALL);
+
+       dwc3_omap_write_irqmisc_set(omap, reg);
 }
 
 static int dwc3_omap_suspend(struct device *dev)
index 9d64dd0..dab7927 100644 (file)
@@ -828,10 +828,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
                        length, last ? " last" : "",
                        chain ? " chain" : "");
 
-       /* Skip the LINK-TRB on ISOC */
-       if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
-                       usb_endpoint_xfer_isoc(dep->endpoint.desc))
-               dep->free_slot++;
 
        trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
 
@@ -843,6 +839,10 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
        }
 
        dep->free_slot++;
+       /* Skip the LINK-TRB on ISOC */
+       if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+                       usb_endpoint_xfer_isoc(dep->endpoint.desc))
+               dep->free_slot++;
 
        trb->size = DWC3_TRB_SIZE_LENGTH(length);
        trb->bpl = lower_32_bits(dma);
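
Moving the link-TRB skip after the increment means the enqueue cursor advances first and then hops over the reserved last slot, so free_slot is never left parked on the link TRB between calls on isochronous endpoints. A toy model of the cursor:

#include <stdio.h>

#define TRB_NUM  8
#define TRB_MASK (TRB_NUM - 1)

/* Toy enqueue index for a ring whose last slot is a link TRB (as on
 * isochronous endpoints): advance first, then hop over the link slot. */
static unsigned int advance(unsigned int free_slot)
{
        free_slot++;
        if ((free_slot & TRB_MASK) == TRB_NUM - 1)
                free_slot++;
        return free_slot;
}

int main(void)
{
        unsigned int slot = 0;
        int i;

        for (i = 0; i < 10; i++) {
                printf("%u ", slot & TRB_MASK);
                slot = advance(slot);
        }
        printf("\n");           /* slot 7, the link TRB, never appears */
        return 0;
}
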
index 2ddcd63..9714214 100644 (file)
@@ -1145,15 +1145,15 @@ static struct configfs_item_operations interf_item_ops = {
        .store_attribute        = usb_os_desc_attr_store,
 };
 
-static ssize_t rndis_grp_compatible_id_show(struct usb_os_desc *desc,
-                                           char *page)
+static ssize_t interf_grp_compatible_id_show(struct usb_os_desc *desc,
+                                            char *page)
 {
        memcpy(page, desc->ext_compat_id, 8);
        return 8;
 }
 
-static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc,
-                                            const char *page, size_t len)
+static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
+                                             const char *page, size_t len)
 {
        int l;
 
@@ -1171,20 +1171,20 @@ static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc,
        return len;
 }
 
-static struct usb_os_desc_attribute rndis_grp_attr_compatible_id =
+static struct usb_os_desc_attribute interf_grp_attr_compatible_id =
        __CONFIGFS_ATTR(compatible_id, S_IRUGO | S_IWUSR,
-                       rndis_grp_compatible_id_show,
-                       rndis_grp_compatible_id_store);
+                       interf_grp_compatible_id_show,
+                       interf_grp_compatible_id_store);
 
-static ssize_t rndis_grp_sub_compatible_id_show(struct usb_os_desc *desc,
-                                               char *page)
+static ssize_t interf_grp_sub_compatible_id_show(struct usb_os_desc *desc,
+                                                char *page)
 {
        memcpy(page, desc->ext_compat_id + 8, 8);
        return 8;
 }
 
-static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc,
-                                                const char *page, size_t len)
+static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
+                                                 const char *page, size_t len)
 {
        int l;
 
@@ -1202,20 +1202,21 @@ static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc,
        return len;
 }
 
-static struct usb_os_desc_attribute rndis_grp_attr_sub_compatible_id =
+static struct usb_os_desc_attribute interf_grp_attr_sub_compatible_id =
        __CONFIGFS_ATTR(sub_compatible_id, S_IRUGO | S_IWUSR,
-                       rndis_grp_sub_compatible_id_show,
-                       rndis_grp_sub_compatible_id_store);
+                       interf_grp_sub_compatible_id_show,
+                       interf_grp_sub_compatible_id_store);
 
 static struct configfs_attribute *interf_grp_attrs[] = {
-       &rndis_grp_attr_compatible_id.attr,
-       &rndis_grp_attr_sub_compatible_id.attr,
+       &interf_grp_attr_compatible_id.attr,
+       &interf_grp_attr_sub_compatible_id.attr,
        NULL
 };
 
 int usb_os_desc_prepare_interf_dir(struct config_group *parent,
                                   int n_interf,
                                   struct usb_os_desc **desc,
+                                  char **names,
                                   struct module *owner)
 {
        struct config_group **f_default_groups, *os_desc_group,
@@ -1257,8 +1258,8 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
                d = desc[n_interf];
                d->owner = owner;
                config_group_init_type_name(&d->group, "", interface_type);
-               config_item_set_name(&d->group.cg_item, "interface.%d",
-                                    n_interf);
+               config_item_set_name(&d->group.cg_item, "interface.%s",
+                                    names[n_interf]);
                interface_groups[n_interf] = &d->group;
        }
 
index a14ac79..36c468c 100644 (file)
@@ -8,6 +8,7 @@ void unregister_gadget_item(struct config_item *item);
 int usb_os_desc_prepare_interf_dir(struct config_group *parent,
                                   int n_interf,
                                   struct usb_os_desc **desc,
+                                  char **names,
                                   struct module *owner);
 
 static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
index 74202d6..8598c27 100644 (file)
@@ -1483,11 +1483,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
        ffs->ep0req->context = ffs;
 
        lang = ffs->stringtabs;
-       for (lang = ffs->stringtabs; *lang; ++lang) {
-               struct usb_string *str = (*lang)->strings;
-               int id = first_id;
-               for (; str->s; ++id, ++str)
-                       str->id = id;
+       if (lang) {
+               for (; *lang; ++lang) {
+                       struct usb_string *str = (*lang)->strings;
+                       int id = first_id;
+                       for (; str->s; ++id, ++str)
+                               str->id = id;
+               }
        }
 
        ffs->gadget = cdev->gadget;
index eed3ad8..9c41e95 100644 (file)
@@ -687,7 +687,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
                f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
                                           GFP_KERNEL);
                if (!f->os_desc_table)
-                       return PTR_ERR(f->os_desc_table);
+                       return -ENOMEM;
                f->os_desc_n = 1;
                f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
        }
@@ -905,6 +905,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
 {
        struct f_rndis_opts *opts;
        struct usb_os_desc *descs[1];
+       char *names[1];
 
        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
@@ -922,8 +923,9 @@ static struct usb_function_instance *rndis_alloc_inst(void)
        INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop);
 
        descs[0] = &opts->rndis_os_desc;
+       names[0] = "rndis";
        usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
-                                      THIS_MODULE);
+                                      names, THIS_MODULE);
        config_group_init_type_name(&opts->func_inst.group, "",
                                    &rndis_func_type);
 
index 99a37ed..c7004ee 100644 (file)
@@ -1532,8 +1532,9 @@ static int gr_ep_enable(struct usb_ep *_ep,
                        "%s mode: multiple trans./microframe not valid\n",
                        (mode == 2 ? "Bulk" : "Control"));
                return -EINVAL;
-       } else if (nt == 0x11) {
-               dev_err(dev->dev, "Invalid value for trans./microframe\n");
+       } else if (nt == 0x3) {
+               dev_err(dev->dev,
+                       "Invalid value 0x3 for additional trans./microframe\n");
                return -EINVAL;
        } else if ((nt + 1) * max > buffer_size) {
                dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
index ee6c164..2e4ce77 100644 (file)
@@ -1264,8 +1264,13 @@ dev_release (struct inode *inode, struct file *fd)
 
        kfree (dev->buf);
        dev->buf = NULL;
-       put_dev (dev);
 
+       /* other endpoints were all decoupled from this device */
+       spin_lock_irq(&dev->lock);
+       dev->state = STATE_DEV_DISABLED;
+       spin_unlock_irq(&dev->lock);
+
+       put_dev (dev);
        return 0;
 }
 
index 3d78a88..97b0277 100644 (file)
@@ -1120,7 +1120,10 @@ void gether_disconnect(struct gether *link)
 
        DBG(dev, "%s\n", __func__);
 
+       netif_tx_lock(dev->net);
        netif_stop_queue(dev->net);
+       netif_tx_unlock(dev->net);
+
        netif_carrier_off(dev->net);
 
        /* disable endpoints, forcing (synchronous) completion
index 61b7817..03314f8 100644 (file)
@@ -176,7 +176,7 @@ config USB_EHCI_HCD_AT91
 
 config USB_EHCI_MSM
        tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
-       depends on ARCH_MSM
+       depends on ARCH_MSM || ARCH_QCOM
        select USB_EHCI_ROOT_HUB_TT
        ---help---
          Enables support for the USB Host controller present on the
index 4a6d3dd..2f3aceb 100644 (file)
@@ -656,6 +656,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
                        DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
                },
        },
+       {
+               /* HASEE E200 */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
+                       DMI_MATCH(DMI_BOARD_NAME, "E210"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
+               },
+       },
        { }
 };
 
@@ -665,9 +673,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
 {
        int try_handoff = 1, tried_handoff = 0;
 
-       /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
-        * the handoff on its unused controller.  Skip it. */
-       if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
+       /*
+        * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+        * the handoff on its unused controller.  Skip it.
+        *
+        * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
+        */
+       if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
+                       pdev->device == 0x27cc)) {
                if (dmi_check_system(ehci_dmi_nohandoff_table))
                        try_handoff = 0;
        }
index 6231ce6..aa79e87 100644 (file)
@@ -22,6 +22,7 @@
 
 
 #include <linux/slab.h>
+#include <linux/device.h>
 #include <asm/unaligned.h>
 
 #include "xhci.h"
@@ -287,7 +288,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
                if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
                        struct xhci_command *command;
                        command = xhci_alloc_command(xhci, false, false,
-                                                    GFP_NOIO);
+                                                    GFP_NOWAIT);
                        if (!command) {
                                spin_unlock_irqrestore(&xhci->lock, flags);
                                xhci_free_command(xhci, cmd);
@@ -1139,7 +1140,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
                 * is enabled, so also enable remote wake here.
                 */
-               if (hcd->self.root_hub->do_remote_wakeup) {
+               if (hcd->self.root_hub->do_remote_wakeup
+                               && device_may_wakeup(hcd->self.controller)) {
+
                        if (t1 & PORT_CONNECT) {
                                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
                                t2 &= ~PORT_WKCONN_E;
index d67ff71..749fc68 100644 (file)
@@ -1433,8 +1433,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_RESET_DEV:
-               WARN_ON(slot_id != TRB_TO_SLOT_ID(
-                               le32_to_cpu(cmd_trb->generic.field[3])));
+               /* SLOT_ID field in reset device cmd completion event TRB is 0.
+                * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
+                */
+               slot_id = TRB_TO_SLOT_ID(
+                               le32_to_cpu(cmd_trb->generic.field[3]));
                xhci_handle_cmd_reset_dev(xhci, slot_id, event);
                break;
        case TRB_NEC_GET_FW:
@@ -3534,7 +3537,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
                return 0;
 
        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
-       return roundup(total_packet_count, max_burst + 1) - 1;
+       return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
 }
 
 /*
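
The burst-count fix above is pure arithmetic: the function must return the number of bursts minus one, but roundup() returns a rounded packet count, so for 10 packets at 4 packets per burst the old code produced 11 where 2 was meant. The two expressions side by side:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define roundup(x, y) (DIV_ROUND_UP(x, y) * (y))

int main(void)
{
        unsigned int total_packet_count = 10;
        unsigned int max_burst = 3;             /* bursts carry 4 packets */

        /* Old code returned a rounded packet count, not a burst count. */
        printf("old: %u\n", roundup(total_packet_count, max_burst + 1) - 1);
        /* 10 packets need ceil(10/4) = 3 bursts, i.e. burst count 2. */
        printf("new: %u\n",
               DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1);
        return 0;
}
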
index 2b8d9a2..7436d5f 100644 (file)
@@ -936,7 +936,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
  */
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
-       u32                     command, temp = 0;
+       u32                     command, temp = 0, status;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
@@ -1054,8 +1054,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
  done:
        if (retval == 0) {
-               usb_hcd_resume_root_hub(hcd);
-               usb_hcd_resume_root_hub(xhci->shared_hcd);
+               /* Resume root hubs only when have pending events. */
+               status = readl(&xhci->op_regs->status);
+               if (status & STS_EINT) {
+                       usb_hcd_resume_root_hub(hcd);
+                       usb_hcd_resume_root_hub(xhci->shared_hcd);
+               }
        }
 
        /*
index 51a6da2..829f446 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/moduleparam.h>
 #include <linux/scatterlist.h>
 #include <linux/mutex.h>
-
+#include <linux/timer.h>
 #include <linux/usb.h>
 
 #define SIMPLE_IO_TIMEOUT      10000   /* in milliseconds */
@@ -484,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
        return sg;
 }
 
+static void sg_timeout(unsigned long _req)
+{
+       struct usb_sg_request   *req = (struct usb_sg_request *) _req;
+
+       req->status = -ETIMEDOUT;
+       usb_sg_cancel(req);
+}
+
 static int perform_sglist(
        struct usbtest_dev      *tdev,
        unsigned                iterations,
@@ -495,6 +503,9 @@ static int perform_sglist(
 {
        struct usb_device       *udev = testdev_to_usbdev(tdev);
        int                     retval = 0;
+       struct timer_list       sg_timer;
+
+       setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
 
        while (retval == 0 && iterations-- > 0) {
                retval = usb_sg_init(req, udev, pipe,
@@ -505,7 +516,10 @@ static int perform_sglist(
 
                if (retval)
                        break;
+               mod_timer(&sg_timer, jiffies +
+                               msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
                usb_sg_wait(req);
+               del_timer_sync(&sg_timer);
                retval = req->status;
 
                /* FIXME check resulting data pattern */
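
The usbtest change brackets usb_sg_wait(), which can otherwise block indefinitely, with a one-shot timer whose callback cancels the request; del_timer_sync() disarms it on the normal path. The shape of that pattern, modeled in userspace with an alarm:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t timed_out;

static void on_alarm(int sig)
{
        (void)sig;
        timed_out = 1;          /* plays the role of usb_sg_cancel() */
}

int main(void)
{
        signal(SIGALRM, on_alarm);
        alarm(1);               /* like mod_timer(jiffies + timeout) */
        pause();                /* like usb_sg_wait(): blocks until woken */
        alarm(0);               /* like del_timer_sync() */
        printf("status: %s\n", timed_out ? "-ETIMEDOUT" : "ok");
        return 0;
}
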
index d235378..1e58ed2 100644 (file)
@@ -19,21 +19,6 @@ err:
        return ret;
 }
 
-static int of_remove_populated_child(struct device *dev, void *d)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-
-       of_device_unregister(pdev);
-       return 0;
-}
-
-static int am335x_child_remove(struct platform_device *pdev)
-{
-       device_for_each_child(&pdev->dev, NULL, of_remove_populated_child);
-       pm_runtime_disable(&pdev->dev);
-       return 0;
-}
-
 static const struct of_device_id am335x_child_of_match[] = {
        { .compatible = "ti,am33xx-usb" },
        {  },
@@ -42,13 +27,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match);
 
 static struct platform_driver am335x_child_driver = {
        .probe          = am335x_child_probe,
-       .remove         = am335x_child_remove,
        .driver         = {
                .name   = "am335x-usb-childs",
                .of_match_table = am335x_child_of_match,
        },
 };
 
-module_platform_driver(am335x_child_driver);
+static int __init am335x_child_init(void)
+{
+       return platform_driver_register(&am335x_child_driver);
+}
+module_init(am335x_child_init);
+
 MODULE_DESCRIPTION("AM33xx child devices");
 MODULE_LICENSE("GPL v2");
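
The am335x glue drops both its remove callback and module_exit(): the child devices it populates are referenced by other drivers (musb), and tearing them down on unbind invited use-after-free, so the driver is now registered once and the module made effectively permanent. In sketch form, against the in-kernel platform-driver API:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        return 0;       /* populate child devices here */
}

static struct platform_driver example_driver = {
        .probe  = example_probe,
        .driver = { .name = "example" },
};

static int __init example_init(void)
{
        return platform_driver_register(&example_driver);
}
module_init(example_init);
/* deliberately no module_exit() and no .remove: unload not supported */
MODULE_LICENSE("GPL");
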
index 61da471..eff3c5c 100644 (file)
@@ -849,7 +849,7 @@ b_host:
        }
 
        /* handle babble condition */
-       if (int_usb & MUSB_INTR_BABBLE)
+       if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb))
                schedule_work(&musb->recover_work);
 
 #if 0
index 7b8bbf5..5341bb2 100644 (file)
@@ -318,7 +318,7 @@ static void cppi41_dma_callback(void *private_data)
                }
                list_add_tail(&cppi41_channel->tx_check,
                                &controller->early_tx_list);
-               if (!hrtimer_active(&controller->early_tx)) {
+               if (!hrtimer_is_queued(&controller->early_tx)) {
                        hrtimer_start_range_ns(&controller->early_tx,
                                ktime_set(0, 140 * NSEC_PER_USEC),
                                40 * NSEC_PER_USEC,
index 51beb13..09529f9 100644 (file)
@@ -494,10 +494,9 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
        struct dsps_glue *glue = dev_get_drvdata(dev->parent);
        const struct dsps_musb_wrapper *wrp = glue->wrp;
        void __iomem *ctrl_base = musb->ctrl_base;
-       void __iomem *base = musb->mregs;
        u32 reg;
 
-       reg = dsps_readl(base, wrp->mode);
+       reg = dsps_readl(ctrl_base, wrp->mode);
 
        switch (mode) {
        case MUSB_HOST:
@@ -510,7 +509,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
                 */
                reg |= (1 << wrp->iddig_mux);
 
-               dsps_writel(base, wrp->mode, reg);
+               dsps_writel(ctrl_base, wrp->mode, reg);
                dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
                break;
        case MUSB_PERIPHERAL:
@@ -523,10 +522,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
                 */
                reg |= (1 << wrp->iddig_mux);
 
-               dsps_writel(base, wrp->mode, reg);
+               dsps_writel(ctrl_base, wrp->mode, reg);
                break;
        case MUSB_OTG:
-               dsps_writel(base, wrp->phy_utmi, 0x02);
+               dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
                break;
        default:
                dev_err(glue->dev, "unsupported mode %d\n", mode);
index c2e45e6..f202e50 100644 (file)
@@ -274,7 +274,6 @@ static int ux500_probe(struct platform_device *pdev)
        musb->dev.parent                = &pdev->dev;
        musb->dev.dma_mask              = &pdev->dev.coherent_dma_mask;
        musb->dev.coherent_dma_mask     = pdev->dev.coherent_dma_mask;
-       musb->dev.of_node               = pdev->dev.of_node;
 
        glue->dev                       = &pdev->dev;
        glue->musb                      = musb;
index ced34f3..c929370 100644 (file)
@@ -1229,7 +1229,9 @@ static void msm_otg_sm_work(struct work_struct *w)
                        motg->chg_state = USB_CHG_STATE_UNDEFINED;
                        motg->chg_type = USB_INVALID_CHARGER;
                }
-               pm_runtime_put_sync(otg->phy->dev);
+
+               if (otg->phy->state == OTG_STATE_B_IDLE)
+                       pm_runtime_put_sync(otg->phy->dev);
                break;
        case OTG_STATE_B_PERIPHERAL:
                dev_dbg(otg->phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
index d49f9c3..4fd3653 100644 (file)
@@ -681,6 +681,14 @@ usbhs_fifo_read_end:
                usbhs_pipe_number(pipe),
                pkt->length, pkt->actual, *is_done, pkt->zero);
 
+       /*
+        * Transmission end
+        */
+       if (*is_done) {
+               if (usbhs_pipe_is_dcp(pipe))
+                       usbhs_dcp_control_transfer_done(pipe);
+       }
+
 usbhs_fifo_read_busy:
        usbhsf_fifo_unselect(pipe, fifo);
 
index 762e4a5..330df5c 100644 (file)
@@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+       { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
        { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
        { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
index edf3b12..8a3813b 100644 (file)
@@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
-       { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
+       { USB_DEVICE(TESTO_VID, TESTO_1_PID) },
+       { USB_DEVICE(TESTO_VID, TESTO_3_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
@@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
+       /* Infineon Devices */
+       { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
        { }                                     /* Terminating entry */
 };
 
@@ -1566,14 +1569,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
        struct usb_device *udev = serial->dev;
 
        struct usb_interface *interface = serial->interface;
-       struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
+       struct usb_endpoint_descriptor *ep_desc;
 
        unsigned num_endpoints;
-       int i;
+       unsigned i;
 
        num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
        dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
 
+       if (!num_endpoints)
+               return;
+
        /* NOTE: some customers have programmed FT232R/FT245R devices
         * with an endpoint size of 0 - not good.  In this case, we
         * want to override the endpoint descriptor setting and use a
index 500474c..c4777bc 100644 (file)
 #define RATOC_VENDOR_ID                0x0584
 #define RATOC_PRODUCT_ID_USB60F        0xb020
 
+/*
+ * Infineon Technologies
+ */
+#define INFINEON_VID           0x058b
+#define INFINEON_TRIBOARD_PID  0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+
 /*
  * Acton Research Corp.
  */
  * Submitted by Colin Leroy
  */
 #define TESTO_VID                      0x128D
-#define TESTO_USB_INTERFACE_PID                0x0001
+#define TESTO_1_PID                    0x0001
+#define TESTO_3_PID                    0x0003
 
 /*
  * Mobility Electronics products.
index 59c3108..a968894 100644 (file)
@@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb);
 /* Zoom */
 #define ZOOM_PRODUCT_4597                      0x9607
 
+/* SpeedUp SU9800 usb 3g modem */
+#define SPEEDUP_PRODUCT_SU9800                 0x9800
+
 /* Haier products */
 #define HAIER_VENDOR_ID                                0x201e
 #define HAIER_PRODUCT_CE100                    0x2009
@@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb);
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
 #define OLIVETTI_PRODUCT_OLICARD100            0xc000
+#define OLIVETTI_PRODUCT_OLICARD120            0xc001
+#define OLIVETTI_PRODUCT_OLICARD140            0xc002
 #define OLIVETTI_PRODUCT_OLICARD145            0xc003
+#define OLIVETTI_PRODUCT_OLICARD155            0xc004
 #define OLIVETTI_PRODUCT_OLICARD200            0xc005
+#define OLIVETTI_PRODUCT_OLICARD160            0xc00a
 #define OLIVETTI_PRODUCT_OLICARD500            0xc00b
 
 /* Celot products */
@@ -1480,6 +1487,8 @@ static const struct usb_device_id option_ids[] = {
                .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
                .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
+               .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1577,6 +1586,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
          .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
        },
+       { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
        { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1611,15 +1621,21 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
-
-       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
-               .driver_info = (kernel_ulong_t)&net_intf6_blacklist
-       },
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
-               .driver_info = (kernel_ulong_t)&net_intf4_blacklist
-       },
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
index 9d38ddc..866b5df 100644 (file)
@@ -256,6 +256,10 @@ static int slave_configure(struct scsi_device *sdev)
                if (us->fflags & US_FL_WRITE_CACHE)
                        sdev->wce_default_on = 1;
 
+               /* A few buggy USB-ATA bridges don't understand FUA */
+               if (us->fflags & US_FL_BROKEN_FUA)
+                       sdev->broken_fua = 1;
+
        } else {
 
                /* Non-disk-type devices don't need to blacklist any pages
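
A hedged aside on where the new flag lands: on the SCSI disk side, a
device with broken_fua set is treated as if it never offered DPO/FUA, so
writes fall back to plain writeback plus explicit cache flushes. A sketch
of the assumed sd.c counterpart (illustrative, not part of this hunk):

	if (sdkp->device->broken_fua) {
		/* Bridge mangles FUA; pretend the device lacks it so the
		 * disk driver issues SYNCHRONIZE CACHE instead. */
		sd_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
		sdkp->DPOFUA = 0;
	}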
index 174a447..80a5b36 100644 (file)
@@ -1936,6 +1936,13 @@ UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Michael Büsch <m@bues.ch> */
+UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0114,
+               "JMicron",
+               "USB to ATA/ATAPI Bridge",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BROKEN_FUA ),
+
 /* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
  * JMicron responds to USN and several other SCSI ioctls with a
  * residue that causes subsequent I/O requests to fail.  */
index 971a760..8dae2f7 100644 (file)
@@ -700,14 +700,6 @@ static void handle_rx_net(struct vhost_work *work)
        handle_rx(net);
 }
 
-static void vhost_net_free(void *addr)
-{
-       if (is_vmalloc_addr(addr))
-               vfree(addr);
-       else
-               kfree(addr);
-}
-
 static int vhost_net_open(struct inode *inode, struct file *f)
 {
        struct vhost_net *n;
@@ -723,7 +715,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
        }
        vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
-               vhost_net_free(n);
+               kvfree(n);
                return -ENOMEM;
        }
 
@@ -840,7 +832,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n->dev.vqs);
-       vhost_net_free(n);
+       kvfree(n);
        return 0;
 }
 
index 4f4ffa4..69906ca 100644 (file)
@@ -1503,14 +1503,6 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
        return 0;
 }
 
-static void vhost_scsi_free(struct vhost_scsi *vs)
-{
-       if (is_vmalloc_addr(vs))
-               vfree(vs);
-       else
-               kfree(vs);
-}
-
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
        struct vhost_scsi *vs;
@@ -1550,7 +1542,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
        return 0;
 
 err_vqs:
-       vhost_scsi_free(vs);
+       kvfree(vs);
 err_vs:
        return r;
 }
@@ -1569,7 +1561,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
        vhost_scsi_flush(vs);
        kfree(vs->dev.vqs);
-       vhost_scsi_free(vs);
+       kvfree(vs);
        return 0;
 }
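
For reference, both vhost conversions rely on the generic kvfree()
helper, which subsumes the removed open-coded wrappers: it checks
is_vmalloc_addr() internally and frees either a kmalloc()ed or a
vmalloc()ed buffer. A minimal hedged sketch of the allocation pattern it
pairs with (names are illustrative, not from this patch):

	#include <linux/mm.h>       /* kvfree(), is_vmalloc_addr() */
	#include <linux/slab.h>     /* kmalloc() */
	#include <linux/vmalloc.h>  /* vmalloc() */

	static void *example_alloc(size_t size)
	{
		/* Prefer physically contiguous memory, but don't warn:
		 * large structures may simply not fit. */
		void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

		if (!p)
			p = vmalloc(size); /* virtually contiguous fallback */
		return p;
	}

	/* Either allocation is released with a single kvfree(p). */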
 
index b63860f..40bec8d 100644 (file)
@@ -77,3 +77,4 @@ const struct consw dummy_con = {
     .con_set_palette = DUMMY,
     .con_scrolldelta = DUMMY,
 };
+EXPORT_SYMBOL_GPL(dummy_con);
index f267284..6e6aa70 100644 (file)
@@ -1441,5 +1441,6 @@ const struct consw vga_con = {
        .con_build_attr = vgacon_build_attr,
        .con_invert_region = vgacon_invert_region,
 };
+EXPORT_SYMBOL(vga_con);
 
 MODULE_LICENSE("GPL");
index e683b6e..d36e830 100644 (file)
@@ -1057,6 +1057,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
                goto put_display_node;
        }
 
+       INIT_LIST_HEAD(&pdata->pwr_gpios);
        ret = -ENOMEM;
        for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) {
                gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio",
@@ -1082,6 +1083,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
                        dev_err(dev, "set direction output gpio %d failed\n", gpio);
                        goto put_display_node;
                }
+               list_add(&og->list, &pdata->pwr_gpios);
        }
 
        if (is_gpio_power)
index a54f7f7..8fe41ca 100644 (file)
@@ -408,7 +408,7 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client,
        /* Workaround "PPI Does Not Start Properly In Specific Mode" */
        if (ANOMALY_05000400) {
                ret = gpio_request_one(P_IDENT(P_PPI0_FS3), GPIOF_OUT_INIT_LOW,
-                                       "PPI0_FS3")
+                                       "PPI0_FS3");
                if (ret) {
                        dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
                        ret = -EBUSY;
index ae9618f..982f6ab 100644 (file)
@@ -19,8 +19,6 @@
 
 static bool request_mem_succeeded = false;
 
-static struct pci_dev *default_vga;
-
 static struct fb_var_screeninfo efifb_defined = {
        .activate               = FB_ACTIVATE_NOW,
        .height                 = -1,
@@ -84,23 +82,10 @@ static struct fb_ops efifb_ops = {
        .fb_imageblit   = cfb_imageblit,
 };
 
-struct pci_dev *vga_default_device(void)
-{
-       return default_vga;
-}
-
-EXPORT_SYMBOL_GPL(vga_default_device);
-
-void vga_set_default_device(struct pci_dev *pdev)
-{
-       default_vga = pdev;
-}
-
 static int efifb_setup(char *options)
 {
        char *this_opt;
        int i;
-       struct pci_dev *dev = NULL;
 
        if (options && *options) {
                while ((this_opt = strsep(&options, ",")) != NULL) {
@@ -126,30 +111,6 @@ static int efifb_setup(char *options)
                }
        }
 
-       for_each_pci_dev(dev) {
-               int i;
-
-               if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
-                       continue;
-
-               for (i=0; i < DEVICE_COUNT_RESOURCE; i++) {
-                       resource_size_t start, end;
-
-                       if (!(pci_resource_flags(dev, i) & IORESOURCE_MEM))
-                               continue;
-
-                       start = pci_resource_start(dev, i);
-                       end  = pci_resource_end(dev, i);
-
-                       if (!start || !end)
-                               continue;
-
-                       if (screen_info.lfb_base >= start &&
-                           (screen_info.lfb_base + screen_info.lfb_size) < end)
-                               default_vga = dev;
-               }
-       }
-
        return 0;
 }
 
index 7d44d66..43a0a52 100644 (file)
@@ -91,15 +91,6 @@ extern boot_infos_t *boot_infos;
 #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN        0x6cd4
 #define AVIVO_DC_LUTB_WHITE_OFFSET_RED          0x6cd8
 
-#define FB_RIGHT_POS(p, bpp)         (fb_be_math(p) ? 0 : (32 - (bpp)))
-
-static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
-{
-       u32 bpp = info->var.bits_per_pixel;
-
-       return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
-}
-
     /*
      *  Set a single color register. The values supplied are already
      *  rounded down to the hardware's capabilities (according to the
@@ -129,7 +120,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                        mask <<= info->var.transp.offset;
                        value |= mask;
                }
-               pal[regno] = offb_cmap_byteswap(info, value);
+               pal[regno] = value;
                return 0;
        }
 
index 99af9e8..2f0822e 100644 (file)
@@ -121,9 +121,11 @@ static void __init omapdss_add_to_list(struct device_node *node, bool root)
 {
        struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node),
                GFP_KERNEL);
-       n->node = node;
-       n->root = root;
-       list_add(&n->list, &dss_conv_list);
+       if (n) {
+               n->node = node;
+               n->root = root;
+               list_add(&n->list, &dss_conv_list);
+       }
 }
 
 static bool __init omapdss_list_contains(const struct device_node *node)
index a8f2b28..a1134c3 100644 (file)
@@ -474,8 +474,6 @@ static int vt8500lcd_remove(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
 
-       kfree(fbi);
-
        return 0;
 }
 
index 67b067a..a5df5e8 100644 (file)
@@ -66,7 +66,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
 
                udelay(100);
        }
-       return !!(reg_val & MXC_W1_CONTROL_PST);
+       return !(reg_val & MXC_W1_CONTROL_PST);
 }
 
 /*
index c845527..76dd541 100644 (file)
@@ -1280,14 +1280,17 @@ config WATCHDOG_RTAS
 
 # S390 Architecture
 
-config ZVM_WATCHDOG
-       tristate "z/VM Watchdog Timer"
+config DIAG288_WATCHDOG
+       tristate "System z diag288 Watchdog"
        depends on S390
+       select WATCHDOG_CORE
        help
          IBM s/390 and zSeries machines running under z/VM 5.1 or later
          provide a virtual watchdog timer to their guests that causes a
          user-defined Control Program command to be executed after a
          timeout.
+         LPAR provides a very similar interface. This driver handles
+         both.
 
          To compile this driver as a module, choose M here. The module
          will be called diag288_wdt.
index 7b8a91e..468c320 100644 (file)
@@ -153,6 +153,7 @@ obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
 obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
 
 # S390 Architecture
+obj-$(CONFIG_DIAG288_WATCHDOG) += diag288_wdt.o
 
 # SUPERH (sh + sh64) Architecture
 obj-$(CONFIG_SH_WDT) += shwdt.o
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
new file mode 100644 (file)
index 0000000..429494b
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * Watchdog driver for z/VM and LPAR using the diag 288 interface.
+ *
+ * Under z/VM, expiration of the watchdog will send a "system restart" command
+ * to CP.
+ *
+ * The command can be altered using the module parameter "cmd". This is
+ * not recommended because it's only supported on z/VM but not with LPAR.
+ *
+ * On LPAR, the watchdog will always trigger a system restart. The module
+ * parameter "cmd" is meaningless here.
+ *
+ *
+ * Copyright IBM Corp. 2004, 2013
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *           Philipp Hachtmann (phacht@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "diag288_wdt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/suspend.h>
+#include <asm/ebcdic.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#define MAX_CMDLEN 240
+#define DEFAULT_CMD "SYSTEM RESTART"
+
+#define MIN_INTERVAL 15     /* Minimal time supported by diag288 */
+#define MAX_INTERVAL 3600   /* One hour should be enough - pure estimation */
+
+#define WDT_DEFAULT_TIMEOUT 30
+
+/* Function codes - init, change, cancel */
+#define WDT_FUNC_INIT 0
+#define WDT_FUNC_CHANGE 1
+#define WDT_FUNC_CANCEL 2
+#define WDT_FUNC_CONCEAL 0x80000000
+
+/* Action codes for LPAR watchdog */
+#define LPARWDT_RESTART 0
+
+static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD;
+static bool conceal_on;
+static bool nowayout_info = WATCHDOG_NOWAYOUT;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_AUTHOR("Philipp Hachtmann <phacht@de.ibm.com>");
+
+MODULE_DESCRIPTION("System z diag288 Watchdog Timer");
+
+module_param_string(cmd, wdt_cmd, MAX_CMDLEN, 0644);
+MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers (z/VM only)");
+
+module_param_named(conceal, conceal_on, bool, 0644);
+MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog is active (z/VM only)");
+
+module_param_named(nowayout, nowayout_info, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = CONFIG_WATCHDOG_NOWAYOUT)");
+
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("vmwatchdog");
+
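+/*
+ * Hedged editorial note: diag 0x288 takes the function code in %r2, the
+ * timeout in %r3, the action code or command address in %r4 and the
+ * command length in %r5 (the register variables below); the EX_TABLE
+ * entry turns a program check into the -EINVAL preloaded in "err".
+ */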
+static int __diag288(unsigned int func, unsigned int timeout,
+                    unsigned long action, unsigned int len)
+{
+       register unsigned long __func asm("2") = func;
+       register unsigned long __timeout asm("3") = timeout;
+       register unsigned long __action asm("4") = action;
+       register unsigned long __len asm("5") = len;
+       int err;
+
+       err = -EINVAL;
+       asm volatile(
+               "       diag    %1, %3, 0x288\n"
+               "0:     la      %0, 0\n"
+               "1:\n"
+               EX_TABLE(0b, 1b)
+               : "+d" (err) : "d"(__func), "d"(__timeout),
+                 "d"(__action), "d"(__len) : "1", "cc");
+       return err;
+}
+
+static int __diag288_vm(unsigned int  func, unsigned int timeout,
+                       char *cmd, size_t len)
+{
+       return __diag288(func, timeout, virt_to_phys(cmd), len);
+}
+
+static int __diag288_lpar(unsigned int func, unsigned int timeout,
+                         unsigned long action)
+{
+       return __diag288(func, timeout, action, 0);
+}
+
+static int wdt_start(struct watchdog_device *dev)
+{
+       char *ebc_cmd;
+       size_t len;
+       int ret;
+       unsigned int func;
+
+       ret = -ENODEV;
+
+       if (MACHINE_IS_VM) {
+               ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+               if (!ebc_cmd)
+                       return -ENOMEM;
+               len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
+               ASCEBC(ebc_cmd, MAX_CMDLEN);
+               EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
+
+               func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
+                       : WDT_FUNC_INIT;
+               ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
+               WARN_ON(ret != 0);
+               kfree(ebc_cmd);
+       }
+
+       if (MACHINE_IS_LPAR) {
+               ret = __diag288_lpar(WDT_FUNC_INIT,
+                                    dev->timeout, LPARWDT_RESTART);
+       }
+
+       if (ret) {
+               pr_err("The watchdog cannot be activated\n");
+               return ret;
+       }
+       pr_info("The watchdog was activated\n");
+       return 0;
+}
+
+static int wdt_stop(struct watchdog_device *dev)
+{
+       int ret;
+
+       ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
+       pr_info("The watchdog was deactivated\n");
+       return ret;
+}
+
+static int wdt_ping(struct watchdog_device *dev)
+{
+       char *ebc_cmd;
+       size_t len;
+       int ret;
+       unsigned int func;
+
+       ret = -ENODEV;
+
+       if (MACHINE_IS_VM) {
+               ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+               if (!ebc_cmd)
+                       return -ENOMEM;
+               len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
+               ASCEBC(ebc_cmd, MAX_CMDLEN);
+               EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
+
+               /*
+                * It seems to be OK for z/VM to use the init function to
+                * retrigger the watchdog. On LPAR, WDT_FUNC_CHANGE must
+                * be used when the watchdog is running.
+                */
+               func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
+                       : WDT_FUNC_INIT;
+
+               ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
+               WARN_ON(ret != 0);
+               kfree(ebc_cmd);
+       }
+
+       if (MACHINE_IS_LPAR)
+               ret = __diag288_lpar(WDT_FUNC_CHANGE, dev->timeout, 0);
+
+       if (ret)
+               pr_err("The watchdog timer cannot be started or reset\n");
+       return ret;
+}
+
+static int wdt_set_timeout(struct watchdog_device *dev, unsigned int new_to)
+{
+       dev->timeout = new_to;
+       return wdt_ping(dev);
+}
+
+static struct watchdog_ops wdt_ops = {
+       .owner = THIS_MODULE,
+       .start = wdt_start,
+       .stop = wdt_stop,
+       .ping = wdt_ping,
+       .set_timeout = wdt_set_timeout,
+};
+
+static struct watchdog_info wdt_info = {
+       .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+       .firmware_version = 0,
+       .identity = "z Watchdog",
+};
+
+static struct watchdog_device wdt_dev = {
+       .parent = NULL,
+       .info = &wdt_info,
+       .ops = &wdt_ops,
+       .bootstatus = 0,
+       .timeout = WDT_DEFAULT_TIMEOUT,
+       .min_timeout = MIN_INTERVAL,
+       .max_timeout = MAX_INTERVAL,
+};
+
+/*
+ * It makes no sense to go into suspend while the watchdog is running:
+ * depending on the memory size, the watchdog might trigger while we
+ * are still saving the memory.
+ * We reuse the open flag to ensure that suspend and opening the
+ * watchdog are exclusive operations.
+ */
+static int wdt_suspend(void)
+{
+       if (test_and_set_bit(WDOG_DEV_OPEN, &wdt_dev.status)) {
+               pr_err("Linux cannot be suspended while the watchdog is in use\n");
+               return notifier_from_errno(-EBUSY);
+       }
+       if (test_bit(WDOG_ACTIVE, &wdt_dev.status)) {
+               clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
+               pr_err("Linux cannot be suspended while the watchdog is in use\n");
+               return notifier_from_errno(-EBUSY);
+       }
+       return NOTIFY_DONE;
+}
+
+static int wdt_resume(void)
+{
+       clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
+       return NOTIFY_DONE;
+}
+
+static int wdt_power_event(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+{
+       switch (event) {
+       case PM_POST_HIBERNATION:
+       case PM_POST_SUSPEND:
+               return wdt_resume();
+       case PM_HIBERNATION_PREPARE:
+       case PM_SUSPEND_PREPARE:
+               return wdt_suspend();
+       default:
+               return NOTIFY_DONE;
+       }
+}
+
+static struct notifier_block wdt_power_notifier = {
+       .notifier_call = wdt_power_event,
+};
+
+static int __init diag288_init(void)
+{
+       int ret;
+       char ebc_begin[] = {
+               194, 197, 199, 201, 213
+       };
+
+       watchdog_set_nowayout(&wdt_dev, nowayout_info);
+
+       if (MACHINE_IS_VM) {
+               pr_info("The watchdog device driver detected a z/VM environment\n");
+               if (__diag288_vm(WDT_FUNC_INIT, 15,
+                                ebc_begin, sizeof(ebc_begin)) != 0) {
+                       pr_err("The watchdog cannot be initialized\n");
+                       return -EINVAL;
+               }
+       } else if (MACHINE_IS_LPAR) {
+               pr_info("The watchdog device driver detected an LPAR environment\n");
+               if (__diag288_lpar(WDT_FUNC_INIT, 30, LPARWDT_RESTART)) {
+                       pr_err("The watchdog cannot be initialized\n");
+                       return -EINVAL;
+               }
+       } else {
+               pr_err("Linux runs in an environment that does not support the diag288 watchdog\n");
+               return -ENODEV;
+       }
+
+       if (__diag288_lpar(WDT_FUNC_CANCEL, 0, 0)) {
+               pr_err("The watchdog cannot be deactivated\n");
+               return -EINVAL;
+       }
+
+       ret = register_pm_notifier(&wdt_power_notifier);
+       if (ret)
+               return ret;
+
+       ret = watchdog_register_device(&wdt_dev);
+       if (ret)
+               unregister_pm_notifier(&wdt_power_notifier);
+
+       return ret;
+}
+
+static void __exit diag288_exit(void)
+{
+       watchdog_unregister_device(&wdt_dev);
+       unregister_pm_notifier(&wdt_power_notifier);
+}
+
+module_init(diag288_init);
+module_exit(diag288_exit);
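
Because the driver registers through the watchdog core, the standard
/dev/watchdog interface applies. A hedged userspace sketch (generic
watchdog API usage, nothing driver-specific assumed):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/watchdog.h>

	int main(void)
	{
		int fd = open("/dev/watchdog", O_WRONLY);
		int timeout = 60;

		if (fd < 0)
			return 1;
		/* Routed to wdt_set_timeout() above. */
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
		for (;;) {
			/* Routed to wdt_ping(); must recur within the
			 * timeout or the machine restarts. */
			ioctl(fd, WDIOC_KEEPALIVE, 0);
			sleep(timeout / 2);
		}
	}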
index b7a506f..5c660c7 100644 (file)
@@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                 * p2m are consistent.
                 */
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                       unsigned long p;
-                       struct page   *scratch_page = get_balloon_scratch_page();
-
                        if (!PageHighMem(page)) {
+                               struct page *scratch_page = get_balloon_scratch_page();
+
                                ret = HYPERVISOR_update_va_mapping(
                                                (unsigned long)__va(pfn << PAGE_SHIFT),
                                                pfn_pte(page_to_pfn(scratch_page),
                                                        PAGE_KERNEL_RO), 0);
                                BUG_ON(ret);
-                       }
-                       p = page_to_pfn(scratch_page);
-                       __set_phys_to_machine(pfn, pfn_to_mfn(p));
 
-                       put_balloon_scratch_page();
+                               put_balloon_scratch_page();
+                       }
+                       __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                }
 #endif
 
index 6d325bd..eeba754 100644 (file)
@@ -1168,7 +1168,8 @@ int gnttab_resume(void)
 
 int gnttab_suspend(void)
 {
-       gnttab_interface->unmap_frames();
+       if (!xen_feature(XENFEAT_auto_translated_physmap))
+               gnttab_interface->unmap_frames();
        return 0;
 }
 
@@ -1194,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
 int gnttab_init(void)
 {
        int i;
+       unsigned long max_nr_grant_frames;
        unsigned int max_nr_glist_frames, nr_glist_frames;
        unsigned int nr_init_grefs;
        int ret;
 
        gnttab_request_version();
+       max_nr_grant_frames = gnttab_max_grant_frames();
        nr_grant_frames = 1;
 
        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
        BUG_ON(grefs_per_grant_frame == 0);
-       max_nr_glist_frames = (gnttab_max_grant_frames() *
+       max_nr_glist_frames = (max_nr_grant_frames *
                               grefs_per_grant_frame / RPP);
 
        gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1222,6 +1225,11 @@ int gnttab_init(void)
                }
        }
 
+       ret = arch_gnttab_init(max_nr_grant_frames,
+                              nr_status_frames(max_nr_grant_frames));
+       if (ret < 0)
+               goto ini_nomem;
+
        if (gnttab_setup() < 0) {
                ret = -ENODEV;
                goto ini_nomem;
index c3667b2..5f1e1f3 100644 (file)
@@ -88,7 +88,6 @@ static int xen_suspend(void *data)
 
        if (!si->cancelled) {
                xen_irq_resume();
-               xen_console_resume();
                xen_timer_resume();
        }
 
@@ -135,6 +134,10 @@ static void do_suspend(void)
 
        err = stop_machine(xen_suspend, &si, cpumask_of(0));
 
+       /* Resume console as early as possible. */
+       if (!si.cancelled)
+               xen_console_resume();
+
        raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
 
        dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
index 6f3fd99..83eeddd 100644 (file)
@@ -46,13 +46,13 @@ struct zorro_manuf_info {
 #include "devlist.h"
 
 static struct zorro_manuf_info __initdata zorro_manuf_list[] = {
-#define MANUF( manuf, name )           { 0x##manuf, sizeof(__prods_##manuf) / sizeof(struct zorro_prod_info), __manufstr_##manuf, __prods_##manuf },
+#define MANUF( manuf, name )           { 0x##manuf, ARRAY_SIZE(__prods_##manuf), __manufstr_##manuf, __prods_##manuf },
 #define ENDMANUF()
 #define PRODUCT( manuf, prod, name )
 #include "devlist.h"
 };
 
-#define MANUFS (sizeof(zorro_manuf_list)/sizeof(struct zorro_manuf_info))
+#define MANUFS ARRAY_SIZE(zorro_manuf_list)
 
 void __init zorro_name_device(struct zorro_dev *dev)
 {
index 5747417..0862d34 100644 (file)
@@ -219,6 +219,12 @@ $(obj)/%.fw: $(obj)/%.H16 $(ihex2fw_dep)
 obj-y                           += $(patsubst %,%.gen.o, $(fw-external-y))
 obj-$(CONFIG_FIRMWARE_IN_KERNEL) += $(patsubst %,%.gen.o, $(fw-shipped-y))
 
+ifeq ($(KBUILD_SRC),)
+# Makefile.build only creates subdirectories for O= builds, but external
+# firmware might live outside the kernel source tree
+_dummy := $(foreach d,$(addprefix $(obj)/,$(dir $(fw-external-y))), $(shell [ -d $(d) ] || mkdir -p $(d)))
+endif
+
 # Remove .S files and binaries created from ihex
 # (during 'make clean' .config isn't included so they're all in $(fw-shipped-))
 targets := $(fw-shipped-) $(patsubst $(obj)/%,%, \
index 42dd2e4..35de0c0 100644 (file)
@@ -55,13 +55,13 @@ static int __init afs_get_client_UUID(void)
        afs_uuid.time_low = uuidtime;
        afs_uuid.time_mid = uuidtime >> 32;
        afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
-       afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME;
+       afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
 
        get_random_bytes(&clockseq, 2);
        afs_uuid.clock_seq_low = clockseq;
        afs_uuid.clock_seq_hi_and_reserved =
                (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
-       afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD;
+       afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
 
        _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
               afs_uuid.time_low,
index 4f078c0..bd7ec2c 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -506,6 +506,8 @@ static void free_ioctx(struct work_struct *work)
 
        aio_free_ring(ctx);
        free_percpu(ctx->cpu);
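+       /*
+        * Hedged editorial note: percpu_ref_exit() pairs with
+        * percpu_ref_init() and releases the ref's percpu counter,
+        * replacing direct frees of the internal pcpu_count pointer
+        * (compare the error path below).
+        */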
+       percpu_ref_exit(&ctx->reqs);
+       percpu_ref_exit(&ctx->users);
        kmem_cache_free(kioctx_cachep, ctx);
 }
 
@@ -715,8 +717,8 @@ err_ctx:
 err:
        mutex_unlock(&ctx->ring_lock);
        free_percpu(ctx->cpu);
-       free_percpu(ctx->reqs.pcpu_count);
-       free_percpu(ctx->users.pcpu_count);
+       percpu_ref_exit(&ctx->reqs);
+       percpu_ref_exit(&ctx->users);
        kmem_cache_free(kioctx_cachep, ctx);
        pr_debug("error allocating ioctx %d\n", err);
        return ERR_PTR(err);
@@ -830,16 +832,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
        struct kioctx_cpu *kcpu;
+       unsigned long flags;
 
        preempt_disable();
        kcpu = this_cpu_ptr(ctx->cpu);
 
+       local_irq_save(flags);
        kcpu->reqs_available += nr;
+
        while (kcpu->reqs_available >= ctx->req_batch * 2) {
                kcpu->reqs_available -= ctx->req_batch;
                atomic_add(ctx->req_batch, &ctx->reqs_available);
        }
 
+       local_irq_restore(flags);
        preempt_enable();
 }
 
@@ -847,10 +853,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
        struct kioctx_cpu *kcpu;
        bool ret = false;
+       unsigned long flags;
 
        preempt_disable();
        kcpu = this_cpu_ptr(ctx->cpu);
 
+       local_irq_save(flags);
        if (!kcpu->reqs_available) {
                int old, avail = atomic_read(&ctx->reqs_available);
 
@@ -869,6 +877,7 @@ static bool get_reqs_available(struct kioctx *ctx)
        ret = true;
        kcpu->reqs_available--;
 out:
+       local_irq_restore(flags);
        preempt_enable();
        return ret;
 }
@@ -1021,6 +1030,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
        /* everything turned out well, dispose of the aiocb. */
        kiocb_free(iocb);
+       put_reqs_available(ctx, 1);
 
        /*
         * We have to order our ring_info tail store above and test
@@ -1062,6 +1072,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
        if (head == tail)
                goto out;
 
+       head %= ctx->nr_events;
+       tail %= ctx->nr_events;
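+       /*
+        * Hedged editorial note: the two lines above clamp head and
+        * tail into the ring so they can safely index the io_events
+        * array in the copy loop below.
+        */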
+
        while (ret < nr) {
                long avail;
                struct io_event *ev;
@@ -1100,8 +1113,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
        flush_dcache_page(ctx->ring_pages[0]);
 
        pr_debug("%li  h%u t%u\n", ret, head, tail);
-
-       put_reqs_available(ctx, ret);
 out:
        mutex_unlock(&ctx->ring_lock);
 
index d7bd395..1c55388 100644 (file)
@@ -210,7 +210,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
        int pipefd;
        struct autofs_sb_info *sbi;
        struct autofs_info *ino;
-       int pgrp;
+       int pgrp = 0;
        bool pgrp_set = false;
        int ret = -EINVAL;
 
index 92371c4..1daea0b 100644 (file)
@@ -821,7 +821,7 @@ static void free_workspace(int type, struct list_head *workspace)
 
        spin_lock(workspace_lock);
        if (*num_workspace < num_online_cpus()) {
-               list_add_tail(workspace, idle_workspace);
+               list_add(workspace, idle_workspace);
                (*num_workspace)++;
                spin_unlock(workspace_lock);
                goto wake;
index b7e2c1c..be91397 100644 (file)
@@ -1259,11 +1259,19 @@ struct btrfs_block_group_cache {
        spinlock_t lock;
        u64 pinned;
        u64 reserved;
+       u64 delalloc_bytes;
        u64 bytes_super;
        u64 flags;
        u64 sectorsize;
        u64 cache_generation;
 
+       /*
+        * This is only used for the delalloc data space allocation,
+        * because only the data space allocation and the relevant
+        * metadata updates can be done across transactions.
+        */
+       struct rw_semaphore data_rwsem;
+
        /* for raid56, this is a full stripe, without parity */
        unsigned long full_stripe_len;
 
@@ -3316,7 +3324,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                                   struct btrfs_key *ins);
 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
                         u64 min_alloc_size, u64 empty_size, u64 hint_byte,
-                        struct btrfs_key *ins, int is_data);
+                        struct btrfs_key *ins, int is_data, int delalloc);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int full_backref, int no_quota);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -3330,7 +3338,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
                      u64 owner, u64 offset, int no_quota);
 
-int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
+                              int delalloc);
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
                                       u64 start, u64 len);
 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
index 2af6e66..eea26e1 100644 (file)
@@ -36,6 +36,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "sysfs.h"
 
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
                                       int scrub_ret);
@@ -562,6 +563,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
                fs_info->fs_devices->latest_bdev = tgt_device->bdev;
        list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
 
+       /* replace the sysfs entry */
+       btrfs_kobj_rm_device(fs_info, src_device);
+       btrfs_kobj_add_device(fs_info, tgt_device);
+
        btrfs_rm_dev_replace_blocked(fs_info);
 
        btrfs_rm_dev_replace_srcdev(fs_info, src_device);
index 8bb4aa1..08e65e9 100644 (file)
@@ -369,7 +369,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
-       btrfs_tree_read_unlock_blocking(eb);
+       if (need_lock)
+               btrfs_tree_read_unlock_blocking(eb);
        return ret;
 }
 
@@ -2904,7 +2905,9 @@ retry_root_backup:
                if (ret)
                        goto fail_qgroup;
 
+               mutex_lock(&fs_info->cleaner_mutex);
                ret = btrfs_recover_relocation(tree_root);
+               mutex_unlock(&fs_info->cleaner_mutex);
                if (ret < 0) {
                        printk(KERN_WARNING
                               "BTRFS: failed to recover relocation\n");
index fafb3e5..813537f 100644 (file)
@@ -105,7 +105,8 @@ static int find_next_key(struct btrfs_path *path, int level,
 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                      u64 num_bytes, int reserve);
+                                      u64 num_bytes, int reserve,
+                                      int delalloc);
 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
 int btrfs_pin_extent(struct btrfs_root *root,
@@ -3260,7 +3261,8 @@ again:
 
        spin_lock(&block_group->lock);
        if (block_group->cached != BTRFS_CACHE_FINISHED ||
-           !btrfs_test_opt(root, SPACE_CACHE)) {
+           !btrfs_test_opt(root, SPACE_CACHE) ||
+           block_group->delalloc_bytes) {
                /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached,
@@ -5613,6 +5615,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
  * @cache:     The cache we are manipulating
  * @num_bytes: The number of bytes in question
  * @reserve:   One of the reservation enums
+ * @delalloc:  Whether the blocks are allocated for the delalloc write
  *
  * This is called by the allocator when it reserves space, or by somebody who is
  * freeing space that was never actually used on disk.  For example if you
@@ -5631,7 +5634,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
  * succeeds.
  */
 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                      u64 num_bytes, int reserve)
+                                      u64 num_bytes, int reserve, int delalloc)
 {
        struct btrfs_space_info *space_info = cache->space_info;
        int ret = 0;
@@ -5650,12 +5653,18 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                                num_bytes, 0);
                                space_info->bytes_may_use -= num_bytes;
                        }
+
+                       if (delalloc)
+                               cache->delalloc_bytes += num_bytes;
                }
        } else {
                if (cache->ro)
                        space_info->bytes_readonly += num_bytes;
                cache->reserved -= num_bytes;
                space_info->bytes_reserved -= num_bytes;
+
+               if (delalloc)
+                       cache->delalloc_bytes -= num_bytes;
        }
        spin_unlock(&cache->lock);
        spin_unlock(&space_info->lock);
@@ -5669,7 +5678,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
        struct btrfs_caching_control *next;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_block_group_cache *cache;
-       struct btrfs_space_info *space_info;
 
        down_write(&fs_info->commit_root_sem);
 
@@ -5692,9 +5700,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 
        up_write(&fs_info->commit_root_sem);
 
-       list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
-               percpu_counter_set(&space_info->total_bytes_pinned, 0);
-
        update_global_block_rsv(fs_info);
 }
 
@@ -5732,6 +5737,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
                spin_lock(&cache->lock);
                cache->pinned -= len;
                space_info->bytes_pinned -= len;
+               percpu_counter_add(&space_info->total_bytes_pinned, -len);
                if (cache->ro) {
                        space_info->bytes_readonly += len;
                        readonly = true;
@@ -6206,7 +6212,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
 
                btrfs_add_free_space(cache, buf->start, buf->len);
-               btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
+               btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
                trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
                pin = 0;
        }
@@ -6365,6 +6371,70 @@ enum btrfs_loop_type {
        LOOP_NO_EMPTY_SIZE = 3,
 };
 
+static inline void
+btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
+                      int delalloc)
+{
+       if (delalloc)
+               down_read(&cache->data_rwsem);
+}
+
+static inline void
+btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
+                      int delalloc)
+{
+       btrfs_get_block_group(cache);
+       if (delalloc)
+               down_read(&cache->data_rwsem);
+}
+
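+/*
+ * Hedged editorial note: btrfs_lock_cluster() must not sleep on
+ * data_rwsem while holding cluster->refill_lock, so when the trylock
+ * fails it drops the spinlock, blocks on the rwsem, and retries from
+ * "again" to re-check that the cluster still points at the same block
+ * group.
+ */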
+static struct btrfs_block_group_cache *
+btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
+                  struct btrfs_free_cluster *cluster,
+                  int delalloc)
+{
+       struct btrfs_block_group_cache *used_bg;
+       bool locked = false;
+again:
+       spin_lock(&cluster->refill_lock);
+       if (locked) {
+               if (used_bg == cluster->block_group)
+                       return used_bg;
+
+               up_read(&used_bg->data_rwsem);
+               btrfs_put_block_group(used_bg);
+       }
+
+       used_bg = cluster->block_group;
+       if (!used_bg)
+               return NULL;
+
+       if (used_bg == block_group)
+               return used_bg;
+
+       btrfs_get_block_group(used_bg);
+
+       if (!delalloc)
+               return used_bg;
+
+       if (down_read_trylock(&used_bg->data_rwsem))
+               return used_bg;
+
+       spin_unlock(&cluster->refill_lock);
+       down_read(&used_bg->data_rwsem);
+       locked = true;
+       goto again;
+}
+
+static inline void
+btrfs_release_block_group(struct btrfs_block_group_cache *cache,
+                        int delalloc)
+{
+       if (delalloc)
+               up_read(&cache->data_rwsem);
+       btrfs_put_block_group(cache);
+}
+
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
@@ -6379,7 +6449,7 @@ enum btrfs_loop_type {
 static noinline int find_free_extent(struct btrfs_root *orig_root,
                                     u64 num_bytes, u64 empty_size,
                                     u64 hint_byte, struct btrfs_key *ins,
-                                    u64 flags)
+                                    u64 flags, int delalloc)
 {
        int ret = 0;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -6467,6 +6537,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
                                up_read(&space_info->groups_sem);
                        } else {
                                index = get_block_group_index(block_group);
+                               btrfs_lock_block_group(block_group, delalloc);
                                goto have_block_group;
                        }
                } else if (block_group) {
@@ -6481,7 +6552,7 @@ search:
                u64 offset;
                int cached;
 
-               btrfs_get_block_group(block_group);
+               btrfs_grab_block_group(block_group, delalloc);
                search_start = block_group->key.objectid;
 
                /*
@@ -6529,16 +6600,16 @@ have_block_group:
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
-                       spin_lock(&last_ptr->refill_lock);
-                       used_block_group = last_ptr->block_group;
-                       if (used_block_group != block_group &&
-                           (!used_block_group ||
-                            used_block_group->ro ||
-                            !block_group_bits(used_block_group, flags)))
+                       used_block_group = btrfs_lock_cluster(block_group,
+                                                             last_ptr,
+                                                             delalloc);
+                       if (!used_block_group)
                                goto refill_cluster;
 
-                       if (used_block_group != block_group)
-                               btrfs_get_block_group(used_block_group);
+                       if (used_block_group != block_group &&
+                           (used_block_group->ro ||
+                            !block_group_bits(used_block_group, flags)))
+                               goto release_cluster;
 
                        offset = btrfs_alloc_from_cluster(used_block_group,
                                                last_ptr,
@@ -6552,16 +6623,15 @@ have_block_group:
                                                used_block_group,
                                                search_start, num_bytes);
                                if (used_block_group != block_group) {
-                                       btrfs_put_block_group(block_group);
+                                       btrfs_release_block_group(block_group,
+                                                                 delalloc);
                                        block_group = used_block_group;
                                }
                                goto checks;
                        }
 
                        WARN_ON(last_ptr->block_group != used_block_group);
-                       if (used_block_group != block_group)
-                               btrfs_put_block_group(used_block_group);
-refill_cluster:
+release_cluster:
                        /* If we are on LOOP_NO_EMPTY_SIZE, we can't
                         * set up a new clusters, so lets just skip it
                         * and let the allocator find whatever block
@@ -6578,8 +6648,10 @@ refill_cluster:
                         * succeeding in the unclustered
                         * allocation.  */
                        if (loop >= LOOP_NO_EMPTY_SIZE &&
-                           last_ptr->block_group != block_group) {
+                           used_block_group != block_group) {
                                spin_unlock(&last_ptr->refill_lock);
+                               btrfs_release_block_group(used_block_group,
+                                                         delalloc);
                                goto unclustered_alloc;
                        }
 
@@ -6589,6 +6661,10 @@ refill_cluster:
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
+                       if (used_block_group != block_group)
+                               btrfs_release_block_group(used_block_group,
+                                                         delalloc);
+refill_cluster:
                        if (loop >= LOOP_NO_EMPTY_SIZE) {
                                spin_unlock(&last_ptr->refill_lock);
                                goto unclustered_alloc;
@@ -6696,7 +6772,7 @@ checks:
                BUG_ON(offset > search_start);
 
                ret = btrfs_update_reserved_bytes(block_group, num_bytes,
-                                                 alloc_type);
+                                                 alloc_type, delalloc);
                if (ret == -EAGAIN) {
                        btrfs_add_free_space(block_group, offset, num_bytes);
                        goto loop;
@@ -6708,13 +6784,13 @@ checks:
 
                trace_btrfs_reserve_extent(orig_root, block_group,
                                           search_start, num_bytes);
-               btrfs_put_block_group(block_group);
+               btrfs_release_block_group(block_group, delalloc);
                break;
 loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
-               btrfs_put_block_group(block_group);
+               btrfs_release_block_group(block_group, delalloc);
        }
        up_read(&space_info->groups_sem);
 
@@ -6827,7 +6903,7 @@ again:
 int btrfs_reserve_extent(struct btrfs_root *root,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
-                        struct btrfs_key *ins, int is_data)
+                        struct btrfs_key *ins, int is_data, int delalloc)
 {
        bool final_tried = false;
        u64 flags;
@@ -6837,7 +6913,7 @@ int btrfs_reserve_extent(struct btrfs_root *root,
 again:
        WARN_ON(num_bytes < root->sectorsize);
        ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
-                              flags);
+                              flags, delalloc);
 
        if (ret == -ENOSPC) {
                if (!final_tried && ins->offset) {
@@ -6862,7 +6938,8 @@ again:
 }
 
 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
-                                       u64 start, u64 len, int pin)
+                                       u64 start, u64 len,
+                                       int pin, int delalloc)
 {
        struct btrfs_block_group_cache *cache;
        int ret = 0;
@@ -6881,7 +6958,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
                pin_down_extent(root, cache, start, len, 1);
        else {
                btrfs_add_free_space(cache, start, len);
-               btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+               btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
        }
        btrfs_put_block_group(cache);
 
@@ -6891,15 +6968,15 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
 }
 
 int btrfs_free_reserved_extent(struct btrfs_root *root,
-                                       u64 start, u64 len)
+                              u64 start, u64 len, int delalloc)
 {
-       return __btrfs_free_reserved_extent(root, start, len, 0);
+       return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
 }
 
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
                                       u64 start, u64 len)
 {
-       return __btrfs_free_reserved_extent(root, start, len, 1);
+       return __btrfs_free_reserved_extent(root, start, len, 1, 0);
 }
 
 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
@@ -7114,7 +7191,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                return -EINVAL;
 
        ret = btrfs_update_reserved_bytes(block_group, ins->offset,
-                                         RESERVE_ALLOC_NO_ACCOUNT);
+                                         RESERVE_ALLOC_NO_ACCOUNT, 0);
        BUG_ON(ret); /* logic error */
        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
                                         0, owner, offset, ins, 1);
@@ -7256,7 +7333,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                return ERR_CAST(block_rsv);
 
        ret = btrfs_reserve_extent(root, blocksize, blocksize,
-                                  empty_size, hint, &ins, 0);
+                                  empty_size, hint, &ins, 0, 0);
        if (ret) {
                unuse_block_rsv(root->fs_info, block_rsv, blocksize);
                return ERR_PTR(ret);
@@ -8659,6 +8736,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
                                               start);
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
+       init_rwsem(&cache->data_rwsem);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->new_bg_list);
index a389820..3e11aab 100644 (file)
@@ -3437,16 +3437,10 @@ done_unlocked:
        return 0;
 }
 
-static int eb_wait(void *word)
-{
-       io_schedule();
-       return 0;
-}
-
 void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
 {
-       wait_on_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK, eb_wait,
-                   TASK_UNINTERRUPTIBLE);
+       wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK,
+                      TASK_UNINTERRUPTIBLE);
 }
 
 static noinline_for_stack int
index 15ce5f2..ccc264e 100644 (file)
@@ -158,7 +158,6 @@ struct extent_buffer {
         * to unlock
         */
        wait_queue_head_t read_lock_wq;
-       wait_queue_head_t lock_wq;
        struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
 #ifdef CONFIG_BTRFS_DEBUG
        struct list_head leak_list;
index 1874aee..225302b 100644 (file)
@@ -75,6 +75,8 @@ void free_extent_map(struct extent_map *em)
        if (atomic_dec_and_test(&em->refs)) {
                WARN_ON(extent_map_in_tree(em));
                WARN_ON(!list_empty(&em->list));
+               if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
+                       kfree(em->bdev);
                kmem_cache_free(extent_map_cache, em);
        }
 }
index e7fd8a5..b2991fd 100644 (file)
@@ -15,6 +15,7 @@
 #define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
 #define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
 #define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */
+#define EXTENT_FLAG_FS_MAPPING 6 /* filesystem extent mapping type */
 
 struct extent_map {
        struct rb_node rb_node;
index 372b05f..2b0a627 100644 (file)
@@ -274,18 +274,32 @@ struct io_ctl {
 };
 
 static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
-                      struct btrfs_root *root)
+                      struct btrfs_root *root, int write)
 {
+       int num_pages;
+       int check_crcs = 0;
+
+       num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+                   PAGE_CACHE_SHIFT;
+
+       if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
+               check_crcs = 1;
+
+       /* Make sure we can fit our crcs into the first page */
+       if (write && check_crcs &&
+           (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
+               return -ENOSPC;
+
        memset(io_ctl, 0, sizeof(struct io_ctl));
-       io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-               PAGE_CACHE_SHIFT;
-       io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
-                               GFP_NOFS);
+
+       io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
        if (!io_ctl->pages)
                return -ENOMEM;
+
+       io_ctl->num_pages = num_pages;
        io_ctl->root = root;
-       if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
-               io_ctl->check_crcs = 1;
+       io_ctl->check_crcs = check_crcs;
+
        return 0;
 }
 
@@ -666,6 +680,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        generation = btrfs_free_space_generation(leaf, header);
        btrfs_release_path(path);
 
+       if (!BTRFS_I(inode)->generation) {
+               btrfs_info(root->fs_info,
+                          "The free space cache file (%llu) is invalid, skipping it\n",
+                          offset);
+               return 0;
+       }
+
        if (BTRFS_I(inode)->generation != generation) {
                btrfs_err(root->fs_info,
                        "free space inode generation (%llu) "
@@ -677,7 +698,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        if (!num_entries)
                return 0;
 
-       ret = io_ctl_init(&io_ctl, inode, root);
+       ret = io_ctl_init(&io_ctl, inode, root, 0);
        if (ret)
                return ret;
 
@@ -957,19 +978,18 @@ fail:
 }
 
 static noinline_for_stack int
-add_ioctl_entries(struct btrfs_root *root,
-                 struct inode *inode,
-                 struct btrfs_block_group_cache *block_group,
-                 struct io_ctl *io_ctl,
-                 struct extent_state **cached_state,
-                 struct list_head *bitmap_list,
-                 int *entries)
+write_pinned_extent_entries(struct btrfs_root *root,
+                           struct btrfs_block_group_cache *block_group,
+                           struct io_ctl *io_ctl,
+                           int *entries)
 {
        u64 start, extent_start, extent_end, len;
-       struct list_head *pos, *n;
        struct extent_io_tree *unpin = NULL;
        int ret;
 
+       if (!block_group)
+               return 0;
+
        /*
         * We want to add any pinned extents to our free space cache
         * so we don't leak the space
@@ -979,23 +999,19 @@ add_ioctl_entries(struct btrfs_root *root,
         */
        unpin = root->fs_info->pinned_extents;
 
-       if (block_group)
-               start = block_group->key.objectid;
+       start = block_group->key.objectid;
 
-       while (block_group && (start < block_group->key.objectid +
-                              block_group->key.offset)) {
+       while (start < block_group->key.objectid + block_group->key.offset) {
                ret = find_first_extent_bit(unpin, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY, NULL);
-               if (ret) {
-                       ret = 0;
-                       break;
-               }
+               if (ret)
+                       return 0;
 
                /* This pinned extent is out of our range */
                if (extent_start >= block_group->key.objectid +
                    block_group->key.offset)
-                       break;
+                       return 0;
 
                extent_start = max(extent_start, start);
                extent_end = min(block_group->key.objectid +
@@ -1005,11 +1021,20 @@ add_ioctl_entries(struct btrfs_root *root,
                *entries += 1;
                ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
                if (ret)
-                       goto out_nospc;
+                       return -ENOSPC;
 
                start = extent_end;
        }
 
+       return 0;
+}
+
+static noinline_for_stack int
+write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
+{
+       struct list_head *pos, *n;
+       int ret;
+
        /* Write out the bitmaps */
        list_for_each_safe(pos, n, bitmap_list) {
                struct btrfs_free_space *entry =
@@ -1017,36 +1042,24 @@ add_ioctl_entries(struct btrfs_root *root,
 
                ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
                if (ret)
-                       goto out_nospc;
+                       return -ENOSPC;
                list_del_init(&entry->list);
        }
 
-       /* Zero out the rest of the pages just to make sure */
-       io_ctl_zero_remaining_pages(io_ctl);
-
-       ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
-                               0, i_size_read(inode), cached_state);
-       io_ctl_drop_pages(io_ctl);
-       unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-                            i_size_read(inode) - 1, cached_state, GFP_NOFS);
+       return 0;
+}
 
-       if (ret)
-               goto fail;
+static int flush_dirty_cache(struct inode *inode)
+{
+       int ret;
 
        ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
-       if (ret) {
+       if (ret)
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
                                 GFP_NOFS);
-               goto fail;
-       }
-       return 0;
 
-fail:
-       return -1;
-
-out_nospc:
-       return -ENOSPC;
+       return ret;
 }
 
 static void noinline_for_stack
@@ -1056,6 +1069,7 @@ cleanup_write_cache_enospc(struct inode *inode,
                           struct list_head *bitmap_list)
 {
        struct list_head *pos, *n;
+
        list_for_each_safe(pos, n, bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);
@@ -1088,64 +1102,104 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 {
        struct extent_state *cached_state = NULL;
        struct io_ctl io_ctl;
-       struct list_head bitmap_list;
+       LIST_HEAD(bitmap_list);
        int entries = 0;
        int bitmaps = 0;
        int ret;
-       int err = -1;
-
-       INIT_LIST_HEAD(&bitmap_list);
 
        if (!i_size_read(inode))
                return -1;
 
-       ret = io_ctl_init(&io_ctl, inode, root);
+       ret = io_ctl_init(&io_ctl, inode, root, 1);
        if (ret)
                return -1;
 
+       if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
+               down_write(&block_group->data_rwsem);
+               spin_lock(&block_group->lock);
+               if (block_group->delalloc_bytes) {
+                       block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+                       spin_unlock(&block_group->lock);
+                       up_write(&block_group->data_rwsem);
+                       BTRFS_I(inode)->generation = 0;
+                       ret = 0;
+                       goto out;
+               }
+               spin_unlock(&block_group->lock);
+       }
+
        /* Lock all pages first so we can lock the extent safely. */
        io_ctl_prepare_pages(&io_ctl, inode, 0);
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state);
 
-
-       /* Make sure we can fit our crcs into the first page */
-       if (io_ctl.check_crcs &&
-           (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
-               goto out_nospc;
-
        io_ctl_set_generation(&io_ctl, trans->transid);
 
+       /* Write out the extent entries in the free space cache */
        ret = write_cache_extent_entries(&io_ctl, ctl,
                                         block_group, &entries, &bitmaps,
                                         &bitmap_list);
        if (ret)
                goto out_nospc;
 
-       ret = add_ioctl_entries(root, inode, block_group, &io_ctl,
-                               &cached_state, &bitmap_list, &entries);
+       /*
+        * Some spaces that are freed in the current transaction are pinned;
+        * they will be added to the free space cache after the transaction
+        * is committed, so we must not lose them.
+        */
+       ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
+       if (ret)
+               goto out_nospc;
 
-       if (ret == -ENOSPC)
+       /* At last, we write out all the bitmaps. */
+       ret = write_bitmap_entries(&io_ctl, &bitmap_list);
+       if (ret)
                goto out_nospc;
-       else if (ret)
+
+       /* Zero out the rest of the pages just to make sure */
+       io_ctl_zero_remaining_pages(&io_ctl);
+
+       /* Everything is written out, now we dirty the pages in the file. */
+       ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+                               0, i_size_read(inode), &cached_state);
+       if (ret)
+               goto out_nospc;
+
+       if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+               up_write(&block_group->data_rwsem);
+       /*
+        * Release the pages and unlock the extent; we will flush
+        * them out later.
+        */
+       io_ctl_drop_pages(&io_ctl);
+
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+                            i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+
+       /* Flush the dirty pages in the cache file. */
+       ret = flush_dirty_cache(inode);
+       if (ret)
                goto out;
 
-       err = update_cache_item(trans, root, inode, path, offset,
+       /* Update the cache item to tell everyone this cache file is valid. */
+       ret = update_cache_item(trans, root, inode, path, offset,
                                entries, bitmaps);
-
 out:
        io_ctl_free(&io_ctl);
-       if (err) {
+       if (ret) {
                invalidate_inode_pages2(inode->i_mapping);
                BTRFS_I(inode)->generation = 0;
        }
        btrfs_update_inode(trans, root, inode);
-       return err;
+       return ret;
 
 out_nospc:
-
        cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);
+
+       if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+               up_write(&block_group->data_rwsem);
+
        goto out;
 }
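
The rewrite of __btrfs_write_out_cache() above reduces the function to a fixed sequence of helpers sharing a single ret variable and a shared pair of exit labels. A minimal userspace mock of that control-flow shape (the step functions here are hypothetical stand-ins, not btrfs APIs):

    #include <stdio.h>

    /* Hypothetical stand-ins for the staged steps above. */
    static int write_extents(void)  { return 0; }
    static int write_pinned(void)   { return 0; }
    static int write_bitmaps(void)  { return 0; }
    static int dirty_pages(void)    { return 0; }
    static int flush_cache(void)    { return 0; }

    static int write_out_cache(void)
    {
            int ret;

            /* Each stage either succeeds or routes to the shared cleanup. */
            if ((ret = write_extents()))
                    goto out;
            if ((ret = write_pinned()))
                    goto out;
            if ((ret = write_bitmaps()))
                    goto out;
            if ((ret = dirty_pages()))
                    goto out;
            ret = flush_cache();
    out:
            if (ret)
                    fprintf(stderr, "cache write-out failed: %d\n", ret);
            return ret;
    }

    int main(void) { return write_out_cache(); }
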
 
@@ -1165,6 +1219,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                spin_unlock(&block_group->lock);
                return 0;
        }
+
+       if (block_group->delalloc_bytes) {
+               block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+               spin_unlock(&block_group->lock);
+               return 0;
+       }
        spin_unlock(&block_group->lock);
 
        inode = lookup_free_space_inode(root, block_group, path);
index 8925f66..3668048 100644 (file)
@@ -693,7 +693,7 @@ retry:
                ret = btrfs_reserve_extent(root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
-                                          0, alloc_hint, &ins, 1);
+                                          0, alloc_hint, &ins, 1, 1);
                if (ret) {
                        int i;
 
@@ -794,7 +794,7 @@ retry:
 out:
        return ret;
 out_free_reserve:
-       btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+       btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
@@ -917,7 +917,7 @@ static noinline int cow_file_range(struct inode *inode,
                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
-                                          &ins, 1);
+                                          &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;
 
@@ -995,7 +995,7 @@ out:
        return ret;
 
 out_reserve:
-       btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+       btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
@@ -2599,6 +2599,21 @@ out_kfree:
        return NULL;
 }
 
+static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
+                                        u64 start, u64 len)
+{
+       struct btrfs_block_group_cache *cache;
+
+       cache = btrfs_lookup_block_group(root->fs_info, start);
+       ASSERT(cache);
+
+       spin_lock(&cache->lock);
+       cache->delalloc_bytes -= len;
+       spin_unlock(&cache->lock);
+
+       btrfs_put_block_group(cache);
+}
+
 /* as ordered data IO finishes, this gets called so we can finish
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
@@ -2698,6 +2713,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                                                logical_len, logical_len,
                                                compress_type, 0, 0,
                                                BTRFS_FILE_EXTENT_REG);
+               if (!ret)
+                       btrfs_release_delalloc_bytes(root,
+                                                    ordered_extent->start,
+                                                    ordered_extent->disk_len);
        }
        unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
                           ordered_extent->file_offset, ordered_extent->len,
@@ -2750,7 +2769,7 @@ out:
                    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
                    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
                        btrfs_free_reserved_extent(root, ordered_extent->start,
-                                                  ordered_extent->disk_len);
+                                                  ordered_extent->disk_len, 1);
        }
 
 
@@ -6535,21 +6554,21 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
 
        alloc_hint = get_extent_allocation_hint(inode, start, len);
        ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
-                                  alloc_hint, &ins, 1);
+                                  alloc_hint, &ins, 1, 1);
        if (ret)
                return ERR_PTR(ret);
 
        em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
                              ins.offset, ins.offset, ins.offset, 0);
        if (IS_ERR(em)) {
-               btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
                return em;
        }
 
        ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
                                           ins.offset, ins.offset, 0);
        if (ret) {
-               btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
                free_extent_map(em);
                return ERR_PTR(ret);
        }
@@ -7437,7 +7456,7 @@ free_ordered:
                if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
                        btrfs_free_reserved_extent(root, ordered->start,
-                                                  ordered->disk_len);
+                                                  ordered->disk_len, 1);
                btrfs_put_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
        }
@@ -8808,7 +8827,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
                cur_bytes = max(cur_bytes, min_size);
                ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
-                                          *alloc_hint, &ins, 1);
+                                          *alloc_hint, &ins, 1, 0);
                if (ret) {
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
@@ -8822,7 +8841,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                                                  BTRFS_FILE_EXTENT_PREALLOC);
                if (ret) {
                        btrfs_free_reserved_extent(root, ins.objectid,
-                                                  ins.offset);
+                                                  ins.offset, 0);
                        btrfs_abort_transaction(trans, root, ret);
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
index 0d321c2..47aceb4 100644 (file)
@@ -136,19 +136,22 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
 void btrfs_update_iflags(struct inode *inode)
 {
        struct btrfs_inode *ip = BTRFS_I(inode);
-
-       inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+       unsigned int new_fl = 0;
 
        if (ip->flags & BTRFS_INODE_SYNC)
-               inode->i_flags |= S_SYNC;
+               new_fl |= S_SYNC;
        if (ip->flags & BTRFS_INODE_IMMUTABLE)
-               inode->i_flags |= S_IMMUTABLE;
+               new_fl |= S_IMMUTABLE;
        if (ip->flags & BTRFS_INODE_APPEND)
-               inode->i_flags |= S_APPEND;
+               new_fl |= S_APPEND;
        if (ip->flags & BTRFS_INODE_NOATIME)
-               inode->i_flags |= S_NOATIME;
+               new_fl |= S_NOATIME;
        if (ip->flags & BTRFS_INODE_DIRSYNC)
-               inode->i_flags |= S_DIRSYNC;
+               new_fl |= S_DIRSYNC;
+
+       set_mask_bits(&inode->i_flags,
+                     S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
+                     new_fl);
 }
 
 /*
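
The btrfs_update_iflags() change above accumulates the new bits in new_fl and applies them with set_mask_bits(), so concurrent readers never observe the window in which the old code had cleared all five bits but not yet set the new ones. A userspace sketch of the same read-modify-write idea with C11 atomics (the kernel helper is essentially a cmpxchg loop; this analogue is an illustration, not the kernel source):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Atomically replace the bits selected by 'mask' with 'bits'. */
    static unsigned long set_mask_bits_u(atomic_ulong *p,
                                         unsigned long mask,
                                         unsigned long bits)
    {
            unsigned long old = atomic_load(p);
            unsigned long new;

            do {
                    new = (old & ~mask) | bits;
                    /* On failure, 'old' is reloaded with the current value. */
            } while (!atomic_compare_exchange_weak(p, &old, new));
            return old;
    }

    int main(void)
    {
            atomic_ulong flags = 0x0f;

            set_mask_bits_u(&flags, 0x06, 0x02);   /* clear 0x06, set 0x02 */
            printf("%#lx\n", atomic_load(&flags)); /* 0xb */
            return 0;
    }
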
@@ -3139,7 +3142,6 @@ out:
 static void clone_update_extent_map(struct inode *inode,
                                    const struct btrfs_trans_handle *trans,
                                    const struct btrfs_path *path,
-                                   struct btrfs_file_extent_item *fi,
                                    const u64 hole_offset,
                                    const u64 hole_len)
 {
@@ -3154,7 +3156,11 @@ static void clone_update_extent_map(struct inode *inode,
                return;
        }
 
-       if (fi) {
+       if (path) {
+               struct btrfs_file_extent_item *fi;
+
+               fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                   struct btrfs_file_extent_item);
                btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
                em->generation = -1;
                if (btrfs_file_extent_type(path->nodes[0], fi) ==
@@ -3508,18 +3514,15 @@ process_slot:
                                            btrfs_item_ptr_offset(leaf, slot),
                                            size);
                                inode_add_bytes(inode, datal);
-                               extent = btrfs_item_ptr(leaf, slot,
-                                               struct btrfs_file_extent_item);
                        }
 
                        /* If we have an implicit hole (NO_HOLES feature). */
                        if (drop_start < new_key.offset)
                                clone_update_extent_map(inode, trans,
-                                               path, NULL, drop_start,
+                                               NULL, drop_start,
                                                new_key.offset - drop_start);
 
-                       clone_update_extent_map(inode, trans, path,
-                                               extent, 0, 0);
+                       clone_update_extent_map(inode, trans, path, 0, 0);
 
                        btrfs_mark_buffer_dirty(leaf);
                        btrfs_release_path(path);
@@ -3562,12 +3565,10 @@ process_slot:
                        btrfs_end_transaction(trans, root);
                        goto out;
                }
+               clone_update_extent_map(inode, trans, NULL, last_dest_end,
+                                       destoff + len - last_dest_end);
                ret = clone_finish_inode_update(trans, inode, destoff + len,
                                                destoff, olen);
-               if (ret)
-                       goto out;
-               clone_update_extent_map(inode, trans, path, NULL, last_dest_end,
-                                       destoff + len - last_dest_end);
        }
 
 out:
index 01277b8..5665d21 100644 (file)
@@ -33,14 +33,14 @@ static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
-       }
+       /*
+        * no lock is required.  The lock owner may change if
+        * we have a read lock, but it won't change to or away
+        * from us.  If we have the write lock, we are the owner
+        * and it'll never change.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner)
+               return;
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -65,14 +65,15 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
-       }
+       /*
+        * no lock is required.  The lock owner may change if
+        * we have a read lock, but it won't change to or away
+        * from us.  If we have the write lock, we are the owner
+        * and it'll never change.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner)
+               return;
+
        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
@@ -99,6 +100,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+       BUG_ON(!atomic_read(&eb->blocking_writers) &&
+              current->pid == eb->lock_owner);
+
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
@@ -132,7 +136,9 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
        if (atomic_read(&eb->blocking_writers))
                return 0;
 
-       read_lock(&eb->lock);
+       if (!read_trylock(&eb->lock))
+               return 0;
+
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
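
The read_lock() to read_trylock() conversion above keeps btrfs_try_tree_read_lock() truly non-blocking: if a writer already holds eb->lock, the old code would wait on the rwlock despite the "try" in its name. A userspace sketch of the same check/trylock/re-check shape using POSIX rwlocks (the struct and fields are illustrative, not btrfs's):

    #include <pthread.h>
    #include <stdatomic.h>

    struct ebuf {
            pthread_rwlock_t lock;
            atomic_int blocking_writers;
    };

    /* Returns 1 with the read lock held, or 0 without ever blocking. */
    static int try_tree_read_lock(struct ebuf *eb)
    {
            if (atomic_load(&eb->blocking_writers))
                    return 0;
            if (pthread_rwlock_tryrdlock(&eb->lock) != 0)
                    return 0;       /* contended: give up, don't wait */
            if (atomic_load(&eb->blocking_writers)) {
                    pthread_rwlock_unlock(&eb->lock);
                    return 0;       /* a writer showed up; back off */
            }
            return 1;
    }

    int main(void)
    {
            struct ebuf eb = { PTHREAD_RWLOCK_INITIALIZER, 0 };

            if (try_tree_read_lock(&eb))
                    pthread_rwlock_unlock(&eb.lock);
            return 0;
    }
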
@@ -151,7 +157,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;
-       write_lock(&eb->lock);
+
+       if (!write_trylock(&eb->lock))
+               return 0;
+
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
@@ -168,14 +177,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       eb->lock_nested = 0;
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
+       /*
+        * if we're nested, we have the write lock.  No new locking
+        * is needed as long as we are the lock owner.
+        * The write unlock will do a barrier for us, and the lock_nested
+        * field only matters to the lock owner.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner) {
+               eb->lock_nested = 0;
+               return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
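
The unlock paths above rely on an ownership invariant instead of taking eb->lock just to inspect lock_nested: only the write-lock owner ever sets or clears the field, so the owner can test it locklessly. That invariant, sketched in userspace terms (struct and field names are illustrative):

    #include <pthread.h>

    struct ebuf {
            pthread_rwlock_t lock;
            pthread_t owner;        /* valid only while write-locked */
            int lock_nested;        /* touched only by the owner thread */
    };

    static void tree_read_unlock(struct ebuf *eb)
    {
            /*
             * A nested "read lock" taken by the write-lock owner never
             * touched the rwlock, so only the flag needs clearing, and
             * only the owner can be running here to clear it.
             */
            if (eb->lock_nested && pthread_equal(eb->owner, pthread_self())) {
                    eb->lock_nested = 0;
                    return;
            }
            pthread_rwlock_unlock(&eb->lock);
    }

    int main(void)
    {
            struct ebuf eb = { PTHREAD_RWLOCK_INITIALIZER, pthread_self(), 1 };

            tree_read_unlock(&eb);  /* clears the nested flag, no unlock */
            return eb.lock_nested;  /* 0 */
    }
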
@@ -189,14 +199,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       eb->lock_nested = 0;
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
+       /*
+        * if we're nested, we have the write lock.  No new locking
+        * is needed as long as we are the lock owner.
+        * The write unlock will do a barrier for us, and the lock_nested
+        * field only matters to the lock owner.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner) {
+               eb->lock_nested = 0;
+               return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
@@ -244,6 +255,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
        BUG_ON(blockers > 1);
 
        btrfs_assert_tree_locked(eb);
+       eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);
 
        if (blockers) {
index e12441c..7187b14 100644 (file)
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
                                           log_list);
                list_del_init(&ordered->log_list);
                spin_unlock_irq(&log->log_extents_lock[index]);
+
+               if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+                   !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+                       struct inode *inode = ordered->inode;
+                       u64 start = ordered->file_offset;
+                       u64 end = ordered->file_offset + ordered->len - 1;
+
+                       WARN_ON(!inode);
+                       filemap_fdatawrite_range(inode->i_mapping, start, end);
+               }
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));
+
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
index 6efd70d..9626b4a 100644 (file)
@@ -54,7 +54,7 @@ static void print_extent_data_ref(struct extent_buffer *eb,
               btrfs_extent_data_ref_count(eb, ref));
 }
 
-static void print_extent_item(struct extent_buffer *eb, int slot)
+static void print_extent_item(struct extent_buffer *eb, int slot, int type)
 {
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
@@ -63,7 +63,6 @@ static void print_extent_item(struct extent_buffer *eb, int slot)
        struct btrfs_disk_key key;
        unsigned long end;
        unsigned long ptr;
-       int type;
        u32 item_size = btrfs_item_size_nr(eb, slot);
        u64 flags;
        u64 offset;
@@ -88,7 +87,8 @@ static void print_extent_item(struct extent_buffer *eb, int slot)
               btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
               flags);
 
-       if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+       if ((type == BTRFS_EXTENT_ITEM_KEY) &&
+           flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                struct btrfs_tree_block_info *info;
                info = (struct btrfs_tree_block_info *)(ei + 1);
                btrfs_tree_block_key(eb, info, &key);
@@ -223,7 +223,8 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
                                btrfs_disk_root_refs(l, ri));
                        break;
                case BTRFS_EXTENT_ITEM_KEY:
-                       print_extent_item(l, i);
+               case BTRFS_METADATA_ITEM_KEY:
+                       print_extent_item(l, i, type);
                        break;
                case BTRFS_TREE_BLOCK_REF_KEY:
                        printk(KERN_INFO "\t\ttree block backref\n");
index 4055291..4a88f07 100644 (file)
@@ -1956,9 +1956,10 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
         * pages are going to be uptodate.
         */
        for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
-               if (rbio->faila == stripe ||
-                   rbio->failb == stripe)
+               if (rbio->faila == stripe || rbio->failb == stripe) {
+                       atomic_inc(&rbio->bbio->error);
                        continue;
+               }
 
                for (pagenr = 0; pagenr < nr_pages; pagenr++) {
                        struct page *p;
index ac80188..b6d198f 100644 (file)
@@ -2725,11 +2725,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);
 
-               if (found_key.offset + length <= start) {
-                       key.offset = found_key.offset + length;
-                       btrfs_release_path(path);
-                       continue;
-               }
+               if (found_key.offset + length <= start)
+                       goto skip;
 
                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
@@ -2740,10 +2737,12 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 * the chunk from going away while we scrub it
                 */
                cache = btrfs_lookup_block_group(fs_info, chunk_offset);
-               if (!cache) {
-                       ret = -ENOENT;
-                       break;
-               }
+
+               /* some chunks are removed but not committed to disk yet;
+                * continue scrubbing */
+               if (!cache)
+                       goto skip;
+
                dev_replace->cursor_right = found_key.offset + length;
                dev_replace->cursor_left = found_key.offset;
                dev_replace->item_needs_writeback = 1;
@@ -2802,7 +2801,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
                dev_replace->cursor_left = dev_replace->cursor_right;
                dev_replace->item_needs_writeback = 1;
-
+skip:
                key.offset = found_key.offset + length;
                btrfs_release_path(path);
        }
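
Both early-exit cases in scrub_enumerate_chunks() now funnel through the single skip: label, so "dev extent behind our start" and "chunk already removed" share one path that still advances the search key. The shape, reduced to a runnable toy loop (data and helpers are made up for illustration):

    #include <stdio.h>

    struct extent { unsigned long start, len; int removed; };

    int main(void)
    {
            struct extent ext[] = { {0, 10, 0}, {10, 10, 1}, {20, 10, 0} };
            unsigned long start = 5, key = 0;

            for (unsigned i = 0; i < 3; i++) {
                    if (ext[i].start + ext[i].len <= start)
                            goto skip;      /* behind our starting point */
                    if (ext[i].removed)
                            goto skip;      /* vanished under us, keep going */
                    printf("scrub [%lu, %lu)\n", ext[i].start,
                           ext[i].start + ext[i].len);
    skip:
                    key = ext[i].start + ext[i].len;  /* always advance */
            }
            printf("next key %lu\n", key);
            return 0;
    }
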
index 4662d92..8e16bca 100644 (file)
@@ -522,9 +522,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_ssd_spread:
                        btrfs_set_and_info(root, SSD_SPREAD,
                                           "use spread ssd allocation scheme");
+                       btrfs_set_opt(info->mount_opt, SSD);
                        break;
                case Opt_nossd:
-                       btrfs_clear_and_info(root, NOSSD,
+                       btrfs_set_and_info(root, NOSSD,
                                             "not using ssd allocation scheme");
                        btrfs_clear_opt(info->mount_opt, SSD);
                        break;
@@ -1467,7 +1468,9 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                        goto restore;
 
                /* recover relocation */
+               mutex_lock(&fs_info->cleaner_mutex);
                ret = btrfs_recover_relocation(root);
+               mutex_unlock(&fs_info->cleaner_mutex);
                if (ret)
                        goto restore;
 
@@ -1808,6 +1811,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
                list_for_each_entry(dev, head, dev_list) {
                        if (dev->missing)
                                continue;
+                       if (!dev->name)
+                               continue;
                        if (!first_dev || dev->devid < first_dev->devid)
                                first_dev = dev;
                }
index df39458..7869936 100644 (file)
@@ -605,14 +605,37 @@ static void init_feature_attrs(void)
        }
 }
 
-static int add_device_membership(struct btrfs_fs_info *fs_info)
+int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+               struct btrfs_device *one_device)
+{
+       struct hd_struct *disk;
+       struct kobject *disk_kobj;
+
+       if (!fs_info->device_dir_kobj)
+               return -EINVAL;
+
+       if (one_device) {
+               disk = one_device->bdev->bd_part;
+               disk_kobj = &part_to_dev(disk)->kobj;
+
+               sysfs_remove_link(fs_info->device_dir_kobj,
+                                               disk_kobj->name);
+       }
+
+       return 0;
+}
+
+int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+               struct btrfs_device *one_device)
 {
        int error = 0;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *dev;
 
-       fs_info->device_dir_kobj = kobject_create_and_add("devices",
+       if (!fs_info->device_dir_kobj)
+               fs_info->device_dir_kobj = kobject_create_and_add("devices",
                                                &fs_info->super_kobj);
+
        if (!fs_info->device_dir_kobj)
                return -ENOMEM;
 
@@ -623,6 +646,9 @@ static int add_device_membership(struct btrfs_fs_info *fs_info)
                if (!dev->bdev)
                        continue;
 
+               if (one_device && one_device != dev)
+                       continue;
+
                disk = dev->bdev->bd_part;
                disk_kobj = &part_to_dev(disk)->kobj;
 
@@ -666,7 +692,7 @@ int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info)
        if (error)
                goto failure;
 
-       error = add_device_membership(fs_info);
+       error = btrfs_kobj_add_device(fs_info, NULL);
        if (error)
                goto failure;
 
index 9ab5763..ac46df3 100644 (file)
@@ -66,4 +66,8 @@ char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
 extern const char * const btrfs_feature_set_names[3];
 extern struct kobj_type space_info_ktype;
 extern struct kobj_type btrfs_raid_ktype;
+int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+               struct btrfs_device *one_device);
+int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+                struct btrfs_device *one_device);
 #endif /* _BTRFS_SYSFS_H_ */
index 511839c..5f379af 100644 (file)
@@ -386,11 +386,13 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
        bool reloc_reserved = false;
        int ret;
 
+       /* Send isn't supposed to start transactions. */
+       ASSERT(current->journal_info != (void *)BTRFS_SEND_TRANS_STUB);
+
        if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                return ERR_PTR(-EROFS);
 
-       if (current->journal_info &&
-           current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
+       if (current->journal_info) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                h->use_count++;
@@ -491,6 +493,7 @@ again:
        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED &&
            may_wait_transaction(root, type)) {
+               current->journal_info = h;
                btrfs_commit_transaction(h, root);
                goto again;
        }
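
start_transaction() above treats current->journal_info as a per-task nesting slot: a nested start reuses the outer handle, and the fix assigns the handle back into the slot before committing so the commit path sees the handle it is committing. The nesting idea alone, as a userspace sketch with a thread-local slot (types and names are illustrative, not btrfs's):

    #include <stdio.h>
    #include <stdlib.h>

    struct handle { int use_count; };

    /* Analogue of current->journal_info: the handle in use, if any. */
    static _Thread_local struct handle *journal_info;

    static struct handle *start_transaction(void)
    {
            if (journal_info) {             /* nested: reuse outer handle */
                    journal_info->use_count++;
                    return journal_info;
            }
            struct handle *h = malloc(sizeof(*h));
            h->use_count = 1;
            journal_info = h;
            return h;
    }

    int main(void)
    {
            struct handle *a = start_transaction();
            struct handle *b = start_transaction();
            printf("%d %d\n", a == b, a->use_count);  /* prints: 1 2 */
            free(a);
            return 0;
    }
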
@@ -1615,11 +1618,6 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
        int ret;
 
        ret = btrfs_run_delayed_items(trans, root);
-       /*
-        * running the delayed items may have added new refs. account
-        * them now so that they hinder processing of more delayed refs
-        * as little as possible.
-        */
        if (ret)
                return ret;
 
index ffeed6d..6cb82f6 100644 (file)
@@ -40,6 +40,7 @@
 #include "rcu-string.h"
 #include "math.h"
 #include "dev-replace.h"
+#include "sysfs.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
@@ -554,12 +555,14 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                 * This is ok to do without rcu read locked because we hold the
                 * uuid mutex so nothing we touch in here is going to disappear.
                 */
-               name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
-               if (!name) {
-                       kfree(device);
-                       goto error;
+               if (orig_dev->name) {
+                       name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
+                       if (!name) {
+                               kfree(device);
+                               goto error;
+                       }
+                       rcu_assign_pointer(device->name, name);
                }
-               rcu_assign_pointer(device->name, name);
 
                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
@@ -1677,8 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        if (device->bdev == root->fs_info->fs_devices->latest_bdev)
                root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
-       if (device->bdev)
+       if (device->bdev) {
                device->fs_devices->open_devices--;
+               /* remove sysfs entry */
+               btrfs_kobj_rm_device(root->fs_info, device);
+       }
 
        call_rcu(&device->rcu, free_device);
 
@@ -2143,9 +2149,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
        btrfs_set_super_num_devices(root->fs_info->super_copy,
                                    total_bytes + 1);
+
+       /* add sysfs device entry */
+       btrfs_kobj_add_device(root->fs_info, device);
+
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
        if (seeding_dev) {
+               char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
                ret = init_first_rw_device(trans, root, device);
                if (ret) {
                        btrfs_abort_transaction(trans, root, ret);
@@ -2156,6 +2167,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                        btrfs_abort_transaction(trans, root, ret);
                        goto error_trans;
                }
+
+               /* Sprouting would change the fsid of the mounted root,
+                * so rename the fsid entry in sysfs.
+                */
+               snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
+                                               root->fs_info->fsid);
+               if (kobject_rename(&root->fs_info->super_kobj, fsid_buf))
+                       goto error_trans;
        } else {
                ret = btrfs_add_device(trans, root, device);
                if (ret) {
@@ -2205,6 +2224,7 @@ error_trans:
        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        rcu_string_free(device->name);
+       btrfs_kobj_rm_device(root->fs_info, device);
        kfree(device);
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -2543,9 +2563,6 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
        remove_extent_mapping(em_tree, em);
        write_unlock(&em_tree->lock);
 
-       kfree(map);
-       em->bdev = NULL;
-
        /* once for the tree */
        free_extent_map(em);
        /* once for us */
@@ -4301,9 +4318,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
        em = alloc_extent_map();
        if (!em) {
+               kfree(map);
                ret = -ENOMEM;
                goto error;
        }
+       set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
        em->bdev = (struct block_device *)map;
        em->start = start;
        em->len = num_bytes;
@@ -4346,7 +4365,6 @@ error_del_extent:
        /* One for the tree reference */
        free_extent_map(em);
 error:
-       kfree(map);
        kfree(devices_info);
        return ret;
 }
@@ -4558,7 +4576,6 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
                write_unlock(&tree->map_tree.lock);
                if (!em)
                        break;
-               kfree(em->bdev);
                /* once for us */
                free_extent_map(em);
                /* once for the tree */
@@ -5362,6 +5379,15 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        return 0;
 }
 
+static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err)
+{
+       if (likely(bbio->flags & BTRFS_BIO_ORIG_BIO_SUBMITTED))
+               bio_endio_nodec(bio, err);
+       else
+               bio_endio(bio, err);
+       kfree(bbio);
+}
+
 static void btrfs_end_bio(struct bio *bio, int err)
 {
        struct btrfs_bio *bbio = bio->bi_private;
@@ -5402,12 +5428,6 @@ static void btrfs_end_bio(struct bio *bio, int err)
                        bio = bbio->orig_bio;
                }
 
-               /*
-                * We have original bio now. So increment bi_remaining to
-                * account for it in endio
-                */
-               atomic_inc(&bio->bi_remaining);
-
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -5424,9 +5444,8 @@ static void btrfs_end_bio(struct bio *bio, int err)
                        set_bit(BIO_UPTODATE, &bio->bi_flags);
                        err = 0;
                }
-               kfree(bbio);
 
-               bio_endio(bio, err);
+               btrfs_end_bbio(bbio, bio, err);
        } else if (!is_orig_bio) {
                bio_put(bio);
        }
@@ -5589,12 +5608,15 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 {
        atomic_inc(&bbio->error);
        if (atomic_dec_and_test(&bbio->stripes_pending)) {
+               /* Should be the original bio. */
+               WARN_ON(bio != bbio->orig_bio);
+
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
                bio->bi_iter.bi_sector = logical >> 9;
-               kfree(bbio);
-               bio_endio(bio, -EIO);
+
+               btrfs_end_bbio(bbio, bio, -EIO);
        }
 }
 
@@ -5681,6 +5703,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                        BUG_ON(!bio); /* -ENOMEM */
                } else {
                        bio = first_bio;
+                       bbio->flags |= BTRFS_BIO_ORIG_BIO_SUBMITTED;
                }
 
                submit_stripe_bio(root, bbio, bio,
@@ -5822,6 +5845,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                return -ENOMEM;
        }
 
+       set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
        em->bdev = (struct block_device *)map;
        em->start = logical;
        em->len = length;
@@ -5846,7 +5870,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
                                                        uuid, NULL);
                if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
-                       kfree(map);
                        free_extent_map(em);
                        return -EIO;
                }
@@ -5854,7 +5877,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                        map->stripes[i].dev =
                                add_missing_dev(root, devid, uuid);
                        if (!map->stripes[i].dev) {
-                               kfree(map);
                                free_extent_map(em);
                                return -EIO;
                        }
index 1a15bbe..2aaa00c 100644 (file)
@@ -190,11 +190,14 @@ struct btrfs_bio_stripe {
 struct btrfs_bio;
 typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
 
+#define BTRFS_BIO_ORIG_BIO_SUBMITTED   0x1
+
 struct btrfs_bio {
        atomic_t stripes_pending;
        struct btrfs_fs_info *fs_info;
        bio_end_io_t *end_io;
        struct bio *orig_bio;
+       unsigned long flags;
        void *private;
        atomic_t error;
        int max_errors;
index 4f19631..b67d8fc 100644 (file)
@@ -136,7 +136,7 @@ static int zlib_compress_pages(struct list_head *ws,
                if (workspace->def_strm.total_in > 8192 &&
                    workspace->def_strm.total_in <
                    workspace->def_strm.total_out) {
-                       ret = -EIO;
+                       ret = -E2BIG;
                        goto out;
                }
                /* we need another page for writing out.  Test this
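
Returning -E2BIG instead of -EIO above distinguishes "this data does not compress" from a real I/O failure, so callers can fall back to storing the data uncompressed without treating it as an error. A userspace analogue using the regular zlib API (the kernel uses its own zlib_deflate workspaces; this wrapper is only an illustration of the policy):

    #include <zlib.h>
    #include <errno.h>

    /*
     * Compress src into dst; return 0 on success, -E2BIG when the
     * output would not actually be smaller, -EIO on zlib failure.
     */
    static int compress_if_smaller(unsigned char *dst, unsigned long *dlen,
                                   const unsigned char *src, unsigned long slen)
    {
            if (compress2(dst, dlen, src, slen, Z_BEST_SPEED) != Z_OK)
                    return -EIO;
            if (*dlen >= slen)
                    return -E2BIG;  /* incompressible: caller stores it raw */
            return 0;
    }

    int main(void)
    {
            unsigned char src[4096] = { 0 };        /* highly compressible */
            unsigned char dst[8192];
            unsigned long dlen = sizeof(dst);

            return compress_if_smaller(dst, &dlen, src, sizeof(src)) ? 1 : 0;
    }
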
index eba6e4f..8f05111 100644 (file)
@@ -61,16 +61,9 @@ inline void touch_buffer(struct buffer_head *bh)
 }
 EXPORT_SYMBOL(touch_buffer);
 
-static int sleep_on_buffer(void *word)
-{
-       io_schedule();
-       return 0;
-}
-
 void __lock_buffer(struct buffer_head *bh)
 {
-       wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
-                                                       TASK_UNINTERRUPTIBLE);
+       wait_on_bit_lock_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_buffer);
 
@@ -123,7 +116,7 @@ EXPORT_SYMBOL(buffer_check_dirty_writeback);
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-       wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
+       wait_on_bit_io(&bh->b_state, BH_Lock, TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__wait_on_buffer);
 
index 0227b45..15e9505 100644 (file)
@@ -290,7 +290,8 @@ int
 cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                 const struct nls_table *cp, int mapChars)
 {
-       int i, j, charlen;
+       int i, charlen;
+       int j = 0;
        char src_char;
        __le16 dst_char;
        wchar_t tmp;
@@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
        if (!mapChars)
                return cifs_strtoUTF16(target, source, PATH_MAX, cp);
 
-       for (i = 0, j = 0; i < srclen; j++) {
+       for (i = 0; i < srclen; j++) {
                src_char = source[i];
                charlen = 1;
                switch (src_char) {
                case 0:
-                       put_unaligned(0, &target[j]);
                        goto ctoUTF16_out;
                case ':':
                        dst_char = cpu_to_le16(UNI_COLON);
@@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
        }
 
 ctoUTF16_out:
+       put_unaligned(0, &target[j]); /* Null terminate target unicode string */
        return j;
 }
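
Moving put_unaligned(0, &target[j]) to the ctoUTF16_out label guarantees the destination is terminated on every exit: the embedded-NUL early exit, which previously wrote the terminator itself, and the normal loop exit, which previously wrote none. The shape of the fix as a toy character-remapping loop (the mapping and types are simplified for illustration):

    #include <stdio.h>

    /* Remap src into dst, stopping at an embedded NUL; always terminate. */
    static size_t remap(unsigned short *dst, const char *src, size_t srclen)
    {
            size_t i, j = 0;

            for (i = 0; i < srclen; i++, j++) {
                    if (src[i] == '\0')
                            goto out;
                    /* illustrative mapping: ':' is illegal in SMB names */
                    dst[j] = (src[i] == ':') ? 0xF022 : (unsigned char)src[i];
            }
    out:
            dst[j] = 0;     /* exactly one terminator, on every path */
            return j;
    }

    int main(void)
    {
            unsigned short out[16];
            size_t n = remap(out, "a:b", 3);

            printf("%zu %#x\n", n, out[1]);  /* 3 0xf022 */
            return 0;
    }
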
 
index 2c90d07..8883980 100644 (file)
@@ -725,6 +725,19 @@ out_nls:
        goto out;
 }
 
+static ssize_t
+cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       ssize_t rc;
+       struct inode *inode = file_inode(iocb->ki_filp);
+
+       rc = cifs_revalidate_mapping(inode);
+       if (rc)
+               return rc;
+
+       return generic_file_read_iter(iocb, iter);
+}
+
 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct inode *inode = file_inode(iocb->ki_filp);
@@ -881,7 +894,7 @@ const struct inode_operations cifs_symlink_inode_ops = {
 const struct file_operations cifs_file_ops = {
        .read = new_sync_read,
        .write = new_sync_write,
-       .read_iter = generic_file_read_iter,
+       .read_iter = cifs_loose_read_iter,
        .write_iter = cifs_file_write_iter,
        .open = cifs_open,
        .release = cifs_close,
@@ -939,7 +952,7 @@ const struct file_operations cifs_file_direct_ops = {
 const struct file_operations cifs_file_nobrl_ops = {
        .read = new_sync_read,
        .write = new_sync_write,
-       .read_iter = generic_file_read_iter,
+       .read_iter = cifs_loose_read_iter,
        .write_iter = cifs_file_write_iter,
        .open = cifs_open,
        .release = cifs_close,
index 20d75b8..b98366f 100644 (file)
@@ -3934,13 +3934,6 @@ cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
        return tlink_tcon(cifs_sb_master_tlink(cifs_sb));
 }
 
-static int
-cifs_sb_tcon_pending_wait(void *unused)
-{
-       schedule();
-       return signal_pending(current) ? -ERESTARTSYS : 0;
-}
-
 /* find and return a tlink with given uid */
 static struct tcon_link *
 tlink_rb_search(struct rb_root *root, kuid_t uid)
@@ -4039,11 +4032,10 @@ cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
        } else {
 wait_for_construction:
                ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
-                                 cifs_sb_tcon_pending_wait,
                                  TASK_INTERRUPTIBLE);
                if (ret) {
                        cifs_put_tlink(tlink);
-                       return ERR_PTR(ret);
+                       return ERR_PTR(-ERESTARTSYS);
                }
 
                /* if it's good, return it */
index e90a1e9..b88b1ad 100644 (file)
@@ -3618,13 +3618,6 @@ static int cifs_launder_page(struct page *page)
        return rc;
 }
 
-static int
-cifs_pending_writers_wait(void *unused)
-{
-       schedule();
-       return 0;
-}
-
 void cifs_oplock_break(struct work_struct *work)
 {
        struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
@@ -3636,7 +3629,7 @@ void cifs_oplock_break(struct work_struct *work)
        int rc = 0;
 
        wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
-                       cifs_pending_writers_wait, TASK_UNINTERRUPTIBLE);
+                       TASK_UNINTERRUPTIBLE);
 
        server->ops->downgrade_oplock(server, cinode,
                test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
index a174605..41de393 100644 (file)
@@ -1780,7 +1780,7 @@ cifs_invalidate_mapping(struct inode *inode)
  * @word: long word containing the bit lock
  */
 static int
-cifs_wait_bit_killable(void *word)
+cifs_wait_bit_killable(struct wait_bit_key *key)
 {
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;
@@ -1794,8 +1794,8 @@ cifs_revalidate_mapping(struct inode *inode)
        int rc;
        unsigned long *flags = &CIFS_I(inode)->flags;
 
-       rc = wait_on_bit_lock(flags, CIFS_INO_LOCK, cifs_wait_bit_killable,
-                               TASK_KILLABLE);
+       rc = wait_on_bit_lock_action(flags, CIFS_INO_LOCK, cifs_wait_bit_killable,
+                                    TASK_KILLABLE);
        if (rc)
                return rc;
 
index 264ece7..68559fd 100644 (file)
@@ -374,7 +374,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_WRITE;
        oparms.create_options = create_options;
-       oparms.disposition = FILE_OPEN;
+       oparms.disposition = FILE_CREATE;
        oparms.path = path;
        oparms.fid = &fid;
        oparms.reconnect = false;
index 3b0c62e..6bf55d0 100644 (file)
@@ -582,7 +582,7 @@ int cifs_get_writer(struct cifsInodeInfo *cinode)
 
 start:
        rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
-                                  cifs_oplock_break_wait, TASK_KILLABLE);
+                        TASK_KILLABLE);
        if (rc)
                return rc;
 
index 0b2528f..a93f7e6 100644 (file)
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        if (unlikely(nr < 0))
                return nr;
 
-       tsk->flags = PF_DUMPCORE;
+       tsk->flags |= PF_DUMPCORE;
        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
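
The one-character fix above ('=' to '|=') repairs a classic flags-clobbering bug: assigning PF_DUMPCORE wiped every other PF_* bit already set on the task. Reduced to a runnable demonstration (the flag values are arbitrary stand-ins here):

    #include <stdio.h>

    #define PF_EXITING  0x00000004  /* some pre-existing flag */
    #define PF_DUMPCORE 0x00000200

    int main(void)
    {
            unsigned int buggy = PF_EXITING, fixed = PF_EXITING;

            buggy = PF_DUMPCORE;    /* clobbers PF_EXITING */
            fixed |= PF_DUMPCORE;   /* preserves it */
            printf("buggy=%#x fixed=%#x\n", buggy, fixed);  /* 0x200 0x204 */
            return 0;
    }
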
index 98040ba..17e39b0 100644 (file)
@@ -71,7 +71,6 @@ struct dio_submit {
                                           been performed at the start of a
                                           write */
        int pages_in_io;                /* approximate total IO pages */
-       size_t  size;                   /* total request size (doesn't change)*/
        sector_t block_in_file;         /* Current offset into the underlying
                                           file in dio_block units. */
        unsigned blocks_available;      /* At block_in_file.  changes */
@@ -198,9 +197,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
  * L1 cache.
  */
 static inline struct page *dio_get_page(struct dio *dio,
-               struct dio_submit *sdio, size_t *from, size_t *to)
+                                       struct dio_submit *sdio)
 {
-       int n;
        if (dio_pages_present(sdio) == 0) {
                int ret;
 
@@ -209,10 +207,7 @@ static inline struct page *dio_get_page(struct dio *dio,
                        return ERR_PTR(ret);
                BUG_ON(dio_pages_present(sdio) == 0);
        }
-       n = sdio->head++;
-       *from = n ? 0 : sdio->from;
-       *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
-       return dio->pages[n];
+       return dio->pages[sdio->head];
 }
 
 /**
@@ -911,11 +906,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
        while (sdio->block_in_file < sdio->final_block_in_request) {
                struct page *page;
                size_t from, to;
-               page = dio_get_page(dio, sdio, &from, &to);
+
+               page = dio_get_page(dio, sdio);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
+               from = sdio->head ? 0 : sdio->from;
+               to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+               sdio->head++;
 
                while (from < to) {
                        unsigned this_chunk_bytes;      /* # of bytes mapped */
@@ -1104,7 +1103,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
        unsigned blkbits = i_blkbits;
        unsigned blocksize_mask = (1 << blkbits) - 1;
        ssize_t retval = -EINVAL;
-       loff_t end = offset + iov_iter_count(iter);
+       size_t count = iov_iter_count(iter);
+       loff_t end = offset + count;
        struct dio *dio;
        struct dio_submit sdio = { 0, };
        struct buffer_head map_bh = { 0, };
@@ -1287,10 +1287,9 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
         */
        BUG_ON(retval == -EIOCBQUEUED);
        if (dio->is_async && retval == 0 && dio->result &&
-           ((rw == READ) || (dio->result == sdio.size)))
+           (rw == READ || dio->result == count))
                retval = -EIOCBQUEUED;
-
-       if (retval != -EIOCBQUEUED)
+       else
                dio_await_completion(dio);
 
        if (drop_refcount(dio) == 0) {
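
Moving the from/to computation out of dio_get_page() makes the per-page byte range explicit at the one place it is consumed: the first page starts at sdio->from, the last ends at sdio->to, and every page in between covers the full page. As a standalone helper (the names echo the dio code but the function itself is illustrative):

    #include <stdio.h>
    #define PAGE_SIZE 4096UL

    /* Byte range [from, to) that page 'head' of pages [0, tail) covers. */
    static void page_span(unsigned long head, unsigned long tail,
                          unsigned long first_from, unsigned long last_to,
                          unsigned long *from, unsigned long *to)
    {
            *from = head ? 0 : first_from;
            *to = (head == tail - 1) ? last_to : PAGE_SIZE;
    }

    int main(void)
    {
            unsigned long from, to;

            for (unsigned long head = 0; head < 3; head++) {
                    page_span(head, 3, 512, 1024, &from, &to);
                    printf("page %lu: [%lu, %lu)\n", head, from, to);
            }
            return 0;   /* [512,4096) [0,4096) [0,1024) */
    }
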
index b73e062..b10b48c 100644 (file)
@@ -910,7 +910,7 @@ static const struct file_operations eventpoll_fops = {
 void eventpoll_release_file(struct file *file)
 {
        struct eventpoll *ep;
-       struct epitem *epi;
+       struct epitem *epi, *next;
 
        /*
         * We don't want to get "file->f_lock" because it is not
@@ -926,7 +926,7 @@ void eventpoll_release_file(struct file *file)
         * Besides, ep_remove() acquires the lock, so we can't hold it here.
         */
        mutex_lock(&epmutex);
-       list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
+       list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
                ep = epi->ep;
                mutex_lock_nested(&ep->mtx, 0);
                ep_remove(ep, epi);
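
list_for_each_entry_safe() is required here because ep_remove() unlinks epi from the very list being walked; the _rcu iterator assumes the element survives the iteration, so deleting through it can chase a stale ->next pointer. The same hazard and fix in plain C (hand-rolled list, purely illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int v; struct node *next; };

    /*
     * Delete matching nodes while walking the list.  As with
     * list_for_each_entry_safe(), a node is never touched after it
     * is freed: its successor is read before the free.
     */
    static void remove_negatives(struct node **pp)
    {
            struct node *p;

            while ((p = *pp)) {
                    if (p->v < 0) {
                            *pp = p->next;  /* read successor, unlink... */
                            free(p);        /* ...then free */
                    } else {
                            pp = &p->next;
                    }
            }
    }

    int main(void)
    {
            struct node *head = NULL;

            for (int v = 3; v >= -3; v--) {         /* -3,-2,-1,0,1,2,3 */
                    struct node *n = malloc(sizeof(*n));
                    n->v = v;
                    n->next = head;
                    head = n;
            }
            remove_negatives(&head);
            for (struct node *p = head; p; p = p->next)
                    printf("%d ", p->v);            /* 0 1 2 3 */
            printf("\n");
            return 0;
    }
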
index 0762d14..fca3820 100644 (file)
@@ -194,7 +194,16 @@ static void ext4_init_block_bitmap(struct super_block *sb,
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                grp = ext4_get_group_info(sb, block_group);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+               if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+                       int count;
+                       count = ext4_free_inodes_count(sb, gdp);
+                       percpu_counter_sub(&sbi->s_freeinodes_counter,
+                                          count);
+               }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
@@ -359,6 +368,7 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 {
        ext4_fsblk_t    blk;
        struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
 
        if (buffer_verified(bh))
                return;
@@ -369,6 +379,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
                           block_group, blk);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
@@ -376,6 +389,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
                        desc, bh))) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
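
These hunks (and the matching ialloc.c ones below) all follow the same discipline: test whether the group is already flagged corrupt before subtracting its free counts from the filesystem-wide percpu counters, so repeated detections of the same corruption adjust the counters only once. In ext4 the test and set_bit are serialized by the group's locking; the compressed sketch below folds both into one atomic op, which is an assumption of this illustration, not ext4's code:

    #include <stdatomic.h>
    #include <stdio.h>

    #define GRP_BBITMAP_CORRUPT (1u << 0)

    /* Subtract bb_free from the global counter only on the 0 -> 1
     * transition of the corrupt bit, never on re-detection. */
    static void mark_group_corrupt(atomic_uint *state,
                                   atomic_long *global_free, long bb_free)
    {
            unsigned int old = atomic_fetch_or(state, GRP_BBITMAP_CORRUPT);

            if (!(old & GRP_BBITMAP_CORRUPT))
                    atomic_fetch_sub(global_free, bb_free);
    }

    int main(void)
    {
            atomic_uint state = 0;
            atomic_long free_clusters = 1000;

            mark_group_corrupt(&state, &free_clusters, 100);
            mark_group_corrupt(&state, &free_clusters, 100);  /* no-op */
            printf("%ld\n", atomic_load(&free_clusters));     /* 900 */
            return 0;
    }
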
index 3f5c188..0b7e28e 100644 (file)
@@ -966,10 +966,10 @@ retry:
                        continue;
                }
 
-               if (ei->i_es_lru_nr == 0 || ei == locked_ei)
+               if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
+                   !write_trylock(&ei->i_es_lock))
                        continue;
 
-               write_lock(&ei->i_es_lock);
                shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
                if (ei->i_es_lru_nr == 0)
                        list_del_init(&ei->i_es_lru);
index 0ee59a6..5b87fc3 100644 (file)
@@ -71,6 +71,7 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
                                       struct ext4_group_desc *gdp)
 {
        struct ext4_group_info *grp;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        J_ASSERT_BH(bh, buffer_locked(bh));
 
        /* If checksum is bad mark all blocks and inodes use to prevent
@@ -78,7 +79,16 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                grp = ext4_get_group_info(sb, block_group);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+               if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+                       int count;
+                       count = ext4_free_inodes_count(sb, gdp);
+                       percpu_counter_sub(&sbi->s_freeinodes_counter,
+                                          count);
+               }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return 0;
        }
@@ -116,6 +126,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
        struct buffer_head *bh = NULL;
        ext4_fsblk_t bitmap_blk;
        struct ext4_group_info *grp;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
 
        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
@@ -185,6 +196,12 @@ verify:
                ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
                           "inode_bitmap = %llu", block_group, bitmap_blk);
                grp = ext4_get_group_info(sb, block_group);
+               if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+                       int count;
+                       count = ext4_free_inodes_count(sb, desc);
+                       percpu_counter_sub(&sbi->s_freeinodes_counter,
+                                          count);
+               }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return NULL;
        }
@@ -321,6 +338,12 @@ out:
                        fatal = err;
        } else {
                ext4_error(sb, "bit already cleared for inode %lu", ino);
+               if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+                       int count;
+                       count = ext4_free_inodes_count(sb, gdp);
+                       percpu_counter_sub(&sbi->s_freeinodes_counter,
+                                          count);
+               }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
        }
 
@@ -851,6 +874,13 @@ got:
                goto out;
        }
 
+       BUFFER_TRACE(group_desc_bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, group_desc_bh);
+       if (err) {
+               ext4_std_error(sb, err);
+               goto out;
+       }
+
        /* We may have to initialize the block bitmap if it isn't already */
        if (ext4_has_group_desc_csum(sb) &&
            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -887,13 +917,6 @@ got:
                }
        }
 
-       BUFFER_TRACE(group_desc_bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, group_desc_bh);
-       if (err) {
-               ext4_std_error(sb, err);
-               goto out;
-       }
-
        /* Update the relevant bg descriptor fields */
        if (ext4_has_group_desc_csum(sb)) {
                int free;
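
The ext4 hunks above (apparently fs/ext4/ialloc.c) and the mballoc hunks further below share one pattern: the first time a group's bitmap is declared corrupt, the group's free clusters/inodes are subtracted from the filesystem-wide percpu counters, so statfs() stops advertising space that can no longer be allocated. A minimal sketch of the pattern; the helper itself is hypothetical, while the identifiers mirror the hunks:

    /* Hypothetical helper: discount a newly-corrupt group exactly once. */
    static void ext4_discount_corrupt_group(struct super_block *sb,
                                            struct ext4_group_info *grp)
    {
            struct ext4_sb_info *sbi = EXT4_SB(sb);

            /* The corrupt bit doubles as an "already subtracted" marker,
             * so repeated errors against the same group do not
             * double-subtract from the global counter. */
            if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
                    percpu_counter_sub(&sbi->s_freeclusters_counter,
                                       grp->bb_free);
            set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
    }
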
index 8a57e9f..fd69da1 100644 (file)
@@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
        return 0;
 failed:
        for (; i >= 0; i--) {
-               if (i != indirect_blks && branch[i].bh)
+               /*
+                * We want to ext4_forget() only freshly allocated indirect
+                * blocks.  The buffer for new_blocks[i-1] is at branch[i].bh,
+                * while the buffer at branch[0].bh is an indirect block or
+                * inode that already existed before ext4_alloc_branch() was
+                * called.
+                */
+               if (i > 0 && i != indirect_blks && branch[i].bh)
                        ext4_forget(handle, 1, inode, branch[i].bh,
                                    branch[i].bh->b_blocknr);
                ext4_free_blocks(handle, inode, NULL, new_blocks[i],
@@ -1310,16 +1316,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
                blk = *i_data;
                if (level > 0) {
                        ext4_lblk_t first2;
+                       ext4_lblk_t count2;
+
                        bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
                        if (!bh) {
                                EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
                                                       "Read failure");
                                return -EIO;
                        }
-                       first2 = (first > offset) ? first - offset : 0;
+                       if (first > offset) {
+                               first2 = first - offset;
+                               count2 = count;
+                       } else {
+                               first2 = 0;
+                               count2 = count - (offset - first);
+                       }
                        ret = free_hole_blocks(handle, inode, bh,
                                               (__le32 *)bh->b_data, level - 1,
-                                              first2, count - offset,
+                                              first2, count2,
                                               inode->i_sb->s_blocksize >> 2);
                        if (ret) {
                                brelse(bh);
@@ -1329,8 +1343,8 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
                if (level == 0 ||
                    (bh && all_zeroes((__le32 *)bh->b_data,
                                      (__le32 *)bh->b_data + addr_per_block))) {
-                       ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
-                       *i_data = 0;
+                       ext4_free_data(handle, inode, parent_bh,
+                                      i_data, i_data + 1);
                }
                brelse(bh);
                bh = NULL;
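
The free_hole_blocks() fix above (apparently fs/ext4/indirect.c) now adjusts the length as well as the start before recursing one indirection level down. A worked example with assumed numbers: if the child indirect block begins at logical offset 1024 within this level and the hole covers blocks 500..3499 (first = 500, count = 3000), then first <= offset, so:

    first2 = 0;                    /* hole starts before this child    */
    count2 = 3000 - (1024 - 500);  /* = 2476 blocks remain to be freed */
    /* The old code passed count - offset = 1976 in both branches,
     * i.e. `first` (here 500) blocks too few, leaving them allocated. */

When first > offset, the new code keeps count2 = count, where the old code again passed count - offset.
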
index 59e3162..2dcb936 100644 (file)
@@ -722,6 +722,7 @@ void ext4_mb_generate_buddy(struct super_block *sb,
                                void *buddy, void *bitmap, ext4_group_t group)
 {
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_grpblk_t i = 0;
        ext4_grpblk_t first;
@@ -751,14 +752,17 @@ void ext4_mb_generate_buddy(struct super_block *sb,
 
        if (free != grp->bb_free) {
                ext4_grp_locked_error(sb, group, 0, 0,
-                                     "%u clusters in bitmap, %u in gd; "
-                                     "block bitmap corrupt.",
+                                     "block bitmap and bg descriptor "
+                                     "inconsistent: %u vs %u free clusters",
                                      free, grp->bb_free);
                /*
                 * If we intend to continue, we consider group descriptor
                 * corrupt and update bb_free using bitmap value
                 */
                grp->bb_free = free;
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
        }
        mb_set_largest_free_order(sb, grp);
@@ -1431,6 +1435,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
                right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
 
        if (unlikely(block != -1)) {
+               struct ext4_sb_info *sbi = EXT4_SB(sb);
                ext4_fsblk_t blocknr;
 
                blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
@@ -1441,6 +1446,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
                                      "freeing already freed block "
                                      "(bit %u); block bitmap corrupt.",
                                      block);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          e4b->bd_info->bb_free);
                /* Mark the block group as corrupt. */
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
                        &e4b->bd_info->bb_state);
index b9b9aab..6df7bc6 100644 (file)
@@ -1525,8 +1525,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                        arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
                sbi->s_commit_interval = HZ * arg;
        } else if (token == Opt_max_batch_time) {
-               if (arg == 0)
-                       arg = EXT4_DEF_MAX_BATCH_TIME;
                sbi->s_max_batch_time = arg;
        } else if (token == Opt_min_batch_time) {
                sbi->s_min_batch_time = arg;
@@ -2809,10 +2807,11 @@ static void print_daily_error_info(unsigned long arg)
        es = sbi->s_es;
 
        if (es->s_error_count)
-               ext4_msg(sb, KERN_NOTICE, "error count: %u",
+               /* fsck newer than v1.41.13 is needed to clean this condition. */
+               ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
                         le32_to_cpu(es->s_error_count));
        if (es->s_first_error_time) {
-               printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
+               printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
                       sb->s_id, le32_to_cpu(es->s_first_error_time),
                       (int) sizeof(es->s_first_error_func),
                       es->s_first_error_func,
@@ -2826,7 +2825,7 @@ static void print_daily_error_info(unsigned long arg)
                printk("\n");
        }
        if (es->s_last_error_time) {
-               printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
+               printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
                       sb->s_id, le32_to_cpu(es->s_last_error_time),
                       (int) sizeof(es->s_last_error_func),
                       es->s_last_error_func,
@@ -3880,38 +3879,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
-
-       /*
-        * set up enough so that it can read an inode,
-        * and create new inode for buddy allocator
-        */
-       sbi->s_gdb_count = db_count;
-       if (!test_opt(sb, NOLOAD) &&
-           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
-               sb->s_op = &ext4_sops;
-       else
-               sb->s_op = &ext4_nojournal_sops;
-
-       ext4_ext_init(sb);
-       err = ext4_mb_init(sb);
-       if (err) {
-               ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
-                        err);
-               goto failed_mount2;
-       }
-
        if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
-               goto failed_mount2a;
+               goto failed_mount2;
        }
        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
                if (!ext4_fill_flex_info(sb)) {
                        ext4_msg(sb, KERN_ERR,
                               "unable to initialize "
                               "flex_bg meta info!");
-                       goto failed_mount2a;
+                       goto failed_mount2;
                }
 
+       sbi->s_gdb_count = db_count;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
@@ -3946,6 +3926,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_stripe = ext4_get_stripe_size(sbi);
        sbi->s_extent_max_zeroout_kb = 32;
 
+       /*
+        * set up enough so that it can read an inode
+        */
+       if (!test_opt(sb, NOLOAD) &&
+           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
+               sb->s_op = &ext4_sops;
+       else
+               sb->s_op = &ext4_nojournal_sops;
        sb->s_export_op = &ext4_export_ops;
        sb->s_xattr = ext4_xattr_handlers;
 #ifdef CONFIG_QUOTA
@@ -4135,13 +4123,21 @@ no_journal:
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
                         "reserved pool", ext4_calculate_resv_clusters(sb));
-               goto failed_mount5;
+               goto failed_mount4a;
        }
 
        err = ext4_setup_system_zone(sb);
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to initialize system "
                         "zone (%d)", err);
+               goto failed_mount4a;
+       }
+
+       ext4_ext_init(sb);
+       err = ext4_mb_init(sb);
+       if (err) {
+               ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
+                        err);
                goto failed_mount5;
        }
 
@@ -4218,8 +4214,11 @@ failed_mount8:
 failed_mount7:
        ext4_unregister_li_request(sb);
 failed_mount6:
-       ext4_release_system_zone(sb);
+       ext4_mb_release(sb);
 failed_mount5:
+       ext4_ext_release(sb);
+       ext4_release_system_zone(sb);
+failed_mount4a:
        dput(sb->s_root);
        sb->s_root = NULL;
 failed_mount4:
@@ -4243,14 +4242,11 @@ failed_mount3:
        percpu_counter_destroy(&sbi->s_extent_cache_cnt);
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
-failed_mount2a:
-       ext4_mb_release(sb);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
        ext4_kvfree(sbi->s_group_desc);
 failed_mount:
-       ext4_ext_release(sb);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        if (sbi->s_proc) {
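
The ext4_fill_super() reshuffle above (apparently fs/ext4/super.c) moves ext4_ext_init()/ext4_mb_init() later in the mount sequence and renumbers the error labels so that teardown runs in exact reverse order of setup. A sketch of the resulting unwind discipline; the label names mirror the hunks and the bodies are abbreviated:

    err = ext4_mb_init(sb);
    if (err)
            goto failed_mount5;        /* mballoc is not yet live */
    ...
    failed_mount6:
            ext4_mb_release(sb);       /* undoes ext4_mb_init()   */
    failed_mount5:
            ext4_ext_release(sb);      /* undoes ext4_ext_init()  */
            ext4_release_system_zone(sb);
    failed_mount4a:
            dput(sb->s_root);
            sb->s_root = NULL;
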
index 0924521..f8cf619 100644 (file)
@@ -608,8 +608,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
  *     b. do not use extent cache for better performance
  *     c. give the block addresses to blockdev
  */
-static int get_data_block(struct inode *inode, sector_t iblock,
-                       struct buffer_head *bh_result, int create)
+static int __get_data_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create, bool fiemap)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        unsigned int blkbits = inode->i_sb->s_blocksize_bits;
@@ -637,7 +637,7 @@ static int get_data_block(struct inode *inode, sector_t iblock,
                        err = 0;
                goto unlock_out;
        }
-       if (dn.data_blkaddr == NEW_ADDR)
+       if (dn.data_blkaddr == NEW_ADDR && !fiemap)
                goto put_out;
 
        if (dn.data_blkaddr != NULL_ADDR) {
@@ -671,7 +671,7 @@ get_next:
                                err = 0;
                        goto unlock_out;
                }
-               if (dn.data_blkaddr == NEW_ADDR)
+               if (dn.data_blkaddr == NEW_ADDR && !fiemap)
                        goto put_out;
 
                end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -708,10 +708,23 @@ out:
        return err;
 }
 
+static int get_data_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create)
+{
+       return __get_data_block(inode, iblock, bh_result, create, false);
+}
+
+static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create)
+{
+       return __get_data_block(inode, iblock, bh_result, create, true);
+}
+
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
 {
-       return generic_block_fiemap(inode, fieinfo, start, len, get_data_block);
+       return generic_block_fiemap(inode, fieinfo,
+                               start, len, get_data_block_fiemap);
 }
 
 static int f2fs_read_data_page(struct file *file, struct page *page)
index 966acb0..a4addd7 100644 (file)
@@ -376,11 +376,11 @@ static struct page *init_inode_metadata(struct inode *inode,
 
 put_error:
        f2fs_put_page(page, 1);
+error:
        /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
        truncate_inode_pages(&inode->i_data, 0);
        truncate_blocks(inode, 0);
        remove_dirty_dir_inode(inode);
-error:
        remove_inode_page(inode);
        return ERR_PTR(err);
 }
index e51c732..58df97e 100644 (file)
@@ -342,9 +342,6 @@ struct f2fs_sm_info {
        struct dirty_seglist_info *dirty_info;  /* dirty segment information */
        struct curseg_info *curseg_array;       /* active segment information */
 
-       struct list_head wblist_head;   /* list of under-writeback pages */
-       spinlock_t wblist_lock;         /* lock for checkpoint */
-
        block_t seg0_blkaddr;           /* block address of 0'th segment */
        block_t main_blkaddr;           /* start block address of main area */
        block_t ssa_blkaddr;            /* start block address of SSA area */
@@ -644,7 +641,8 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
  */
 static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
-       WARN_ON((nid >= NM_I(sbi)->max_nid));
+       if (unlikely(nid < F2FS_ROOT_INO(sbi)))
+               return -EINVAL;
        if (unlikely(nid >= NM_I(sbi)->max_nid))
                return -EINVAL;
        return 0;
index c58e330..7d8b962 100644 (file)
@@ -659,16 +659,19 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
        off_start = offset & (PAGE_CACHE_SIZE - 1);
        off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
+       f2fs_lock_op(sbi);
+
        for (index = pg_start; index <= pg_end; index++) {
                struct dnode_of_data dn;
 
-               f2fs_lock_op(sbi);
+               if (index == pg_end && !off_end)
+                       goto noalloc;
+
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = f2fs_reserve_block(&dn, index);
-               f2fs_unlock_op(sbi);
                if (ret)
                        break;
-
+noalloc:
                if (pg_start == pg_end)
                        new_size = offset + len;
                else if (index == pg_start && off_start)
@@ -683,8 +686,9 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
                i_size_read(inode) < new_size) {
                i_size_write(inode, new_size);
                mark_inode_dirty(inode);
-               f2fs_write_inode(inode, NULL);
+               update_inode_page(inode);
        }
+       f2fs_unlock_op(sbi);
 
        return ret;
 }
index adc622c..2cf6962 100644 (file)
@@ -78,6 +78,7 @@ static int do_read_inode(struct inode *inode)
        if (check_nid_range(sbi, inode->i_ino)) {
                f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
                         (unsigned long) inode->i_ino);
+               WARN_ON(1);
                return -EINVAL;
        }
 
index 9138c32..a6bdddc 100644 (file)
@@ -417,9 +417,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
 
                f2fs_set_link(new_dir, new_entry, new_page, old_inode);
-               down_write(&F2FS_I(old_inode)->i_sem);
-               F2FS_I(old_inode)->i_pino = new_dir->i_ino;
-               up_write(&F2FS_I(old_inode)->i_sem);
 
                new_inode->i_ctime = CURRENT_TIME;
                down_write(&F2FS_I(new_inode)->i_sem);
@@ -448,6 +445,10 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
        }
 
+       down_write(&F2FS_I(old_inode)->i_sem);
+       file_lost_pino(old_inode);
+       up_write(&F2FS_I(old_inode)->i_sem);
+
        old_inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(old_inode);
 
@@ -457,9 +458,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (old_dir != new_dir) {
                        f2fs_set_link(old_inode, old_dir_entry,
                                                old_dir_page, new_dir);
-                       down_write(&F2FS_I(old_inode)->i_sem);
-                       F2FS_I(old_inode)->i_pino = new_dir->i_ino;
-                       up_write(&F2FS_I(old_inode)->i_sem);
                        update_inode_page(old_inode);
                } else {
                        kunmap(old_dir_page);
@@ -474,7 +472,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
        return 0;
 
 put_out_dir:
-       f2fs_put_page(new_page, 1);
+       kunmap(new_page);
+       f2fs_put_page(new_page, 0);
 out_dir:
        if (old_dir_entry) {
                kunmap(old_dir_page);
index 9dfb9a0..4b697cc 100644 (file)
@@ -42,6 +42,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
                mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
                res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
        } else if (type == DIRTY_DENTS) {
+               if (sbi->sb->s_bdi->dirty_exceeded)
+                       return false;
                mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
                res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
        }
index f25f0e0..d04613d 100644 (file)
@@ -272,14 +272,15 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
                return -ENOMEM;
        spin_lock_init(&fcc->issue_lock);
        init_waitqueue_head(&fcc->flush_wait_queue);
+       sbi->sm_info->cmd_control_info = fcc;
        fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
                                "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(fcc->f2fs_issue_flush)) {
                err = PTR_ERR(fcc->f2fs_issue_flush);
                kfree(fcc);
+               sbi->sm_info->cmd_control_info = NULL;
                return err;
        }
-       sbi->sm_info->cmd_control_info = fcc;
 
        return err;
 }
@@ -1885,8 +1886,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
 
        /* init sm info */
        sbi->sm_info = sm_info;
-       INIT_LIST_HEAD(&sm_info->wblist_head);
-       spin_lock_init(&sm_info->wblist_lock);
        sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
        sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
        sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
index b2b1863..8f96d93 100644 (file)
@@ -689,9 +689,7 @@ static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
 
-       if (unlikely(ino < F2FS_ROOT_INO(sbi)))
-               return ERR_PTR(-ESTALE);
-       if (unlikely(ino >= NM_I(sbi)->max_nid))
+       if (check_nid_range(sbi, ino))
                return ERR_PTR(-ESTALE);
 
        /*
index be568b7..ef9bef1 100644 (file)
@@ -342,7 +342,8 @@ static void __inode_wait_for_writeback(struct inode *inode)
        wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
        while (inode->i_state & I_SYNC) {
                spin_unlock(&inode->i_lock);
-               __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
+               __wait_on_bit(wqh, &wq, bit_wait,
+                             TASK_UNINTERRUPTIBLE);
                spin_lock(&inode->i_lock);
        }
 }
index aec01be..89acec7 100644 (file)
@@ -160,7 +160,7 @@ void __fscache_enable_cookie(struct fscache_cookie *cookie,
        _enter("%p", cookie);
 
        wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
-                        fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+                        TASK_UNINTERRUPTIBLE);
 
        if (test_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
                goto out_unlock;
@@ -255,7 +255,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
        if (!fscache_defer_lookup) {
                _debug("non-deferred lookup %p", &cookie->flags);
                wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
-                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+                           TASK_UNINTERRUPTIBLE);
                _debug("complete");
                if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
                        goto unavailable;
@@ -463,7 +463,6 @@ void __fscache_wait_on_invalidate(struct fscache_cookie *cookie)
        _enter("%p", cookie);
 
        wait_on_bit(&cookie->flags, FSCACHE_COOKIE_INVALIDATING,
-                   fscache_wait_bit_interruptible,
                    TASK_UNINTERRUPTIBLE);
 
        _leave("");
@@ -525,7 +524,7 @@ void __fscache_disable_cookie(struct fscache_cookie *cookie, bool invalidate)
        }
 
        wait_on_bit_lock(&cookie->flags, FSCACHE_COOKIE_ENABLEMENT_LOCK,
-                        fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+                        TASK_UNINTERRUPTIBLE);
        if (!test_and_clear_bit(FSCACHE_COOKIE_ENABLED, &cookie->flags))
                goto out_unlock_enable;
 
index bc6c08f..7872a62 100644 (file)
@@ -97,8 +97,6 @@ static inline bool fscache_object_congested(void)
        return workqueue_congested(WORK_CPU_UNBOUND, fscache_object_wq);
 }
 
-extern int fscache_wait_bit(void *);
-extern int fscache_wait_bit_interruptible(void *);
 extern int fscache_wait_atomic_t(atomic_t *);
 
 /*
index 63f868e..a31b83c 100644 (file)
@@ -196,24 +196,6 @@ static void __exit fscache_exit(void)
 
 module_exit(fscache_exit);
 
-/*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-int fscache_wait_bit(void *flags)
-{
-       schedule();
-       return 0;
-}
-
-/*
- * wait_on_bit() sleep function for interruptible waiting
- */
-int fscache_wait_bit_interruptible(void *flags)
-{
-       schedule();
-       return signal_pending(current);
-}
-
 /*
  * wait_on_atomic_t() sleep function for uninterruptible waiting
  */
index ed70714..85332b9 100644 (file)
@@ -298,7 +298,6 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
 
        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
-                       fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
@@ -342,7 +341,6 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
        if (stat_op_waits)
                fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
-                       fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                ret = fscache_cancel_op(op, do_cancel);
                if (ret == 0)
@@ -351,7 +349,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
-                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+                           TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");
 
index 098f97b..ca88731 100644 (file)
@@ -643,9 +643,8 @@ struct fuse_copy_state {
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
-       void *mapaddr;
-       void *buf;
        unsigned len;
+       unsigned offset;
        unsigned move_pages:1;
 };
 
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;
 
-               if (!cs->write) {
-                       kunmap_atomic(cs->mapaddr);
-               } else {
-                       kunmap_atomic(cs->mapaddr);
+               if (cs->write)
                        buf->len = PAGE_SIZE - cs->len;
-               }
                cs->currbuf = NULL;
-               cs->mapaddr = NULL;
-       } else if (cs->mapaddr) {
-               kunmap_atomic(cs->mapaddr);
+       } else if (cs->pg) {
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
-               cs->mapaddr = NULL;
        }
+       cs->pg = NULL;
 }
 
 /*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
  */
 static int fuse_copy_fill(struct fuse_copy_state *cs)
 {
-       unsigned long offset;
+       struct page *page;
        int err;
 
        unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
-                       cs->mapaddr = kmap_atomic(buf->page);
+                       cs->pg = buf->page;
+                       cs->offset = buf->offset;
                        cs->len = buf->len;
-                       cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
-                       struct page *page;
-
                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;
 
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        buf->len = 0;
 
                        cs->currbuf = buf;
-                       cs->mapaddr = kmap_atomic(page);
-                       cs->buf = cs->mapaddr;
+                       cs->pg = page;
+                       cs->offset = 0;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        cs->iov++;
                        cs->nr_segs--;
                }
-               err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
+               err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
                if (err < 0)
                        return err;
                BUG_ON(err != 1);
-               offset = cs->addr % PAGE_SIZE;
-               cs->mapaddr = kmap_atomic(cs->pg);
-               cs->buf = cs->mapaddr + offset;
-               cs->len = min(PAGE_SIZE - offset, cs->seglen);
+               cs->pg = page;
+               cs->offset = cs->addr % PAGE_SIZE;
+               cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
                cs->seglen -= cs->len;
                cs->addr += cs->len;
        }
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
 {
        unsigned ncpy = min(*size, cs->len);
        if (val) {
+               void *pgaddr = kmap_atomic(cs->pg);
+               void *buf = pgaddr + cs->offset;
+
                if (cs->write)
-                       memcpy(cs->buf, *val, ncpy);
+                       memcpy(buf, *val, ncpy);
                else
-                       memcpy(*val, cs->buf, ncpy);
+                       memcpy(*val, buf, ncpy);
+
+               kunmap_atomic(pgaddr);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
-       cs->buf += ncpy;
+       cs->offset += ncpy;
        return ncpy;
 }
 
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 out_fallback_unlock:
        unlock_page(newpage);
 out_fallback:
-       cs->mapaddr = kmap_atomic(buf->page);
-       cs->buf = cs->mapaddr + buf->offset;
+       cs->pg = buf->page;
+       cs->offset = buf->offset;
 
        err = lock_request(cs->fc, cs->req);
        if (err)
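
The fuse_copy_state rework above (apparently fs/fuse/dev.c) stops caching a kmap_atomic() mapping in cs->mapaddr/cs->buf across copy steps; the state keeps only the page and a byte offset, and the page is mapped just around each memcpy in fuse_copy_do(), keeping the atomic-mapping window as small as possible. A minimal sketch of the pattern, with a hypothetical helper name:

    /* Map the page only for the duration of one copy chunk. */
    static unsigned copy_chunk(struct page *pg, unsigned offset,
                               void *val, unsigned len, bool write)
    {
            void *pgaddr = kmap_atomic(pg);    /* short-lived mapping */
            void *buf = pgaddr + offset;

            if (write)
                    memcpy(buf, val, len);
            else
                    memcpy(val, buf, len);
            kunmap_atomic(pgaddr);             /* never held while the
                                                * caller might sleep  */
            return len;
    }
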
index 4219835..0c60482 100644 (file)
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
        inode = ACCESS_ONCE(entry->d_inode);
        if (inode && is_bad_inode(inode))
                goto invalid;
-       else if (fuse_dentry_time(entry) < get_jiffies_64()) {
+       else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
+                (flags & LOOKUP_REVAL)) {
                int err;
                struct fuse_entry_out outarg;
                struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
        return err;
 }
 
-static int fuse_rename(struct inode *olddir, struct dentry *oldent,
-                      struct inode *newdir, struct dentry *newent)
-{
-       return fuse_rename_common(olddir, oldent, newdir, newent, 0,
-                                 FUSE_RENAME, sizeof(struct fuse_rename_in));
-}
-
 static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
                        struct inode *newdir, struct dentry *newent,
                        unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
        if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
                return -EINVAL;
 
-       if (fc->no_rename2 || fc->minor < 23)
-               return -EINVAL;
+       if (flags) {
+               if (fc->no_rename2 || fc->minor < 23)
+                       return -EINVAL;
 
-       err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
-                                FUSE_RENAME2, sizeof(struct fuse_rename2_in));
-       if (err == -ENOSYS) {
-               fc->no_rename2 = 1;
-               err = -EINVAL;
+               err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+                                        FUSE_RENAME2,
+                                        sizeof(struct fuse_rename2_in));
+               if (err == -ENOSYS) {
+                       fc->no_rename2 = 1;
+                       err = -EINVAL;
+               }
+       } else {
+               err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+                                        FUSE_RENAME,
+                                        sizeof(struct fuse_rename_in));
        }
+
        return err;
+}
 
+static int fuse_rename(struct inode *olddir, struct dentry *oldent,
+                      struct inode *newdir, struct dentry *newent)
+{
+       return fuse_rename2(olddir, oldent, newdir, newent, 0);
 }
 
 static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
        int err;
        bool r;
 
-       if (fi->i_time < get_jiffies_64()) {
+       if (time_before64(fi->i_time, get_jiffies_64())) {
                r = true;
                err = fuse_do_getattr(inode, stat, file);
        } else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
            ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
                struct fuse_inode *fi = get_fuse_inode(inode);
 
-               if (fi->i_time < get_jiffies_64()) {
+               if (time_before64(fi->i_time, get_jiffies_64())) {
                        refreshed = true;
 
                        err = fuse_perm_getattr(inode, mask);
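
The fuse timeout checks above switch from a plain `fi->i_time < get_jiffies_64()` to time_before64(), the standard wrap-safe jiffies comparison. time_before64(a, b) is essentially `(s64)(a - b) < 0`, which stays correct across counter wraparound where a direct `<` does not. An illustrative, standalone userspace check of the idiom:

    #include <stdint.h>
    #include <stdio.h>

    static int before64(uint64_t a, uint64_t b)
    {
            return (int64_t)(a - b) < 0;    /* wrap-safe "a before b" */
    }

    int main(void)
    {
            uint64_t now = UINT64_MAX - 5;  /* counter about to wrap  */
            uint64_t deadline = now + 10;   /* wraps to a small value */

            printf("plain <     : %d\n", now < deadline);          /* 0, wrong */
            printf("time_before : %d\n", before64(now, deadline)); /* 1, right */
            return 0;
    }
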
index 6e16dad..40ac262 100644 (file)
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
        error = -EIO;
        req->ff = fuse_write_file_get(fc, fi);
        if (!req->ff)
-               goto err_free;
+               goto err_nofile;
 
        fuse_write_fill(req, req->ff, page_offset(page), 0);
 
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
 
        return 0;
 
+err_nofile:
+       __free_page(tmp_page);
 err_free:
        fuse_request_free(req);
 err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
        data.ff = NULL;
 
        err = -ENOMEM;
-       data.orig_pages = kzalloc(sizeof(struct page *) *
-                                 FUSE_MAX_PAGES_PER_REQ,
+       data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
+                                 sizeof(struct page *),
                                  GFP_NOFS);
        if (!data.orig_pages)
                goto out;
index 754dcf2..03246cd 100644 (file)
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
        {OPT_ERR,                       NULL}
 };
 
+static int fuse_match_uint(substring_t *s, unsigned int *res)
+{
+       int err = -ENOMEM;
+       char *buf = match_strdup(s);
+       if (buf) {
+               err = kstrtouint(buf, 10, res);
+               kfree(buf);
+       }
+       return err;
+}
+
 static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
 {
        char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
        while ((p = strsep(&opt, ",")) != NULL) {
                int token;
                int value;
+               unsigned uv;
                substring_t args[MAX_OPT_ARGS];
                if (!*p)
                        continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
                        break;
 
                case OPT_USER_ID:
-                       if (match_int(&args[0], &value))
+                       if (fuse_match_uint(&args[0], &uv))
                                return 0;
-                       d->user_id = make_kuid(current_user_ns(), value);
+                       d->user_id = make_kuid(current_user_ns(), uv);
                        if (!uid_valid(d->user_id))
                                return 0;
                        d->user_id_present = 1;
                        break;
 
                case OPT_GROUP_ID:
-                       if (match_int(&args[0], &value))
+                       if (fuse_match_uint(&args[0], &uv))
                                return 0;
-                       d->group_id = make_kgid(current_user_ns(), value);
+                       d->group_id = make_kgid(current_user_ns(), uv);
                        if (!gid_valid(d->group_id))
                                return 0;
                        d->group_id_present = 1;
@@ -895,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->writeback_cache = 1;
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fc->sb->s_time_gran = arg->time_gran;
-                       else
-                               fc->sb->s_time_gran = 1000000000;
-
                } else {
                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                        fc->no_lock = 1;
@@ -926,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-               FUSE_WRITEBACK_CACHE;
+               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
@@ -1006,7 +1015,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
 
-       if (!parse_fuse_opt((char *) data, &d, is_bdev))
+       if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
 
        if (is_bdev) {
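
fuse_match_uint() above exists because match_int() parses into a signed int, so user_id/group_id values above INT_MAX, which are perfectly valid 32-bit uid_t/gid_t values, were rejected before make_kuid()/make_kgid() ever saw them; parsing with kstrtouint() accepts the full unsigned range. A standalone userspace illustration of the boundary (the value is an assumption for the example):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *opt = "3000000000";      /* fits uid_t, not int */
            long long v = strtoll(opt, NULL, 10);

            if (v > INT_MAX)                     /* match_int()-style   */
                    printf("signed parse rejects %s\n", opt);
            if (v >= 0 && v <= UINT_MAX)         /* kstrtouint()-style  */
                    printf("unsigned parse accepts %lld\n", v);
            return 0;
    }
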
index 4fc3a30..26b3f95 100644 (file)
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
        int error = 0;
 
        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
 
        mutex_lock(&fp->f_fl_mutex);
 
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                        goto out;
                flock_lock_file_wait(file,
                                     &(struct file_lock){.fl_type = F_UNLCK});
-               gfs2_glock_dq_wait(fl_gh);
+               gfs2_glock_dq(fl_gh);
                gfs2_holder_reinit(state, flags, fl_gh);
        } else {
                error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
index c355f73..7f513b1 100644 (file)
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                cachep = gfs2_glock_aspace_cachep;
        else
                cachep = gfs2_glock_cachep;
-       gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+       gl = kmem_cache_alloc(cachep, GFP_NOFS);
        if (!gl)
                return -ENOMEM;
 
        memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 
        if (glops->go_flags & GLOF_LVB) {
-               gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+               gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
                if (!gl->gl_lksb.sb_lvbptr) {
                        kmem_cache_free(cachep, gl);
                        return -ENOMEM;
@@ -855,27 +855,6 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
        gh->gh_ip = 0;
 }
 
-/**
- * gfs2_glock_holder_wait
- * @word: unused
- *
- * This function and gfs2_glock_demote_wait both show up in the WCHAN
- * field. Thus I've separated these otherwise identical functions in
- * order to be more informative to the user.
- */
-
-static int gfs2_glock_holder_wait(void *word)
-{
-        schedule();
-        return 0;
-}
-
-static int gfs2_glock_demote_wait(void *word)
-{
-       schedule();
-       return 0;
-}
-
 /**
  * gfs2_glock_wait - wait on a glock acquisition
  * @gh: the glock holder
@@ -888,7 +867,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
        unsigned long time1 = jiffies;
 
        might_sleep();
-       wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
        if (time_after(jiffies, time1 + HZ)) /* have we waited > a second? */
                /* Lengthen the minimum hold time. */
                gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time +
@@ -1128,7 +1107,7 @@ void gfs2_glock_dq_wait(struct gfs2_holder *gh)
        struct gfs2_glock *gl = gh->gh_gl;
        gfs2_glock_dq(gh);
        might_sleep();
-       wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&gl->gl_flags, GLF_DEMOTE, TASK_UNINTERRUPTIBLE);
 }
 
 /**
@@ -1404,12 +1383,16 @@ __acquires(&lru_lock)
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
                        atomic_inc(&lru_count);
                        continue;
                }
+               if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+                       spin_unlock(&gl->gl_spin);
+                       goto add_back_to_lru;
+               }
                clear_bit(GLF_LRU, &gl->gl_flags);
-               spin_unlock(&lru_lock);
                gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1400,7 @@ __acquires(&lru_lock)
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gl->gl_lockref.count--;
                spin_unlock(&gl->gl_spin);
-               spin_lock(&lru_lock);
+               cond_resched_lock(&lru_lock);
        }
 }
 
@@ -1442,7 +1425,7 @@ static long gfs2_scan_glock_lru(int nr)
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
 
                /* Test for being demotable */
-               if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+               if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
                        freed++;
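
The gfs2 LRU disposal fix above takes GLF_LOCK while still under gl_spin (re-queueing the glock via add_back_to_lru on contention) and replaces the manual unlock/relock of lru_lock with cond_resched_lock(), which briefly drops a held spinlock to reschedule only when needed. A generic sketch of that second idiom:

    /* Yield fairly inside a long scan without open-coding
     * spin_unlock()/spin_lock() pairs around every iteration. */
    spin_lock(&lru_lock);
    while (!list_empty(&dispose)) {
            /* ... detach and dispose of one entry ... */
            cond_resched_lock(&lru_lock);   /* may drop, resched, retake */
    }
    spin_unlock(&lru_lock);
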
index fc11007..2ffc67d 100644 (file)
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
- * 
- * Normally we invlidate everything, but if we are moving into
+ *
+ * Normally we invalidate everything, but if we are moving into
  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
  * can keep hold of the metadata, since it won't have changed.
  *
index 91f274d..641383a 100644 (file)
@@ -936,12 +936,6 @@ fail:
        return error;
 }
 
-static int dlm_recovery_wait(void *word)
-{
-       schedule();
-       return 0;
-}
-
 static int control_first_done(struct gfs2_sbd *sdp)
 {
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
@@ -976,7 +970,7 @@ restart:
                fs_info(sdp, "control_first_done wait gen %u\n", start_gen);
 
                wait_on_bit(&ls->ls_recover_flags, DFL_DLM_RECOVERY,
-                           dlm_recovery_wait, TASK_UNINTERRUPTIBLE);
+                           TASK_UNINTERRUPTIBLE);
                goto restart;
        }
 
@@ -1036,8 +1030,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 
        new_size = old_size + RECOVER_SIZE_INC;
 
-       submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
-       result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
+       submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
+       result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
        if (!submit || !result) {
                kfree(submit);
                kfree(result);
index bc564c0..d3eae24 100644 (file)
@@ -1024,20 +1024,13 @@ void gfs2_lm_unmount(struct gfs2_sbd *sdp)
                lm->lm_unmount(sdp);
 }
 
-static int gfs2_journalid_wait(void *word)
-{
-       if (signal_pending(current))
-               return -EINTR;
-       schedule();
-       return 0;
-}
-
 static int wait_on_journal(struct gfs2_sbd *sdp)
 {
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                return 0;
 
-       return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, gfs2_journalid_wait, TASK_INTERRUPTIBLE);
+       return wait_on_bit(&sdp->sd_flags, SDF_NOJOURNALID, TASK_INTERRUPTIBLE)
+               ? -EINTR : 0;
 }
 
 void gfs2_online_uevent(struct gfs2_sbd *sdp)
index 94555d4..573bd3b 100644 (file)
@@ -591,12 +591,6 @@ done:
        wake_up_bit(&jd->jd_flags, JDF_RECOVERY);
 }
 
-static int gfs2_recovery_wait(void *word)
-{
-       schedule();
-       return 0;
-}
-
 int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
 {
        int rv;
@@ -609,7 +603,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd, bool wait)
        BUG_ON(!rv);
 
        if (wait)
-               wait_on_bit(&jd->jd_flags, JDF_RECOVERY, gfs2_recovery_wait,
+               wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
                            TASK_UNINTERRUPTIBLE);
 
        return wait ? jd->jd_recover_error : 0;
index db629d1..f4cb9c0 100644 (file)
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
 
 /**
  * gfs2_free_extlen - Return extent length of free blocks
- * @rbm: Starting position
+ * @rrbm: Starting position
  * @len: Max length to check
  *
  * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 
 /**
  * gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
  *
  */
 
index 1319b5c..2607ff1 100644 (file)
@@ -864,12 +864,6 @@ static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
        return error;
 }
 
-static int gfs2_umount_recovery_wait(void *word)
-{
-       schedule();
-       return 0;
-}
-
 /**
  * gfs2_put_super - Unmount the filesystem
  * @sb: The VFS superblock
@@ -894,7 +888,7 @@ restart:
                        continue;
                spin_unlock(&sdp->sd_jindex_spin);
                wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
-                           gfs2_umount_recovery_wait, TASK_UNINTERRUPTIBLE);
+                           TASK_UNINTERRUPTIBLE);
                goto restart;
        }
        spin_unlock(&sdp->sd_jindex_spin);
index 6eecb7f..5938f39 100644 (file)
@@ -1695,13 +1695,6 @@ int inode_needs_sync(struct inode *inode)
 }
 EXPORT_SYMBOL(inode_needs_sync);
 
-int inode_wait(void *word)
-{
-       schedule();
-       return 0;
-}
-EXPORT_SYMBOL(inode_wait);
-
 /*
  * If we try to find an inode in the inode hash while it is being
  * deleted, we have to wait until the filesystem completes its
index 38cfcf5..5f09370 100644 (file)
@@ -763,12 +763,6 @@ static void warn_dirty_buffer(struct buffer_head *bh)
               bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
 }
 
-static int sleep_on_shadow_bh(void *word)
-{
-       io_schedule();
-       return 0;
-}
-
 /*
  * If the buffer is already part of the current transaction, then there
  * is nothing we need to do.  If it is already part of a prior
@@ -906,8 +900,8 @@ repeat:
                if (buffer_shadow(bh)) {
                        JBUFFER_TRACE(jh, "on shadow: sleep");
                        jbd_unlock_bh_state(bh);
-                       wait_on_bit(&bh->b_state, BH_Shadow,
-                                   sleep_on_shadow_bh, TASK_UNINTERRUPTIBLE);
+                       wait_on_bit_io(&bh->b_state, BH_Shadow,
+                                      TASK_UNINTERRUPTIBLE);
                        goto repeat;
                }
 
@@ -1588,9 +1582,12 @@ int jbd2_journal_stop(handle_t *handle)
         * to perform a synchronous write.  We do this to detect the
         * case where a single process is doing a stream of sync
         * writes.  No point in waiting for joiners in that case.
+        *
+        * Setting max_batch_time to 0 disables this completely.
         */
        pid = current->pid;
-       if (handle->h_sync && journal->j_last_sync_writer != pid) {
+       if (handle->h_sync && journal->j_last_sync_writer != pid &&
+           journal->j_max_batch_time) {
                u64 commit_time, trans_time;
 
                journal->j_last_sync_writer = pid;
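
This jbd2 hunk pairs with the ext4 hunk far above that stops rewriting max_batch_time=0 back to the default: together they make a zero value genuinely disable the sync-write batching heuristic, since jbd2_journal_stop() now also requires a non-zero journal->j_max_batch_time before sleeping to wait for joiners. The decision, as the hunk leaves it:

    if (handle->h_sync && journal->j_last_sync_writer != pid &&
        journal->j_max_batch_time) {    /* 0 now means "batching off" */
            /* measure commit vs. transaction time and maybe sleep to
             * let other writers join this transaction */
    }
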
index e3d37f6..d895b4b 100644 (file)
@@ -39,6 +39,19 @@ struct kernfs_open_node {
        struct list_head        files; /* goes through kernfs_open_file.list */
 };
 
+/*
+ * kernfs_notify() may be called from any context and bounces notifications
+ * through a work item.  To minimize space overhead in kernfs_node, the
+ * pending queue is implemented as a singly linked list of kernfs_nodes.
+ * The list is terminated with a self-referencing sentinel rather than
+ * NULL, so whether a kernfs_node is already queued can be determined by
+ * testing its next pointer for NULL.
+ */
+#define KERNFS_NOTIFY_EOL                      ((void *)&kernfs_notify_list)
+
+static DEFINE_SPINLOCK(kernfs_notify_lock);
+static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
+
 static struct kernfs_open_file *kernfs_of(struct file *file)
 {
        return ((struct seq_file *)file->private_data)->private;
@@ -783,24 +796,25 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
        return DEFAULT_POLLMASK|POLLERR|POLLPRI;
 }
 
-/**
- * kernfs_notify - notify a kernfs file
- * @kn: file to notify
- *
- * Notify @kn such that poll(2) on @kn wakes up.
- */
-void kernfs_notify(struct kernfs_node *kn)
+static void kernfs_notify_workfn(struct work_struct *work)
 {
-       struct kernfs_root *root = kernfs_root(kn);
+       struct kernfs_node *kn;
        struct kernfs_open_node *on;
        struct kernfs_super_info *info;
-       unsigned long flags;
-
-       if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
+repeat:
+       /* pop one off the notify_list */
+       spin_lock_irq(&kernfs_notify_lock);
+       kn = kernfs_notify_list;
+       if (kn == KERNFS_NOTIFY_EOL) {
+               spin_unlock_irq(&kernfs_notify_lock);
                return;
+       }
+       kernfs_notify_list = kn->attr.notify_next;
+       kn->attr.notify_next = NULL;
+       spin_unlock_irq(&kernfs_notify_lock);
 
        /* kick poll */
-       spin_lock_irqsave(&kernfs_open_node_lock, flags);
+       spin_lock_irq(&kernfs_open_node_lock);
 
        on = kn->attr.open;
        if (on) {
@@ -808,12 +822,12 @@ void kernfs_notify(struct kernfs_node *kn)
                wake_up_interruptible(&on->poll);
        }
 
-       spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
+       spin_unlock_irq(&kernfs_open_node_lock);
 
        /* kick fsnotify */
        mutex_lock(&kernfs_mutex);
 
-       list_for_each_entry(info, &root->supers, node) {
+       list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
                struct inode *inode;
                struct dentry *dentry;
 
@@ -833,6 +847,33 @@ void kernfs_notify(struct kernfs_node *kn)
        }
 
        mutex_unlock(&kernfs_mutex);
+       kernfs_put(kn);
+       goto repeat;
+}
+
+/**
+ * kernfs_notify - notify a kernfs file
+ * @kn: file to notify
+ *
+ * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
+ * context.
+ */
+void kernfs_notify(struct kernfs_node *kn)
+{
+       static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
+       unsigned long flags;
+
+       if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
+               return;
+
+       spin_lock_irqsave(&kernfs_notify_lock, flags);
+       if (!kn->attr.notify_next) {
+               kernfs_get(kn);
+               kn->attr.notify_next = kernfs_notify_list;
+               kernfs_notify_list = kn;
+               schedule_work(&kernfs_notify_work);
+       }
+       spin_unlock_irqrestore(&kernfs_notify_lock, flags);
 }
 EXPORT_SYMBOL_GPL(kernfs_notify);
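
The notify path above is a compact example of an intrusive, lock-protected pending queue: a singly linked list threaded through the nodes themselves, terminated by a sentinel (the list head's own address) so that a NULL next pointer means "not queued" and enqueueing is idempotent. A generic sketch of the same structure, not the kernfs code itself:

    struct item {
            struct item *next;              /* NULL means "not queued" */
            /* payload ... */
    };

    #define EOL ((struct item *)&pending)   /* sentinel, never NULL */
    static struct item *pending = EOL;

    static void enqueue(struct item *it)    /* caller holds the lock */
    {
            if (!it->next) {                /* idempotent            */
                    it->next = pending;
                    pending = it;
            }
    }

    static struct item *dequeue(void)       /* caller holds the lock */
    {
            struct item *it = pending;

            if (it == EOL)
                    return NULL;            /* queue is empty        */
            pending = it->next;
            it->next = NULL;                /* back to "not queued"  */
            return it;
    }
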
 
index d171b98..f973ae9 100644 (file)
@@ -211,6 +211,36 @@ void kernfs_kill_sb(struct super_block *sb)
        kernfs_put(root_kn);
 }
 
+/**
+ * kernfs_pin_sb: try to pin the superblock associated with a kernfs_root
+ * @root: the kernfs_root in question
+ * @ns: the namespace tag
+ *
+ * Pin the superblock so that it won't be destroyed by subsequent
+ * operations.  This can be used to block ->kill_sb(), which may be useful
+ * for kernfs users that dynamically manage superblocks.
+ *
+ * Returns NULL if there's no superblock associated with this kernfs_root,
+ * or ERR_PTR(-EINVAL) if the superblock is being freed.
+ */
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns)
+{
+       struct kernfs_super_info *info;
+       struct super_block *sb = NULL;
+
+       mutex_lock(&kernfs_mutex);
+       list_for_each_entry(info, &root->supers, node) {
+               if (info->ns == ns) {
+                       sb = info->sb;
+                       if (!atomic_inc_not_zero(&info->sb->s_active))
+                               sb = ERR_PTR(-EINVAL);
+                       break;
+               }
+       }
+       mutex_unlock(&kernfs_mutex);
+       return sb;
+}
+
 void __init kernfs_init(void)
 {
        kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
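
kernfs_pin_sb() returns with sb->s_active elevated, so a successful caller must drop the pin with deactivate_super() when done; users that dynamically manage superblocks are the apparent audience. A hedged sketch of the calling convention (the surrounding work is hypothetical):

    struct super_block *sb = kernfs_pin_sb(root, ns);

    if (!IS_ERR_OR_NULL(sb)) {
            /* ->kill_sb() cannot run while we hold the pin */
            inspect_super(sb);          /* hypothetical caller work */
            deactivate_super(sb);       /* drop the s_active pin    */
    }
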
index da57c9b..a6f5480 100644 (file)
@@ -325,7 +325,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
                return -ENOMEM;
 
        fl->fl_file = filp;
-       fl->fl_owner = (fl_owner_t)filp;
+       fl->fl_owner = filp;
        fl->fl_pid = current->tgid;
        fl->fl_flags = FL_FLOCK;
        fl->fl_type = type;
@@ -431,7 +431,7 @@ static int lease_init(struct file *filp, long type, struct file_lock *fl)
        if (assign_type(fl, type) != 0)
                return -EINVAL;
 
-       fl->fl_owner = (fl_owner_t)filp;
+       fl->fl_owner = current->files;
        fl->fl_pid = current->tgid;
 
        fl->fl_file = filp;
@@ -1155,7 +1155,6 @@ EXPORT_SYMBOL(posix_lock_file_wait);
 int locks_mandatory_locked(struct file *file)
 {
        struct inode *inode = file_inode(file);
-       fl_owner_t owner = current->files;
        struct file_lock *fl;
 
        /*
@@ -1165,7 +1164,8 @@ int locks_mandatory_locked(struct file *file)
        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!IS_POSIX(fl))
                        continue;
-               if (fl->fl_owner != owner && fl->fl_owner != (fl_owner_t)file)
+               if (fl->fl_owner != current->files &&
+                   fl->fl_owner != file)
                        break;
        }
        spin_unlock(&inode->i_lock);
@@ -1205,7 +1205,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
 
        for (;;) {
                if (filp) {
-                       fl.fl_owner = (fl_owner_t)filp;
+                       fl.fl_owner = filp;
                        fl.fl_flags &= ~FL_SLEEP;
                        error = __posix_lock_file(inode, &fl, NULL);
                        if (!error)
@@ -1948,7 +1948,7 @@ int fcntl_getlk(struct file *filp, unsigned int cmd, struct flock __user *l)
 
                cmd = F_GETLK;
                file_lock.fl_flags |= FL_OFDLCK;
-               file_lock.fl_owner = (fl_owner_t)filp;
+               file_lock.fl_owner = filp;
        }
 
        error = vfs_test_lock(filp, &file_lock);
@@ -2103,7 +2103,7 @@ again:
 
                cmd = F_SETLK;
                file_lock->fl_flags |= FL_OFDLCK;
-               file_lock->fl_owner = (fl_owner_t)filp;
+               file_lock->fl_owner = filp;
                break;
        case F_OFD_SETLKW:
                error = -EINVAL;
@@ -2112,7 +2112,7 @@ again:
 
                cmd = F_SETLKW;
                file_lock->fl_flags |= FL_OFDLCK;
-               file_lock->fl_owner = (fl_owner_t)filp;
+               file_lock->fl_owner = filp;
                /* Fallthrough */
        case F_SETLKW:
                file_lock->fl_flags |= FL_SLEEP;
@@ -2170,7 +2170,7 @@ int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l)
 
                cmd = F_GETLK64;
                file_lock.fl_flags |= FL_OFDLCK;
-               file_lock.fl_owner = (fl_owner_t)filp;
+               file_lock.fl_owner = filp;
        }
 
        error = vfs_test_lock(filp, &file_lock);
@@ -2242,7 +2242,7 @@ again:
 
                cmd = F_SETLK64;
                file_lock->fl_flags |= FL_OFDLCK;
-               file_lock->fl_owner = (fl_owner_t)filp;
+               file_lock->fl_owner = filp;
                break;
        case F_OFD_SETLKW:
                error = -EINVAL;
@@ -2251,7 +2251,7 @@ again:
 
                cmd = F_SETLKW64;
                file_lock->fl_flags |= FL_OFDLCK;
-               file_lock->fl_owner = (fl_owner_t)filp;
+               file_lock->fl_owner = filp;
                /* Fallthrough */
        case F_SETLKW64:
                file_lock->fl_flags |= FL_SLEEP;
@@ -2324,11 +2324,11 @@ void locks_remove_file(struct file *filp)
        if (!inode->i_flock)
                return;
 
-       locks_remove_posix(filp, (fl_owner_t)filp);
+       locks_remove_posix(filp, filp);
 
        if (filp->f_op->flock) {
                struct file_lock fl = {
-                       .fl_owner = (fl_owner_t)filp,
+                       .fl_owner = filp,
                        .fl_pid = current->tgid,
                        .fl_file = filp,
                        .fl_flags = FL_FLOCK,
index bf166e3..187477d 100644 (file)
@@ -73,6 +73,7 @@
 #include <linux/mbcache.h>
 #include <linux/init.h>
 #include <linux/blockgroup_lock.h>
+#include <linux/log2.h>
 
 #ifdef MB_CACHE_DEBUG
 # define mb_debug(f...) do { \
@@ -93,7 +94,7 @@
 
 #define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
 
-#define MB_CACHE_ENTRY_LOCK_BITS       __builtin_log2(NR_BG_LOCKS)
+#define MB_CACHE_ENTRY_LOCK_BITS       ilog2(NR_BG_LOCKS)
 #define        MB_CACHE_ENTRY_LOCK_INDEX(ce)                   \
        (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))
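
This one-line mbcache fix is a correctness repair: GCC's __builtin_log2 is the double-precision math builtin, not an integer helper, so it cannot serve as the integer constant expression this macro needs, while the kernel's ilog2() from <linux/log2.h> constant-folds for constant inputs. A userspace stand-in showing what ilog2() computes (floor of log2, valid for n > 0):

#include <stdio.h>

/* userspace stand-in for the kernel's ilog2(): floor(log2(n)), n > 0 */
static unsigned int ilog2(unsigned long n)
{
	return 8 * sizeof(n) - 1 - __builtin_clzl(n);
}

int main(void)
{
	unsigned long n;

	for (n = 1; n <= 128; n <<= 1)
		printf("ilog2(%3lu) = %u\n", n, ilog2(n));
	return 0;
}
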
 
index 985c6f3..9eb787e 100644 (file)
@@ -2256,9 +2256,10 @@ done:
                goto out;
        }
        path->dentry = dentry;
-       path->mnt = mntget(nd->path.mnt);
+       path->mnt = nd->path.mnt;
        if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
                return 1;
+       mntget(path->mnt);
        follow_mount(path);
        error = 0;
 out:
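
The namei.c reordering reads like a vfsmount reference fix: mntget() used to be taken before the should_follow_link() early return, and nothing on that return-1 path dropped the reference (this rationale is inferred from the shape of the diff, not stated in it). A toy model of the take-the-reference-after-the-last-early-return pattern:

#include <stdio.h>

struct mount { int refcount; };

static void mntget(struct mount *m) { m->refcount++; }
static void mntput(struct mount *m) { m->refcount--; }

/* buggy shape: the reference is taken before an early return that
 * transfers no ownership, so one put is missing on that path */
static int lookup_buggy(struct mount *m, int is_link)
{
	mntget(m);
	if (is_link)
		return 1;	/* caller never does mntput(): leak */
	mntput(m);
	return 0;
}

/* fixed shape, mirroring the diff: take the reference only once the
 * early return can no longer happen */
static int lookup_fixed(struct mount *m, int is_link)
{
	if (is_link)
		return 1;
	mntget(m);
	mntput(m);
	return 0;
}

int main(void)
{
	struct mount m = { 0 };

	lookup_buggy(&m, 1);
	printf("after buggy link path: refcount=%d (leaked)\n", m.refcount);

	m.refcount = 0;
	lookup_fixed(&m, 1);
	printf("after fixed link path: refcount=%d\n", m.refcount);
	return 0;
}
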
index 8f98138..f11b9ee 100644 (file)
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
        spin_unlock(&dreq->lock);
 
        while (!list_empty(&hdr->pages)) {
-               bool do_destroy = true;
 
                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
                case NFS_IOHDR_NEED_COMMIT:
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo);
-                       do_destroy = false;
                }
                nfs_unlock_and_release_request(req);
        }
index 4042ff5..524dd80 100644 (file)
@@ -361,8 +361,8 @@ start:
         * Prevent starvation issues if someone is doing a consistency
         * sync-to-disk
         */
-       ret = wait_on_bit(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
-                       nfs_wait_bit_killable, TASK_KILLABLE);
+       ret = wait_on_bit_action(&NFS_I(mapping->host)->flags, NFS_INO_FLUSHING,
+                                nfs_wait_bit_killable, TASK_KILLABLE);
        if (ret)
                return ret;
 
index 44bf014..e2a0361 100644 (file)
@@ -783,8 +783,8 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
 static void nfs4_wait_ds_connect(struct nfs4_pnfs_ds *ds)
 {
        might_sleep();
-       wait_on_bit(&ds->ds_state, NFS4DS_CONNECTING,
-                       nfs_wait_bit_killable, TASK_KILLABLE);
+       wait_on_bit_action(&ds->ds_state, NFS4DS_CONNECTING,
+                          nfs_wait_bit_killable, TASK_KILLABLE);
 }
 
 static void nfs4_clear_ds_conn_bit(struct nfs4_pnfs_ds *ds)
index c496f8a..abd37a3 100644 (file)
@@ -75,7 +75,7 @@ nfs_fattr_to_ino_t(struct nfs_fattr *fattr)
  * nfs_wait_bit_killable - helper for functions that are sleeping on bit locks
  * @key: wait bit key describing the bit lock being waited on
  */
-int nfs_wait_bit_killable(void *word)
+int nfs_wait_bit_killable(struct wait_bit_key *key)
 {
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;
@@ -147,6 +147,17 @@ int nfs_sync_mapping(struct address_space *mapping)
        return ret;
 }
 
+static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
+{
+       struct nfs_inode *nfsi = NFS_I(inode);
+
+       if (inode->i_mapping->nrpages == 0)
+               flags &= ~NFS_INO_INVALID_DATA;
+       nfsi->cache_validity |= flags;
+       if (flags & NFS_INO_INVALID_DATA)
+               nfs_fscache_invalidate(inode);
+}
+
 /*
  * Invalidate the local caches
  */
@@ -162,17 +173,16 @@ static void nfs_zap_caches_locked(struct inode *inode)
 
        memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
-               nfs_fscache_invalidate(inode);
-               nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+               nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
                                        | NFS_INO_INVALID_DATA
                                        | NFS_INO_INVALID_ACCESS
                                        | NFS_INO_INVALID_ACL
-                                       | NFS_INO_REVAL_PAGECACHE;
+                                       | NFS_INO_REVAL_PAGECACHE);
        } else
-               nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+               nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
                                        | NFS_INO_INVALID_ACCESS
                                        | NFS_INO_INVALID_ACL
-                                       | NFS_INO_REVAL_PAGECACHE;
+                                       | NFS_INO_REVAL_PAGECACHE);
        nfs_zap_label_cache_locked(nfsi);
 }
 
@@ -187,8 +197,7 @@ void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
 {
        if (mapping->nrpages != 0) {
                spin_lock(&inode->i_lock);
-               NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
-               nfs_fscache_invalidate(inode);
+               nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
                spin_unlock(&inode->i_lock);
        }
 }
@@ -209,7 +218,7 @@ EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
 void nfs_invalidate_atime(struct inode *inode)
 {
        spin_lock(&inode->i_lock);
-       NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
@@ -369,7 +378,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                inode->i_mode = fattr->mode;
                if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
                                && nfs_server_capable(inode, NFS_CAP_MODE))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                /* Why so? Because we want revalidate for devices/FIFOs, and
                 * that's precisely what we have in nfs_file_inode_operations.
                 */
@@ -415,36 +424,36 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                if (fattr->valid & NFS_ATTR_FATTR_ATIME)
                        inode->i_atime = fattr->atime;
                else if (nfs_server_capable(inode, NFS_CAP_ATIME))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_MTIME)
                        inode->i_mtime = fattr->mtime;
                else if (nfs_server_capable(inode, NFS_CAP_MTIME))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_CTIME)
                        inode->i_ctime = fattr->ctime;
                else if (nfs_server_capable(inode, NFS_CAP_CTIME))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
                        inode->i_version = fattr->change_attr;
                else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_SIZE)
                        inode->i_size = nfs_size_to_loff_t(fattr->size);
                else
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_REVAL_PAGECACHE;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
+                               | NFS_INO_REVAL_PAGECACHE);
                if (fattr->valid & NFS_ATTR_FATTR_NLINK)
                        set_nlink(inode, fattr->nlink);
                else if (nfs_server_capable(inode, NFS_CAP_NLINK))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_OWNER)
                        inode->i_uid = fattr->uid;
                else if (nfs_server_capable(inode, NFS_CAP_OWNER))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_GROUP)
                        inode->i_gid = fattr->gid;
                else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
                        inode->i_blocks = fattr->du.nfs2.blocks;
                if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -550,6 +559,9 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
 
        spin_lock(&inode->i_lock);
        i_size_write(inode, offset);
+       /* Optimisation: truncating to 0 empties the page cache,
+        * so there is no cached data left to invalidate */
+       if (offset == 0)
+               NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
        spin_unlock(&inode->i_lock);
 
        truncate_pagecache(inode, offset);
@@ -578,7 +590,8 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
                        inode->i_uid = attr->ia_uid;
                if ((attr->ia_valid & ATTR_GID) != 0)
                        inode->i_gid = attr->ia_gid;
-               NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+               nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
+                               | NFS_INO_INVALID_ACL);
                spin_unlock(&inode->i_lock);
        }
        if ((attr->ia_valid & ATTR_SIZE) != 0) {
@@ -1061,8 +1074,8 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
         * the bit lock here if it looks like we're going to be doing that.
         */
        for (;;) {
-               ret = wait_on_bit(bitlock, NFS_INO_INVALIDATING,
-                                 nfs_wait_bit_killable, TASK_KILLABLE);
+               ret = wait_on_bit_action(bitlock, NFS_INO_INVALIDATING,
+                                        nfs_wait_bit_killable, TASK_KILLABLE);
                if (ret)
                        goto out;
                spin_lock(&inode->i_lock);
@@ -1101,7 +1114,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
                        && inode->i_version == fattr->pre_change_attr) {
                inode->i_version = fattr->change_attr;
                if (S_ISDIR(inode->i_mode))
-                       nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
                ret |= NFS_INO_INVALID_ATTR;
        }
        /* If we have atomic WCC data, we may update some attributes */
@@ -1117,7 +1130,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
                        && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
                memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
                if (S_ISDIR(inode->i_mode))
-                       nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
                ret |= NFS_INO_INVALID_ATTR;
        }
        if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
@@ -1128,9 +1141,6 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
                ret |= NFS_INO_INVALID_ATTR;
        }
 
-       if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-               nfs_fscache_invalidate(inode);
-
        return ret;
 }
 
@@ -1189,7 +1199,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
                invalid |= NFS_INO_INVALID_ATIME;
 
        if (invalid != 0)
-               nfsi->cache_validity |= invalid;
+               nfs_set_cache_invalid(inode, invalid);
 
        nfsi->read_cache_jiffies = fattr->time_start;
        return 0;
@@ -1402,13 +1412,11 @@ EXPORT_SYMBOL_GPL(nfs_refresh_inode);
 
 static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
 {
-       struct nfs_inode *nfsi = NFS_I(inode);
+       unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
 
-       nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
-       if (S_ISDIR(inode->i_mode)) {
-               nfsi->cache_validity |= NFS_INO_INVALID_DATA;
-               nfs_fscache_invalidate(inode);
-       }
+       if (S_ISDIR(inode->i_mode))
+               invalid |= NFS_INO_INVALID_DATA;
+       nfs_set_cache_invalid(inode, invalid);
        if ((fattr->valid & NFS_ATTR_FATTR) == 0)
                return 0;
        return nfs_refresh_inode_locked(inode, fattr);
@@ -1601,6 +1609,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        if ((nfsi->npages == 0) || new_isize > cur_isize) {
                                i_size_write(inode, new_isize);
                                invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+                               invalid &= ~NFS_INO_REVAL_PAGECACHE;
                        }
                        dprintk("NFS: isize change on server for file %s/%ld "
                                        "(%Ld to %Ld)\n",
@@ -1702,10 +1711,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                invalid &= ~NFS_INO_INVALID_DATA;
        if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) ||
                        (save_cache_validity & NFS_INO_REVAL_FORCED))
-               nfsi->cache_validity |= invalid;
-
-       if (invalid & NFS_INO_INVALID_DATA)
-               nfs_fscache_invalidate(inode);
+               nfs_set_cache_invalid(inode, invalid);
 
        return 0;
  out_err:
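
The common thread in the fs/nfs/inode.c hunks is that every cache_validity update now funnels through the new nfs_set_cache_invalid() helper, so the nfs_fscache_invalidate() side effect can no longer be forgotten at a call site and NFS_INO_INVALID_DATA is suppressed when the mapping holds no pages. A stripped-down model of the helper (the names below are illustrative, not the kernel's):

#include <stdio.h>

#define INO_INVALID_DATA  0x1
#define INO_INVALID_ATTR  0x2

struct demo_inode {
	unsigned long cache_validity;
	unsigned long nrpages;
	int fscache_invalidations;
};

/* hypothetical mirror of nfs_set_cache_invalid(): every flag update
 * goes through here, so the fscache side effect happens exactly once
 * and the DATA flag is dropped when there is nothing cached */
static void set_cache_invalid(struct demo_inode *inode, unsigned long flags)
{
	if (inode->nrpages == 0)
		flags &= ~INO_INVALID_DATA;
	inode->cache_validity |= flags;
	if (flags & INO_INVALID_DATA)
		inode->fscache_invalidations++;
}

int main(void)
{
	struct demo_inode empty = { 0, 0, 0 };
	struct demo_inode cached = { 0, 4, 0 };

	set_cache_invalid(&empty, INO_INVALID_DATA | INO_INVALID_ATTR);
	set_cache_invalid(&cached, INO_INVALID_DATA);

	printf("empty:  validity=%#lx fscache=%d\n",
	       empty.cache_validity, empty.fscache_invalidations);
	printf("cached: validity=%#lx fscache=%d\n",
	       cached.cache_validity, cached.fscache_invalidations);
	return 0;
}
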
index 82ddbf4..617f366 100644 (file)
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
 int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
 int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
                      const struct rpc_call_ops *, int, int);
+void nfs_free_request(struct nfs_page *req);
 
 static inline void nfs_iocounter_init(struct nfs_io_counter *c)
 {
@@ -347,7 +348,7 @@ extern int nfs_drop_inode(struct inode *);
 extern void nfs_clear_inode(struct inode *);
 extern void nfs_evict_inode(struct inode *);
 void nfs_zap_acl_cache(struct inode *inode);
-extern int nfs_wait_bit_killable(void *word);
+extern int nfs_wait_bit_killable(struct wait_bit_key *key);
 
 /* super.c */
 extern const struct super_operations nfs_sops;
index 871d6ed..8f854dd 100644 (file)
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
        &posix_acl_default_xattr_handler,
        NULL,
 };
+
+static int
+nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
+               size_t size, ssize_t *result)
+{
+       struct posix_acl *acl;
+       char *p = data + *result;
+
+       acl = get_acl(inode, type);
+       if (!acl)
+               return 0;
+
+       posix_acl_release(acl);
+
+       *result += strlen(name);
+       *result += 1;
+       if (!size)
+               return 0;
+       if (*result > size)
+               return -ERANGE;
+
+       strcpy(p, name);
+       return 0;
+}
+
+ssize_t
+nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
+{
+       struct inode *inode = dentry->d_inode;
+       ssize_t result = 0;
+       int error;
+
+       error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
+                       POSIX_ACL_XATTR_ACCESS, data, size, &result);
+       if (error)
+               return error;
+
+       error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
+                       POSIX_ACL_XATTR_DEFAULT, data, size, &result);
+       if (error)
+               return error;
+       return result;
+}
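
The new nfs3_listxattr() implements the standard listxattr contract visible above: size == 0 means "report the buffer length required", an undersized buffer yields -ERANGE, and names are returned as concatenated NUL-terminated strings. The same contract seen from userspace, as a probe-then-fill sketch using the Linux-specific <sys/xattr.h> API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : ".";
	ssize_t len = listxattr(path, NULL, 0);	/* size probe */
	char *buf, *p;

	if (len < 0)
		return perror("listxattr"), 1;
	if (len == 0)
		return 0;			/* no xattrs */

	buf = malloc(len);
	if (!buf)
		return 1;
	len = listxattr(path, buf, len);
	if (len < 0)
		return perror("listxattr"), 1;

	for (p = buf; p < buf + len; p += strlen(p) + 1)
		printf("%s\n", p);		/* names are NUL-separated */
	free(buf);
	return 0;
}
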
index e7daa42..f0afa29 100644 (file)
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
-       .listxattr      = generic_listxattr,
+       .listxattr      = nfs3_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
-       .listxattr      = generic_listxattr,
+       .listxattr      = nfs3_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
index f63cb87..ba2affa 100644 (file)
@@ -230,7 +230,7 @@ int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
 extern struct file_system_type nfs4_fs_type;
 
 /* nfs4namespace.c */
-struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
+struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *, struct qstr *);
 struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
                               struct nfs_fh *, struct nfs_fattr *);
 int nfs4_replace_transport(struct nfs_server *server,
index 3d5dbf8..3d83cb1 100644 (file)
@@ -139,16 +139,22 @@ static size_t nfs_parse_server_name(char *string, size_t len,
  * @server: NFS server struct
  * @flavors: List of security tuples returned by SECINFO procedure
  *
- * Return the pseudoflavor of the first security mechanism in
- * "flavors" that is locally supported.  Return RPC_AUTH_UNIX if
- * no matching flavor is found in the array.  The "flavors" array
+ * Return an rpc client that uses the first security mechanism in
+ * "flavors" that is locally supported.  The "flavors" array
  * is searched in the order returned from the server, per RFC 3530
- * recommendation.
+ * recommendation, and each flavor is checked for membership in the
+ * sec= mount option list if it exists.
+ *
+ * Return ERR_PTR(-EPERM) if no matching flavor is found in the array.
+ *
+ * Please call rpc_shutdown_client() when you are done with this rpc client.
+ */
-static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server,
+static struct rpc_clnt *nfs_find_best_sec(struct rpc_clnt *clnt,
+                                         struct nfs_server *server,
                                          struct nfs4_secinfo_flavors *flavors)
 {
-       rpc_authflavor_t pseudoflavor;
+       rpc_authflavor_t pflavor;
        struct nfs4_secinfo4 *secinfo;
        unsigned int i;
 
@@ -159,62 +165,73 @@ static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server,
                case RPC_AUTH_NULL:
                case RPC_AUTH_UNIX:
                case RPC_AUTH_GSS:
-                       pseudoflavor = rpcauth_get_pseudoflavor(secinfo->flavor,
+                       pflavor = rpcauth_get_pseudoflavor(secinfo->flavor,
                                                        &secinfo->flavor_info);
-                       /* make sure pseudoflavor matches sec= mount opt */
-                       if (pseudoflavor != RPC_AUTH_MAXFLAVOR &&
-                           nfs_auth_info_match(&server->auth_info,
-                                               pseudoflavor))
-                               return pseudoflavor;
-                       break;
+                       /* does the pseudoflavor match a sec= mount opt? */
+                       if (pflavor != RPC_AUTH_MAXFLAVOR &&
+                           nfs_auth_info_match(&server->auth_info, pflavor)) {
+                               struct rpc_clnt *new;
+                               struct rpc_cred *cred;
+
+                               /* Cloning creates an rpc_auth for the flavor */
+                               new = rpc_clone_client_set_auth(clnt, pflavor);
+                               if (IS_ERR(new))
+                                       continue;
+                               /*
+                                * Check that the user actually can use the
+                                * flavor. This is mostly for RPC_AUTH_GSS
+                                * where cr_init obtains a gss context.
+                                */
+                               cred = rpcauth_lookupcred(new->cl_auth, 0);
+                               if (IS_ERR(cred)) {
+                                       rpc_shutdown_client(new);
+                                       continue;
+                               }
+                               put_rpccred(cred);
+                               return new;
+                       }
                }
        }
-
-       /* if there were any sec= options then nothing matched */
-       if (server->auth_info.flavor_len > 0)
-               return -EPERM;
-
-       return RPC_AUTH_UNIX;
+       return ERR_PTR(-EPERM);
 }
 
-static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
+/**
+ * nfs4_negotiate_security - negotiate a security flavor after NFS4ERR_WRONGSEC
+ * @clnt: RPC client to clone
+ * @inode: directory inode
+ * @name: lookup name
+ *
+ * In response to an NFS4ERR_WRONGSEC on lookup, return an rpc_clnt that
+ * uses the best available security flavor with respect to the secinfo
+ * flavor list and the sec= mount options.
+ *
+ * Please call rpc_shutdown_client() when you are done with this rpc client.
+ */
+struct rpc_clnt *
+nfs4_negotiate_security(struct rpc_clnt *clnt, struct inode *inode,
+                                       struct qstr *name)
 {
        struct page *page;
        struct nfs4_secinfo_flavors *flavors;
-       rpc_authflavor_t flavor;
+       struct rpc_clnt *new;
        int err;
 
        page = alloc_page(GFP_KERNEL);
        if (!page)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
+
        flavors = page_address(page);
 
        err = nfs4_proc_secinfo(inode, name, flavors);
        if (err < 0) {
-               flavor = err;
+               new = ERR_PTR(err);
                goto out;
        }
 
-       flavor = nfs_find_best_sec(NFS_SERVER(inode), flavors);
+       new = nfs_find_best_sec(clnt, NFS_SERVER(inode), flavors);
 
 out:
        put_page(page);
-       return flavor;
-}
-
-/*
- * Please call rpc_shutdown_client() when you are done with this client.
- */
-struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode,
-                                       struct qstr *name)
-{
-       rpc_authflavor_t flavor;
-
-       flavor = nfs4_negotiate_security(inode, name);
-       if ((int)flavor < 0)
-               return ERR_PTR((int)flavor);
-
-       return rpc_clone_client_set_auth(clnt, flavor);
+       return new;
 }
 
 static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
@@ -397,11 +414,6 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
 
        if (client->cl_auth->au_flavor != flavor)
                flavor = client->cl_auth->au_flavor;
-       else {
-               rpc_authflavor_t new = nfs4_negotiate_security(dir, name);
-               if ((int)new >= 0)
-                       flavor = new;
-       }
        mnt = nfs_do_submount(dentry, fh, fattr, flavor);
 out:
        rpc_shutdown_client(client);
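
Because nfs_find_best_sec() and nfs4_negotiate_security() now return a struct rpc_clnt * instead of an rpc_authflavor_t, errors travel inside the returned pointer via the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. A self-contained userspace imitation of that idiom (the real helpers live in <linux/err.h>):

#include <errno.h>
#include <stdio.h>

/* userspace stand-ins for the kernel's <linux/err.h> helpers: small
 * negative errno values map into the top page of the address space,
 * so one pointer return carries either an object or an error code */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095;
}

struct rpc_clnt { int dummy; };

static struct rpc_clnt *negotiate_security(int have_match)
{
	static struct rpc_clnt clnt;

	return have_match ? &clnt : ERR_PTR(-EPERM);
}

int main(void)
{
	struct rpc_clnt *clnt = negotiate_security(0);

	if (IS_ERR(clnt))
		printf("negotiation failed: errno %ld\n", -PTR_ERR(clnt));
	else
		printf("got client %p\n", (void *)clnt);
	return 0;
}
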
index 285ad53..4bf3d97 100644 (file)
@@ -3247,7 +3247,7 @@ static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
                        err = -EPERM;
                        if (client != *clnt)
                                goto out;
-                       client = nfs4_create_sec_client(client, dir, name);
+                       client = nfs4_negotiate_security(client, dir, name);
                        if (IS_ERR(client))
                                return PTR_ERR(client);
 
index 848f685..42f1211 100644 (file)
@@ -1251,8 +1251,8 @@ int nfs4_wait_clnt_recover(struct nfs_client *clp)
        might_sleep();
 
        atomic_inc(&clp->cl_count);
-       res = wait_on_bit(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
-                       nfs_wait_bit_killable, TASK_KILLABLE);
+       res = wait_on_bit_action(&clp->cl_state, NFS4CLNT_MANAGER_RUNNING,
+                                nfs_wait_bit_killable, TASK_KILLABLE);
        if (res)
                goto out;
        if (clp->cl_cons_state < 0)
index b6ee3a6..0be5050 100644 (file)
@@ -29,8 +29,6 @@
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
-static void nfs_free_request(struct nfs_page *);
-
 static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
 {
        p->npages = pagecount;
@@ -117,7 +115,7 @@ __nfs_iocounter_wait(struct nfs_io_counter *c)
                set_bit(NFS_IO_INPROGRESS, &c->flags);
                if (atomic_read(&c->io_count) == 0)
                        break;
-               ret = nfs_wait_bit_killable(&c->flags);
+               ret = nfs_wait_bit_killable(&q.key);
        } while (atomic_read(&c->io_count) != 0);
        finish_wait(wq, &q.wait);
        return ret;
@@ -138,12 +136,6 @@ nfs_iocounter_wait(struct nfs_io_counter *c)
        return __nfs_iocounter_wait(c);
 }
 
-static int nfs_wait_bit_uninterruptible(void *word)
-{
-       io_schedule();
-       return 0;
-}
-
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
@@ -158,7 +150,6 @@ nfs_page_group_lock(struct nfs_page *req)
        WARN_ON_ONCE(head != head->wb_head);
 
        wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
-                       nfs_wait_bit_uninterruptible,
                        TASK_UNINTERRUPTIBLE);
 }
 
@@ -239,20 +230,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
        WARN_ON_ONCE(prev == req);
 
        if (!prev) {
+               /* a head request */
                req->wb_head = req;
                req->wb_this_page = req;
        } else {
+               /* a subrequest */
                WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
                WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
                req->wb_head = prev->wb_head;
                req->wb_this_page = prev->wb_this_page;
                prev->wb_this_page = req;
 
+               /* All subrequests take a ref on the head request until
+                * nfs_page_group_destroy is called */
+               kref_get(&req->wb_head->wb_kref);
+
                /* grab extra ref if head request has extra ref from
                 * the write/commit path to handle handoff between write
                 * and commit lists */
-               if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
+               if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
+                       set_bit(PG_INODE_REF, &req->wb_flags);
                        kref_get(&req->wb_kref);
+               }
        }
 }
 
@@ -269,6 +268,10 @@ nfs_page_group_destroy(struct kref *kref)
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
        struct nfs_page *tmp, *next;
 
+       /* subrequests must release the ref on the head request */
+       if (req->wb_head != req)
+               nfs_release_request(req->wb_head);
+
        if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
                return;
 
@@ -394,7 +397,7 @@ static void nfs_clear_request(struct nfs_page *req)
  *
  * Note: Should never be called with the spinlock held!
  */
-static void nfs_free_request(struct nfs_page *req)
+void nfs_free_request(struct nfs_page *req)
 {
        WARN_ON_ONCE(req->wb_this_page != req);
 
@@ -425,9 +428,8 @@ void nfs_release_request(struct nfs_page *req)
 int
 nfs_wait_on_request(struct nfs_page *req)
 {
-       return wait_on_bit(&req->wb_flags, PG_BUSY,
-                       nfs_wait_bit_uninterruptible,
-                       TASK_UNINTERRUPTIBLE);
+       return wait_on_bit_io(&req->wb_flags, PG_BUSY,
+                             TASK_UNINTERRUPTIBLE);
 }
 
 /*
@@ -925,7 +927,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                        nfs_pageio_doio(desc);
                        if (desc->pg_error < 0)
                                return 0;
-                       desc->pg_moreio = 0;
                        if (desc->pg_recoalesce)
                                return 0;
                        /* retry add_request for this subreq */
@@ -972,6 +973,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
                desc->pg_count = 0;
                desc->pg_base = 0;
                desc->pg_recoalesce = 0;
+               desc->pg_moreio = 0;
 
                while (!list_empty(&head)) {
                        struct nfs_page *req;
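
The wait_on_bit() → wait_on_bit_action()/wait_on_bit_io() churn across these NFS files tracks this cycle's wait_bit API rework: the plain wrappers now supply the common schedule-based action themselves, and only callers with a custom action such as nfs_wait_bit_killable() pass a callback. The kernel primitive has no userspace twin; a loose pthread analogue of "sleep until a flag bit clears", for flavour only:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static unsigned long flags = 1UL;	/* bit 0 = "busy" */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* sleep until bit 0 of flags is cleared - the condvar wait plays the
 * role of the "action" that wait_on_bit_action() lets callers supply */
static void wait_on_bit0(void)
{
	pthread_mutex_lock(&lock);
	while (flags & 1UL)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

static void *clearer(void *arg)
{
	sleep(1);
	pthread_mutex_lock(&lock);
	flags &= ~1UL;
	pthread_cond_broadcast(&cond);	/* wake_up_bit() analogue */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, clearer, NULL);
	wait_on_bit0();
	puts("bit cleared, waiter resumed");
	pthread_join(t, NULL);
	return 0;
}
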
index 6fdcd23..a8914b3 100644 (file)
@@ -1885,7 +1885,7 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
        if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
                if (!sync)
                        goto out;
-               status = wait_on_bit_lock(&nfsi->flags,
+               status = wait_on_bit_lock_action(&nfsi->flags,
                                NFS_INO_LAYOUTCOMMITTING,
                                nfs_wait_bit_killable,
                                TASK_KILLABLE);
index 3ee5af4..962c9ee 100644 (file)
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
 static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_clear_request_commit(struct nfs_page *req);
 
 static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 }
 
+/*
+ * nfs_page_find_head_request_locked - find head request associated with @page
+ *
+ * must be called while holding the inode lock.
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
 static struct nfs_page *
-nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
+nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
 {
        struct nfs_page *req = NULL;
 
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
                /* Linearly search the commit list for the correct req */
                list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
                        if (freq->wb_page == page) {
-                               req = freq;
+                               req = freq->wb_head;
                                break;
                        }
                }
        }
 
-       if (req)
+       if (req) {
+               WARN_ON_ONCE(req->wb_head != req);
+
                kref_get(&req->wb_kref);
+       }
 
        return req;
 }
 
-static struct nfs_page *nfs_page_find_request(struct page *page)
+/*
+ * nfs_page_find_head_request - find head request associated with @page
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
+static struct nfs_page *nfs_page_find_head_request(struct page *page)
 {
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req = NULL;
 
        spin_lock(&inode->i_lock);
-       req = nfs_page_find_request_locked(NFS_I(inode), page);
+       req = nfs_page_find_head_request_locked(NFS_I(inode), page);
        spin_unlock(&inode->i_lock);
        return req;
 }
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
 
-static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
+
+/* nfs_page_group_clear_bits
+ *   @req - an nfs request
+ * clears all page group related bits from @req
+ */
+static void
+nfs_page_group_clear_bits(struct nfs_page *req)
+{
+       clear_bit(PG_TEARDOWN, &req->wb_flags);
+       clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
+       clear_bit(PG_UPTODATE, &req->wb_flags);
+       clear_bit(PG_WB_END, &req->wb_flags);
+       clear_bit(PG_REMOVE, &req->wb_flags);
+}
+
+
+/*
+ * nfs_unroll_locks_and_wait -  unlock all newly locked reqs and wait on @req
+ *
+ * this is a helper function for nfs_lock_and_join_requests
+ *
+ * @inode - inode associated with request page group, must be holding inode lock
+ * @head  - head request of page group, must be holding head lock
+ * @req   - request that couldn't lock and needs to wait on the req bit lock
+ * @nonblock - if true, don't actually wait
+ *
+ * NOTE: this must be called holding page_group bit lock and inode spin lock
+ *       and BOTH will be released before returning.
+ *
+ * returns 0 on success, < 0 on error.
+ */
+static int
+nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
+                         struct nfs_page *req, bool nonblock)
+       __releases(&inode->i_lock)
+{
+       struct nfs_page *tmp;
+       int ret;
+
+       /* relinquish all the locks successfully grabbed this run */
+       for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
+               nfs_unlock_request(tmp);
+
+       WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
+
+       /* grab a ref on the request that will be waited on */
+       kref_get(&req->wb_kref);
+
+       nfs_page_group_unlock(head);
+       spin_unlock(&inode->i_lock);
+
+       /* release ref from nfs_page_find_head_request_locked */
+       nfs_release_request(head);
+
+       if (!nonblock)
+               ret = nfs_wait_on_request(req);
+       else
+               ret = -EAGAIN;
+       nfs_release_request(req);
+
+       return ret;
+}
+
+/*
+ * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
+ *
+ * @destroy_list - request list (using wb_this_page) terminated by @old_head
+ * @old_head - the old head of the list
+ *
+ * All subrequests must be locked and removed from all lists, so at this point
+ * they are only "active" in this function, and possibly in nfs_wait_on_request
+ * with a reference held by some other context.
+ */
+static void
+nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+                                struct nfs_page *old_head)
+{
+       while (destroy_list) {
+               struct nfs_page *subreq = destroy_list;
+
+               destroy_list = (subreq->wb_this_page == old_head) ?
+                                  NULL : subreq->wb_this_page;
+
+               WARN_ON_ONCE(old_head != subreq->wb_head);
+
+               /* make sure old group is not used */
+               subreq->wb_head = subreq;
+               subreq->wb_this_page = subreq;
+
+               nfs_clear_request_commit(subreq);
+
+               /* subreq is now totally disconnected from page group or any
+                * write / commit lists. last chance to wake any waiters */
+               nfs_unlock_request(subreq);
+
+               if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
+                       /* release ref on old head request */
+                       nfs_release_request(old_head);
+
+                       nfs_page_group_clear_bits(subreq);
+
+                       /* release the PG_INODE_REF reference */
+                       if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
+                               nfs_release_request(subreq);
+                       else
+                               WARN_ON_ONCE(1);
+               } else {
+                       WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
+                       /* zombie requests have already released the last
+                        * reference and were waiting on the rest of the
+                        * group to complete. Since it's no longer part of a
+                        * group, simply free the request */
+                       nfs_page_group_clear_bits(subreq);
+                       nfs_free_request(subreq);
+               }
+       }
+}
+
+/*
+ * nfs_lock_and_join_requests - join all subreqs to the head req and return
+ *                              a locked reference, cancelling any pending
+ *                              operations for this page.
+ *
+ * @page - the page used to lookup the "page group" of nfs_page structures
+ * @nonblock - if true, don't block waiting for request locks
+ *
+ * This function joins all sub requests to the head request by first
+ * locking all requests in the group, cancelling any pending operations
+ * and finally updating the head request to cover the whole range covered by
+ * the (former) group.  All subrequests are removed from any write or commit
+ * lists, unlinked from the group and destroyed.
+ *
+ * Returns a locked, referenced pointer to the head request - which after
+ * this call is guaranteed to be the only request associated with the page.
+ * Returns NULL if no requests are found for @page, or an ERR_PTR if an
+ * error was encountered.
+ */
+static struct nfs_page *
+nfs_lock_and_join_requests(struct page *page, bool nonblock)
 {
        struct inode *inode = page_file_mapping(page)->host;
-       struct nfs_page *req;
+       struct nfs_page *head, *subreq;
+       struct nfs_page *destroy_list = NULL;
+       unsigned int total_bytes;
        int ret;
 
+try_again:
+       total_bytes = 0;
+
+       WARN_ON_ONCE(destroy_list);
+
        spin_lock(&inode->i_lock);
-       for (;;) {
-               req = nfs_page_find_request_locked(NFS_I(inode), page);
-               if (req == NULL)
-                       break;
-               if (nfs_lock_request(req))
-                       break;
-               /* Note: If we hold the page lock, as is the case in nfs_writepage,
-                *       then the call to nfs_lock_request() will always
-                *       succeed provided that someone hasn't already marked the
-                *       request as dirty (in which case we don't care).
-                */
+
+       /*
+        * A reference is taken only on the head request which acts as a
+        * reference to the whole page group - the group will not be destroyed
+        * until the head reference is released.
+        */
+       head = nfs_page_find_head_request_locked(NFS_I(inode), page);
+
+       if (!head) {
                spin_unlock(&inode->i_lock);
-               if (!nonblock)
-                       ret = nfs_wait_on_request(req);
-               else
-                       ret = -EAGAIN;
-               nfs_release_request(req);
-               if (ret != 0)
+               return NULL;
+       }
+
+       /* lock each request in the page group */
+       nfs_page_group_lock(head);
+       subreq = head;
+       do {
+               /*
+                * Subrequests are always contiguous, non overlapping
+                * and in order. If not, it's a programming error.
+                */
+               WARN_ON_ONCE(subreq->wb_offset !=
+                    (head->wb_offset + total_bytes));
+
+               /* keep track of how many bytes this group covers */
+               total_bytes += subreq->wb_bytes;
+
+               if (!nfs_lock_request(subreq)) {
+                       /* releases page group bit lock and
+                        * inode spin lock and all references */
+                       ret = nfs_unroll_locks_and_wait(inode, head,
+                               subreq, nonblock);
+
+                       if (ret == 0)
+                               goto try_again;
+
                        return ERR_PTR(ret);
-               spin_lock(&inode->i_lock);
+               }
+
+               subreq = subreq->wb_this_page;
+       } while (subreq != head);
+
+       /* Now that all requests are locked, make sure they aren't on any list.
+        * Commit list removal accounting is done after locks are dropped */
+       subreq = head;
+       do {
+               nfs_list_remove_request(subreq);
+               subreq = subreq->wb_this_page;
+       } while (subreq != head);
+
+       /* unlink subrequests from head, destroy them later */
+       if (head->wb_this_page != head) {
+               /* destroy list will be terminated by head */
+               destroy_list = head->wb_this_page;
+               head->wb_this_page = head;
+
+               /* change head request to cover whole range that
+                * the former page group covered */
+               head->wb_bytes = total_bytes;
        }
+
+       /*
+        * prepare head request to be added to new pgio descriptor
+        */
+       nfs_page_group_clear_bits(head);
+
+       /*
+        * some part of the group was still on the inode list - otherwise
+        * the group wouldn't be involved in async write.
+        * grab a reference for the head request, iff it needs one.
+        */
+       if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
+               kref_get(&head->wb_kref);
+
+       nfs_page_group_unlock(head);
+
+       /* drop lock to clear_request_commit the head req and clean up
+        * requests on destroy list */
        spin_unlock(&inode->i_lock);
-       return req;
+
+       nfs_destroy_unlinked_subrequests(destroy_list, head);
+
+       /* clean up commit list state */
+       nfs_clear_request_commit(head);
+
+       /* still holds ref on head from nfs_page_find_head_request_locked
+        * and still has lock on head from lock loop */
+       return head;
 }
 
 /*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        struct nfs_page *req;
        int ret = 0;
 
-       req = nfs_find_and_lock_request(page, nonblock);
+       req = nfs_lock_and_join_requests(page, nonblock);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
@@ -397,7 +623,7 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
        int err;
 
        /* Stop dirtying of new pages while we sync */
-       err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
+       err = wait_on_bit_lock_action(bitlock, NFS_INO_FLUSHING,
                        nfs_wait_bit_killable, TASK_KILLABLE);
        if (err)
                goto out_err;
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
                set_page_private(req->wb_page, (unsigned long)req);
        }
        nfsi->npages++;
-       set_bit(PG_INODE_REF, &req->wb_flags);
+       /* this is a head request for a page group - mark it as having an
+        * extra reference so subrequests can follow suit */
+       WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
        kref_get(&req->wb_kref);
        spin_unlock(&inode->i_lock);
 }
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
                nfsi->npages--;
                spin_unlock(&inode->i_lock);
        }
-       nfs_release_request(req);
+
+       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+               nfs_release_request(req);
 }
 
 static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
 {
        struct nfs_commit_info cinfo;
        unsigned long bytes = 0;
-       bool do_destroy;
 
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
@@ -668,7 +897,6 @@ remove_req:
 next:
                nfs_unlock_request(req);
                nfs_end_page_writeback(req);
-               do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
                nfs_release_request(req);
        }
 out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
        spin_lock(&inode->i_lock);
 
        for (;;) {
-               req = nfs_page_find_request_locked(NFS_I(inode), page);
+               req = nfs_page_find_head_request_locked(NFS_I(inode), page);
                if (req == NULL)
                        goto out_unlock;
 
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
         * dropped page.
         */
        do {
-               req = nfs_page_find_request(page);
+               req = nfs_page_find_head_request(page);
                if (req == NULL)
                        return 0;
                l_ctx = req->wb_lock_context;
@@ -934,12 +1162,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
 
        if (nfs_have_delegated_attributes(inode))
                goto out;
-       if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
+       if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
                return false;
        smp_rmb();
        if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
                return false;
 out:
+       if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+               return false;
        return PageUptodate(page) != 0;
 }
 
@@ -1473,7 +1703,7 @@ int nfs_commit_inode(struct inode *inode, int how)
                        return error;
                if (!may_wait)
                        goto out_mark_dirty;
-               error = wait_on_bit(&NFS_I(inode)->flags,
+               error = wait_on_bit_action(&NFS_I(inode)->flags,
                                NFS_INO_COMMIT,
                                nfs_wait_bit_killable,
                                TASK_KILLABLE);
@@ -1567,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
        struct nfs_page *req;
        int ret = 0;
 
-       for (;;) {
-               wait_on_page_writeback(page);
-               req = nfs_page_find_request(page);
-               if (req == NULL)
-                       break;
-               if (nfs_lock_request(req)) {
-                       nfs_clear_request_commit(req);
-                       nfs_inode_remove_request(req);
-                       /*
-                        * In case nfs_inode_remove_request has marked the
-                        * page as being dirty
-                        */
-                       cancel_dirty_page(page, PAGE_CACHE_SIZE);
-                       nfs_unlock_and_release_request(req);
-                       break;
-               }
-               ret = nfs_wait_on_request(req);
-               nfs_release_request(req);
-               if (ret < 0)
-                       break;
+       wait_on_page_writeback(page);
+
+       /* blocking call to cancel all requests and join to a single (head)
+        * request */
+       req = nfs_lock_and_join_requests(page, false);
+
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
+       } else if (req) {
+               /* all requests from this page have been cancelled by
+                * nfs_lock_and_join_requests, so just remove the head
+                * request from the inode / page_private pointer and
+                * release it */
+               nfs_inode_remove_request(req);
+               /*
+                * In case nfs_inode_remove_request has marked the
+                * page as being dirty
+                */
+               cancel_dirty_page(page, PAGE_CACHE_SIZE);
+               nfs_unlock_and_release_request(req);
        }
+
        return ret;
 }
 
index 6851b00..8f029db 100644 (file)
@@ -617,15 +617,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 
        switch (create->cr_type) {
        case NF4LNK:
-               /* ugh! we have to null-terminate the linktext, or
-                * vfs_symlink() will choke.  it is always safe to
-                * null-terminate by brute force, since at worst we
-                * will overwrite the first byte of the create namelen
-                * in the XDR buffer, which has already been extracted
-                * during XDR decode.
-                */
-               create->cr_linkname[create->cr_linklen] = 0;
-
                status = nfsd_symlink(rqstp, &cstate->current_fh,
                                      create->cr_name, create->cr_namelen,
                                      create->cr_linkname, create->cr_linklen,
index c0d45ce..2204e1f 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/ratelimit.h>
 #include <linux/sunrpc/svcauth_gss.h>
 #include <linux/sunrpc/addr.h>
+#include <linux/hash.h>
 #include "xdr4.h"
 #include "xdr4cb.h"
 #include "vfs.h"
@@ -364,6 +365,79 @@ static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
        return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
 }
 
+/*
+ * When we recall a delegation, we should be careful not to hand it
+ * out again straight away.
+ * To ensure this we keep a pair of bloom filters ('new' and 'old')
+ * in which the filehandles of recalled delegations are "stored".
+ * If a filehandle appears in either filter, a delegation is blocked.
+ * When a delegation is recalled, the filehandle is stored in the "new"
+ * filter.
+ * Every 30 seconds we swap the filters and clear the "new" one,
+ * unless both are empty of course.
+ *
+ * Each filter is 256 bits.  We hash the filehandle to a 32-bit value and
+ * use each of the low 3 bytes as a bit index into the filter.
+ *
+ * 'state_lock', which is always held when block_delegations() is called,
+ * is used to manage concurrent access.  Testing does not need the lock
+ * except when swapping the two filters.
+ */
+static struct bloom_pair {
+       int     entries, old_entries;
+       time_t  swap_time;
+       int     new; /* index into 'set' */
+       DECLARE_BITMAP(set[2], 256);
+} blocked_delegations;
+
+static int delegation_blocked(struct knfsd_fh *fh)
+{
+       u32 hash;
+       struct bloom_pair *bd = &blocked_delegations;
+
+       if (bd->entries == 0)
+               return 0;
+       if (seconds_since_boot() - bd->swap_time > 30) {
+               spin_lock(&state_lock);
+               if (seconds_since_boot() - bd->swap_time > 30) {
+                       bd->entries -= bd->old_entries;
+                       bd->old_entries = bd->entries;
+                       memset(bd->set[bd->new], 0,
+                              sizeof(bd->set[0]));
+                       bd->new = 1-bd->new;
+                       bd->swap_time = seconds_since_boot();
+               }
+               spin_unlock(&state_lock);
+       }
+       hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
+       if (test_bit(hash&255, bd->set[0]) &&
+           test_bit((hash>>8)&255, bd->set[0]) &&
+           test_bit((hash>>16)&255, bd->set[0]))
+               return 1;
+
+       if (test_bit(hash&255, bd->set[1]) &&
+           test_bit((hash>>8)&255, bd->set[1]) &&
+           test_bit((hash>>16)&255, bd->set[1]))
+               return 1;
+
+       return 0;
+}
+
+static void block_delegations(struct knfsd_fh *fh)
+{
+       u32 hash;
+       struct bloom_pair *bd = &blocked_delegations;
+
+       hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
+
+       __set_bit(hash&255, bd->set[bd->new]);
+       __set_bit((hash>>8)&255, bd->set[bd->new]);
+       __set_bit((hash>>16)&255, bd->set[bd->new]);
+       if (bd->entries == 0)
+               bd->swap_time = seconds_since_boot();
+       bd->entries += 1;
+}
+
 static struct nfs4_delegation *
 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
 {
@@ -372,6 +446,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
        dprintk("NFSD alloc_init_deleg\n");
        if (num_delegations > max_delegations)
                return NULL;
+       if (delegation_blocked(&current_fh->fh_handle))
+               return NULL;
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
        if (dp == NULL)
                return dp;
@@ -2770,6 +2846,8 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
        /* Only place dl_time is set; protected by i_lock: */
        dp->dl_time = get_seconds();
 
+       block_delegations(&dp->dl_fh);
+
        nfsd4_cb_recall(dp);
 }
 
index 2d305a1..944275c 100644 (file)
@@ -600,7 +600,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
                READ_BUF(4);
                create->cr_linklen = be32_to_cpup(p++);
                READ_BUF(create->cr_linklen);
-               SAVEMEM(create->cr_linkname, create->cr_linklen);
+               /*
+                * The VFS will want a null-terminated string, and
+                * null-terminating in place isn't safe since this might
+                * end on a page boundary:
+                */
+               create->cr_linkname =
+                               kmalloc(create->cr_linklen + 1, GFP_KERNEL);
+               if (!create->cr_linkname)
+                       return nfserr_jukebox;
+               memcpy(create->cr_linkname, p, create->cr_linklen);
+               create->cr_linkname[create->cr_linklen] = '\0';
+               defer_free(argp, kfree, create->cr_linkname);
                break;
        case NF4BLK:
        case NF4CHR:
@@ -2630,7 +2641,7 @@ nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
 {
        __be32 *p;
 
-       p = xdr_reserve_space(xdr, 6);
+       p = xdr_reserve_space(xdr, 20);
        if (!p)
                return NULL;
        *p++ = htonl(2);
@@ -2687,6 +2698,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
                nfserr = nfserr_toosmall;
                goto fail;
        case nfserr_noent:
+               xdr_truncate_encode(xdr, start_offset);
                goto skip_entry;
        default:
                /*
@@ -2867,6 +2879,7 @@ again:
                 * return the conflicting open:
                 */
                if (conf->len) {
+                       kfree(conf->data);
                        conf->len = 0;
                        conf->data = NULL;
                        goto again;
@@ -2879,6 +2892,7 @@ again:
        if (conf->len) {
                p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
                p = xdr_encode_opaque(p, conf->data, conf->len);
+               kfree(conf->data);
        }  else {  /* non - nfsv4 lock in conflict, no clientid nor owner */
                p = xdr_encode_hyper(p, (u64)0); /* clientid */
                *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2895,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
                nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
        else if (nfserr == nfserr_denied)
                nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
-       kfree(lock->lk_denied.ld_owner.data);
+
        return nfserr;
 }
 
@@ -3266,7 +3280,7 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
 
        wire_count = htonl(maxcount);
        write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
-       xdr_truncate_encode(xdr, length_offset + 4 + maxcount);
+       xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4));
        if (maxcount & 3)
                write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount,
                                                &zero, 4 - (maxcount&3));
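
Both xdr encoder fixes above revolve around the 4-byte XDR quantum: the rdattr_error reservation grows to 20 bytes, presumably because the 32-bit words actually written need 20 bytes where "6" was an undersized byte count, and the readlink truncation must round maxcount up with ALIGN(maxcount, 4) so the pad bytes written just after it survive. A minimal model of XDR opaque encoding showing where that padding comes from:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3) & ~3u)

/* minimal XDR opaque encoder: 4-byte big-endian length, the data, then
 * zero padding to the next 4-byte boundary - the unit the fix keeps */
static size_t encode_opaque(unsigned char *out, const void *data, uint32_t len)
{
	uint32_t wire = htonl(len);

	memcpy(out, &wire, 4);
	memcpy(out + 4, data, len);
	memset(out + 4 + len, 0, ALIGN4(len) - len);
	return 4 + ALIGN4(len);
}

int main(void)
{
	unsigned char buf[32];
	size_t n = encode_opaque(buf, "abcde", 5);
	size_t i;

	printf("encoded %zu bytes (5 data + %zu pad):", n, n - 4 - 5);
	for (i = 0; i < n; i++)
		printf(" %02x", buf[i]);
	putchar('\n');
	return 0;
}
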
index a106b3f..fae17c6 100644 (file)
@@ -331,6 +331,7 @@ struct dlm_lock_resource
        u16 state;
        char lvb[DLM_LVB_LEN];
        unsigned int inflight_locks;
+       unsigned int inflight_assert_workers;
        unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
 };
 
@@ -910,6 +911,9 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
 void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res);
 
+void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res);
+
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
index 3087a21..82abf0c 100644 (file)
@@ -581,6 +581,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
        atomic_set(&res->asts_reserved, 0);
        res->migration_pending = 0;
        res->inflight_locks = 0;
+       res->inflight_assert_workers = 0;
 
        res->dlm = dlm;
 
@@ -683,6 +684,43 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
        wake_up(&res->wq);
 }
 
+void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       res->inflight_assert_workers++;
+       mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
+                       dlm->name, res->lockname.len, res->lockname.name,
+                       res->inflight_assert_workers);
+}
+
+static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       spin_lock(&res->spinlock);
+       __dlm_lockres_grab_inflight_worker(dlm, res);
+       spin_unlock(&res->spinlock);
+}
+
+static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       BUG_ON(res->inflight_assert_workers == 0);
+       res->inflight_assert_workers--;
+       mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
+                       dlm->name, res->lockname.len, res->lockname.name,
+                       res->inflight_assert_workers);
+}
+
+static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       spin_lock(&res->spinlock);
+       __dlm_lockres_drop_inflight_worker(dlm, res);
+       spin_unlock(&res->spinlock);
+}
+
 /*
  * lookup a lock resource by name.
  * may already exist in the hashtable.
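
The grab/drop pair added above is a plain in-flight counter: every successfully dispatched assert-master work item bumps inflight_assert_workers under res->spinlock, and the worker drops it when it finishes, so dlm_run_purge_list() (see the later hunk) can refuse to purge a resource while a worker is still pending. A userspace sketch of the same pairing, with a mutex standing in for the spinlock and assert() for BUG_ON(); build with cc -pthread:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int inflight_assert_workers;

static void grab_inflight_worker(void)
{
	pthread_mutex_lock(&res_lock);
	inflight_assert_workers++;
	pthread_mutex_unlock(&res_lock);
}

static void drop_inflight_worker(void)
{
	pthread_mutex_lock(&res_lock);
	assert(inflight_assert_workers != 0);	/* BUG_ON in the kernel */
	inflight_assert_workers--;
	pthread_mutex_unlock(&res_lock);
}

static int safe_to_purge(void)
{
	int ok;

	pthread_mutex_lock(&res_lock);
	ok = (inflight_assert_workers == 0);
	pthread_mutex_unlock(&res_lock);
	return ok;
}

int main(void)
{
	grab_inflight_worker();				/* worker dispatched */
	printf("purge ok? %d\n", safe_to_purge());	/* 0 */
	drop_inflight_worker();				/* worker finished */
	printf("purge ok? %d\n", safe_to_purge());	/* 1 */
	return 0;
}
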
@@ -1603,7 +1641,8 @@ send_response:
                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
                        response = DLM_MASTER_RESP_ERROR;
                        dlm_lockres_put(res);
-               }
+               } else
+                       dlm_lockres_grab_inflight_worker(dlm, res);
        } else {
                if (res)
                        dlm_lockres_put(res);
@@ -2118,6 +2157,8 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
        dlm_lockres_release_ast(dlm, res);
 
 put:
+       dlm_lockres_drop_inflight_worker(dlm, res);
+
        dlm_lockres_put(res);
 
        mlog(0, "finished with dlm_assert_master_worker\n");
@@ -3088,11 +3129,15 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                        /* remove it so that only one mle will be found */
                        __dlm_unlink_mle(dlm, tmp);
                        __dlm_mle_detach_hb_events(dlm, tmp);
-                       ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
-                       mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
-                           "telling master to get ref for cleared out mle "
-                           "during migration\n", dlm->name, namelen, name,
-                           master, new_master);
+                       if (tmp->type == DLM_MLE_MASTER) {
+                               ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
+                               mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
+                                               "telling master to get ref "
+                                               "for cleared out mle during "
+                                               "migration\n", dlm->name,
+                                               namelen, name, master,
+                                               new_master);
+                       }
                }
                spin_unlock(&tmp->spinlock);
        }
index 5de0194..45067fa 100644 (file)
@@ -1708,7 +1708,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                                mlog_errno(-ENOMEM);
                                /* retry!? */
                                BUG();
-                       }
+                       } else
+                               __dlm_lockres_grab_inflight_worker(dlm, res);
                } else /* put.. in case we are not the master */
                        dlm_lockres_put(res);
                spin_unlock(&res->spinlock);
index 9db869d..69aac6f 100644 (file)
@@ -259,12 +259,15 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                 * refs on it. */
                unused = __dlm_lockres_unused(lockres);
                if (!unused ||
-                   (lockres->state & DLM_LOCK_RES_MIGRATING)) {
+                   (lockres->state & DLM_LOCK_RES_MIGRATING) ||
+                   (lockres->inflight_assert_workers != 0)) {
                        mlog(0, "%s: res %.*s is in use or being remastered, "
-                            "used %d, state %d\n", dlm->name,
-                            lockres->lockname.len, lockres->lockname.name,
-                            !unused, lockres->state);
-                       list_move_tail(&dlm->purge_list, &lockres->purge);
+                            "used %d, state %d, assert master workers %u\n",
+                            dlm->name, lockres->lockname.len,
+                            lockres->lockname.name,
+                            !unused, lockres->state,
+                            lockres->inflight_assert_workers);
+                       list_move_tail(&lockres->purge, &dlm->purge_list);
                        spin_unlock(&lockres->spinlock);
                        continue;
                }
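
Besides widening the skip condition, the hunk above fixes a swapped argument pair: list_move_tail(entry, head) moves ENTRY to the tail of HEAD, so the old list_move_tail(&dlm->purge_list, &lockres->purge) spliced the global list head onto the lockres instead of re-queueing the lockres. A self-contained sketch of the semantics, using a minimal re-implementation of the kernel's list helpers:

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

static void __list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *head)
{
	e->prev = head->prev;
	e->next = head;
	head->prev->next = e;
	head->prev = e;
}

/* moves ENTRY onto the tail of HEAD: the order that matters above */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	__list_del(entry);
	list_add_tail(entry, head);
}

int main(void)
{
	struct list_head purge_list = LIST_HEAD_INIT(purge_list);
	struct list_head res = LIST_HEAD_INIT(res);

	list_move_tail(&res, &purge_list);	/* correct argument order */
	printf("res re-queued on purge list: %d\n", purge_list.next == &res);
	return 0;
}
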
index 5698b52..2e3c9db 100644 (file)
@@ -191,7 +191,9 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                                     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
                } else if (status == DLM_RECOVERING ||
                           status == DLM_MIGRATING ||
-                          status == DLM_FORWARD) {
+                          status == DLM_FORWARD ||
+                          status == DLM_NOLOCKMGR
+                          ) {
                        /* must clear the actions because this unlock
                         * is about to be retried.  cannot free or do
                         * any list manipulation. */
@@ -200,7 +202,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                             res->lockname.name,
                             status==DLM_RECOVERING?"recovering":
                             (status==DLM_MIGRATING?"migrating":
-                             "forward"));
+                               (status == DLM_FORWARD ? "forward" :
+                                               "nolockmanager")));
                        actions = 0;
                }
                if (flags & LKM_CANCEL)
@@ -364,7 +367,10 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                         * updated state to the recovery master.  this thread
                         * just needs to finish out the operation and call
                         * the unlockast. */
-                       ret = DLM_NORMAL;
+                       if (dlm_is_node_dead(dlm, owner))
+                               ret = DLM_NORMAL;
+                       else
+                               ret = DLM_NOLOCKMGR;
                } else {
                        /* something bad.  this will BUG in ocfs2 */
                        ret = dlm_err_to_dlm_status(tmpret);
@@ -638,7 +644,9 @@ retry:
 
        if (status == DLM_RECOVERING ||
            status == DLM_MIGRATING ||
-           status == DLM_FORWARD) {
+           status == DLM_FORWARD ||
+           status == DLM_NOLOCKMGR) {
+
                /* We want to go away for a tiny bit to allow recovery
                 * / migration to complete on this resource. I don't
                 * know of any wait queue we could sleep on as this
@@ -650,7 +658,7 @@ retry:
                msleep(50);
 
                mlog(0, "retrying unlock due to pending recovery/"
-                    "migration/in-progress\n");
+                    "migration/in-progress/reconnect\n");
                goto retry;
        }
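
The new DLM_NOLOCKMGR status joins the set of transient conditions that make the unlock back off and retry rather than report success: per the hunk above, a failed send now only counts as DLM_NORMAL when the owner node is actually dead; if the owner is alive but unreachable (for example mid-reconnect), the caller loops. A toy model of the retry loop, with a hypothetical send_remote_unlock() that succeeds on the third attempt:

#include <stdio.h>
#include <unistd.h>

enum dlm_status { DLM_NORMAL, DLM_RECOVERING, DLM_MIGRATING,
		  DLM_FORWARD, DLM_NOLOCKMGR };

/* Fake remote call: the owner becomes reachable on the third attempt. */
static enum dlm_status send_remote_unlock(int *attempts)
{
	return ++*attempts < 3 ? DLM_NOLOCKMGR : DLM_NORMAL;
}

int main(void)
{
	enum dlm_status status;
	int attempts = 0;

	for (;;) {
		status = send_remote_unlock(&attempts);
		if (status != DLM_RECOVERING && status != DLM_MIGRATING &&
		    status != DLM_FORWARD && status != DLM_NOLOCKMGR)
			break;
		usleep(50 * 1000);	/* msleep(50) in the kernel */
	}
	printf("unlock completed after %d attempts\n", attempts);
	return 0;
}
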
 
index 2060fc3..8add6f1 100644 (file)
@@ -205,6 +205,21 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
        return inode;
 }
 
+static void ocfs2_cleanup_add_entry_failure(struct ocfs2_super *osb,
+               struct dentry *dentry, struct inode *inode)
+{
+       struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
+
+       ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
+       ocfs2_lock_res_free(&dl->dl_lockres);
+       BUG_ON(dl->dl_count != 1);
+       spin_lock(&dentry_attach_lock);
+       dentry->d_fsdata = NULL;
+       spin_unlock(&dentry_attach_lock);
+       kfree(dl);
+       iput(inode);
+}
+
 static int ocfs2_mknod(struct inode *dir,
                       struct dentry *dentry,
                       umode_t mode,
@@ -231,6 +246,7 @@ static int ocfs2_mknod(struct inode *dir,
        sigset_t oldset;
        int did_block_signals = 0;
        struct posix_acl *default_acl = NULL, *acl = NULL;
+       struct ocfs2_dentry_lock *dl = NULL;
 
        trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
                          (unsigned long long)OCFS2_I(dir)->ip_blkno,
@@ -423,6 +439,8 @@ static int ocfs2_mknod(struct inode *dir,
                goto leave;
        }
 
+       dl = dentry->d_fsdata;
+
        status = ocfs2_add_entry(handle, dentry, inode,
                                 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
                                 &lookup);
@@ -469,6 +487,9 @@ leave:
         * ocfs2_delete_inode will mutex_lock again.
         */
        if ((status < 0) && inode) {
+               if (dl)
+                       ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
+
                OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
                clear_nlink(inode);
                iput(inode);
@@ -991,6 +1012,65 @@ leave:
        return status;
 }
 
+static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
+               u64 src_inode_no, u64 dest_inode_no)
+{
+       int ret = 0, i = 0;
+       u64 parent_inode_no = 0;
+       u64 child_inode_no = src_inode_no;
+       struct inode *child_inode;
+
+#define MAX_LOOKUP_TIMES 32
+       while (1) {
+               child_inode = ocfs2_iget(osb, child_inode_no, 0, 0);
+               if (IS_ERR(child_inode)) {
+                       ret = PTR_ERR(child_inode);
+                       break;
+               }
+
+               ret = ocfs2_inode_lock(child_inode, NULL, 0);
+               if (ret < 0) {
+                       iput(child_inode);
+                       if (ret != -ENOENT)
+                               mlog_errno(ret);
+                       break;
+               }
+
+               ret = ocfs2_lookup_ino_from_name(child_inode, "..", 2,
+                               &parent_inode_no);
+               ocfs2_inode_unlock(child_inode, 0);
+               iput(child_inode);
+               if (ret < 0) {
+                       ret = -ENOENT;
+                       break;
+               }
+
+               if (parent_inode_no == dest_inode_no) {
+                       ret = 1;
+                       break;
+               }
+
+               if (parent_inode_no == osb->root_inode->i_ino) {
+                       ret = 0;
+                       break;
+               }
+
+               child_inode_no = parent_inode_no;
+
+               if (++i >= MAX_LOOKUP_TIMES) {
+                       mlog(ML_NOTICE, "max lookup times reached, filesystem "
+                                       "may have nested directories, "
+                                       "src inode: %llu, dest inode: %llu.\n",
+                                       (unsigned long long)src_inode_no,
+                                       (unsigned long long)dest_inode_no);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 /*
  * The only place this should be used is rename!
  * if they have the same id, then the 1st one is the only one locked.
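
ocfs2_check_if_ancestor() climbs from the source inode via ".." lookups, returning 1 if it reaches the destination, 0 at the root, and giving up after 32 hops so a corrupted or cyclic parent chain cannot wedge rename. A userspace analogue of the bounded walk over an in-memory parent chain (cluster locking and error handling omitted):

#include <stdio.h>

#define MAX_LOOKUP_TIMES 32

struct node { int ino; struct node *parent; };

static int is_ancestor(struct node *src, struct node *dest, struct node *root)
{
	struct node *cur = src;

	for (int i = 0; i < MAX_LOOKUP_TIMES; i++) {
		if (cur->parent == dest)
			return 1;
		if (cur->parent == root)
			return 0;
		cur = cur->parent;	/* one ".." hop */
	}
	return 0;	/* too deep: assume not an ancestor, as the fs does */
}

int main(void)
{
	struct node root = { 2, NULL }, a = { 100, &root }, b = { 101, &a };

	printf("a ancestor of b? %d\n", is_ancestor(&b, &a, &root));	/* 1 */
	printf("b ancestor of a? %d\n", is_ancestor(&a, &b, &root));	/* 0 */
	return 0;
}
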
@@ -1002,6 +1082,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                             struct inode *inode2)
 {
        int status;
+       int inode1_is_ancestor, inode2_is_ancestor;
        struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
        struct ocfs2_inode_info *oi2 = OCFS2_I(inode2);
        struct buffer_head **tmpbh;
@@ -1015,9 +1096,26 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
        if (*bh2)
                *bh2 = NULL;
 
-       /* we always want to lock the one with the lower lockid first. */
+       /* we always want to lock the one with the lower lockid first.
+        * and if they are nested, we lock ancestor first */
        if (oi1->ip_blkno != oi2->ip_blkno) {
-               if (oi1->ip_blkno < oi2->ip_blkno) {
+               inode1_is_ancestor = ocfs2_check_if_ancestor(osb, oi2->ip_blkno,
+                               oi1->ip_blkno);
+               if (inode1_is_ancestor < 0) {
+                       status = inode1_is_ancestor;
+                       goto bail;
+               }
+
+               inode2_is_ancestor = ocfs2_check_if_ancestor(osb, oi1->ip_blkno,
+                               oi2->ip_blkno);
+               if (inode2_is_ancestor < 0) {
+                       status = inode2_is_ancestor;
+                       goto bail;
+               }
+
+               if ((inode1_is_ancestor == 1) ||
+                               (oi1->ip_blkno < oi2->ip_blkno &&
+                               inode2_is_ancestor == 0)) {
                        /* switch id1 and id2 around */
                        tmpbh = bh2;
                        bh2 = bh1;
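
The rewritten condition keeps the old lower-blkno-first rule but lets ancestry override it: an ancestor directory is always locked before its descendant, so two nodes renaming across a nested pair cannot take the locks in opposite orders and deadlock. A distilled decision function (an assumed simplification; the real code expresses the same rule as a swap condition):

#include <stdio.h>

static int inode1_locks_first(int inode1_is_ancestor, int inode2_is_ancestor,
			      unsigned long long blkno1,
			      unsigned long long blkno2)
{
	if (inode1_is_ancestor)
		return 1;
	if (inode2_is_ancestor)
		return 0;
	return blkno1 < blkno2;		/* the old global ordering */
}

int main(void)
{
	/* ancestry overrides the block-number tie-break */
	printf("%d\n", inode1_locks_first(1, 0, 500, 100));	/* 1 */
	printf("%d\n", inode1_locks_first(0, 0, 500, 100));	/* 0 */
	return 0;
}
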
@@ -1098,6 +1196,7 @@ static int ocfs2_rename(struct inode *old_dir,
        struct ocfs2_dir_lookup_result old_entry_lookup = { NULL, };
        struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
        struct ocfs2_dir_lookup_result target_insert = { NULL, };
+       bool should_add_orphan = false;
 
        /* At some point it might be nice to break this function up a
         * bit. */
@@ -1134,6 +1233,21 @@ static int ocfs2_rename(struct inode *old_dir,
                        goto bail;
                }
                rename_lock = 1;
+
+               /* here we cannot guarantee the inodes haven't just been
+                * changed, so check if they are nested again */
+               status = ocfs2_check_if_ancestor(osb, new_dir->i_ino,
+                               old_inode->i_ino);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               } else if (status == 1) {
+                       status = -EPERM;
+                       trace_ocfs2_rename_not_permitted(
+                                       (unsigned long long)old_inode->i_ino,
+                                       (unsigned long long)new_dir->i_ino);
+                       goto bail;
+               }
        }
 
        /* if old and new are the same, this'll just do one lock. */
@@ -1304,6 +1418,7 @@ static int ocfs2_rename(struct inode *old_dir,
                                mlog_errno(status);
                                goto bail;
                        }
+                       should_add_orphan = true;
                }
        } else {
                BUG_ON(new_dentry->d_parent->d_inode != new_dir);
@@ -1348,17 +1463,6 @@ static int ocfs2_rename(struct inode *old_dir,
                        goto bail;
                }
 
-               if (S_ISDIR(new_inode->i_mode) ||
-                   (ocfs2_read_links_count(newfe) == 1)) {
-                       status = ocfs2_orphan_add(osb, handle, new_inode,
-                                                 newfe_bh, orphan_name,
-                                                 &orphan_insert, orphan_dir);
-                       if (status < 0) {
-                               mlog_errno(status);
-                               goto bail;
-                       }
-               }
-
                /* change the dirent to point to the correct inode */
                status = ocfs2_update_entry(new_dir, handle, &target_lookup_res,
                                            old_inode);
@@ -1373,6 +1477,15 @@ static int ocfs2_rename(struct inode *old_dir,
                else
                        ocfs2_add_links_count(newfe, -1);
                ocfs2_journal_dirty(handle, newfe_bh);
+               if (should_add_orphan) {
+                       status = ocfs2_orphan_add(osb, handle, new_inode,
+                                       newfe_bh, orphan_name,
+                                       &orphan_insert, orphan_dir);
+                       if (status < 0) {
+                               mlog_errno(status);
+                               goto bail;
+                       }
+               }
        } else {
                /* if the name was not found in new_dir, add it now */
                status = ocfs2_add_entry(handle, new_dentry, old_inode,
@@ -1642,6 +1755,7 @@ static int ocfs2_symlink(struct inode *dir,
        struct ocfs2_dir_lookup_result lookup = { NULL, };
        sigset_t oldset;
        int did_block_signals = 0;
+       struct ocfs2_dentry_lock *dl = NULL;
 
        trace_ocfs2_symlink_begin(dir, dentry, symname,
                                  dentry->d_name.len, dentry->d_name.name);
@@ -1830,6 +1944,8 @@ static int ocfs2_symlink(struct inode *dir,
                goto bail;
        }
 
+       dl = dentry->d_fsdata;
+
        status = ocfs2_add_entry(handle, dentry, inode,
                                 le64_to_cpu(fe->i_blkno), parent_fe_bh,
                                 &lookup);
@@ -1864,6 +1980,9 @@ bail:
        if (xattr_ac)
                ocfs2_free_alloc_context(xattr_ac);
        if ((status < 0) && inode) {
+               if (dl)
+                       ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
+
                OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
                clear_nlink(inode);
                iput(inode);
index 1b60c62..6cb019b 100644 (file)
@@ -2292,6 +2292,8 @@ TRACE_EVENT(ocfs2_rename,
                  __entry->new_len, __get_str(new_name))
 );
 
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_rename_not_permitted);
+
 TRACE_EVENT(ocfs2_rename_target_exists,
        TP_PROTO(int new_len, const char *new_name),
        TP_ARGS(new_len, new_name),
index 714e53b..636aab6 100644 (file)
@@ -4288,9 +4288,16 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
                goto out;
        }
 
+       error = ocfs2_rw_lock(inode, 1);
+       if (error) {
+               mlog_errno(error);
+               goto out;
+       }
+
        error = ocfs2_inode_lock(inode, &old_bh, 1);
        if (error) {
                mlog_errno(error);
+               ocfs2_rw_unlock(inode, 1);
                goto out;
        }
 
@@ -4302,6 +4309,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
        up_write(&OCFS2_I(inode)->ip_xattr_sem);
 
        ocfs2_inode_unlock(inode, 1);
+       ocfs2_rw_unlock(inode, 1);
        brelse(old_bh);
 
        if (error) {
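
ocfs2_reflink() now holds the inode's rw lock around the whole operation, nested outside the cluster inode lock, so the error path after a failed ocfs2_inode_lock() must drop the rw lock it already took. A stubbed userspace sketch of the acquire/release pairing (the take_/drop_ helpers are illustrative stand-ins, with the inner lock forced to fail):

#include <stdio.h>

static int take_rw_lock(void)     { puts("rw_lock");    return 0; }
static void drop_rw_lock(void)    { puts("rw_unlock"); }
static int take_inode_lock(void)  { puts("inode_lock"); return -1; /* fail */ }
static void drop_inode_lock(void) { puts("inode_unlock"); }

static int reflink_like(void)
{
	int error = take_rw_lock();

	if (error)
		return error;

	error = take_inode_lock();
	if (error) {
		drop_rw_lock();		/* unwind the outer lock on error */
		return error;
	}

	/* ... the reflink work would go here ... */

	drop_inode_lock();
	drop_rw_lock();
	return 0;
}

int main(void)
{
	printf("reflink_like() = %d\n", reflink_like());
	return 0;
}
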
index c7a89ce..ddb662b 100644 (file)
@@ -1925,15 +1925,11 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 
        ocfs2_shutdown_local_alloc(osb);
 
+       ocfs2_truncate_log_shutdown(osb);
+
        /* This will disable recovery and flush any recovery work. */
        ocfs2_recovery_exit(osb);
 
-       /*
-        * During dismount, when it recovers another node it will call
-        * ocfs2_recover_orphans and queue delayed work osb_truncate_log_wq.
-        */
-       ocfs2_truncate_log_shutdown(osb);
-
        ocfs2_journal_shutdown(osb);
 
        ocfs2_sync_blockdev(sb);
index 36662d0..d6fd3ac 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -263,11 +263,10 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
                return -EPERM;
 
        /*
-        * We can not allow to do any fallocate operation on an active
-        * swapfile
+        * We cannot allow any fallocate operation on an active swapfile
         */
        if (IS_SWAPFILE(inode))
-               ret = -ETXTBSY;
+               return -ETXTBSY;
 
        /*
         * Revalidate the write permissions, in case security policy has
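
The one-line change above is a real bug fix, not a cleanup: the old code assigned -ETXTBSY to ret but fell through, and ret was overwritten by the next check, so fallocate() silently proceeded on an active swapfile. A compact userspace illustration of the assign-versus-return pattern:

#include <errno.h>
#include <stdio.h>

static int check_old(int is_swapfile)
{
	int ret = 0;

	if (is_swapfile)
		ret = -ETXTBSY;	/* assigned, but execution falls through... */
	ret = 0;		/* ...and a later assignment clobbers it */
	return ret;
}

static int check_new(int is_swapfile)
{
	if (is_swapfile)
		return -ETXTBSY;	/* fail immediately */
	return 0;
}

int main(void)
{
	printf("old: %d, new: %d\n", check_old(1), check_new(1));
	return 0;
}
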
index 9d231e9..bf2d03f 100644 (file)
@@ -184,29 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
 
 static int stat_open(struct inode *inode, struct file *file)
 {
-       size_t size = 1024 + 128 * num_possible_cpus();
-       char *buf;
-       struct seq_file *m;
-       int res;
+       size_t size = 1024 + 128 * num_online_cpus();
 
        /* minimum size to display an interrupt count : 2 bytes */
        size += 2 * nr_irqs;
-
-       /* don't ask for more than the kmalloc() max size */
-       if (size > KMALLOC_MAX_SIZE)
-               size = KMALLOC_MAX_SIZE;
-       buf = kmalloc(size, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       res = single_open(file, show_stat, NULL);
-       if (!res) {
-               m = file->private_data;
-               m->buf = buf;
-               m->size = ksize(buf);
-       } else
-               kfree(buf);
-       return res;
+       return single_open_size(file, show_stat, NULL, size);
 }
 
 static const struct file_operations proc_stat_operations = {
index 9cd5f63..7f30bdc 100644 (file)
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        struct dquot *dquot;
        unsigned long freed = 0;
 
+       spin_lock(&dq_list_lock);
        head = free_dquots.prev;
        while (head != &free_dquots && sc->nr_to_scan) {
                dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                freed++;
                head = free_dquots.prev;
        }
+       spin_unlock(&dq_list_lock);
        return freed;
 }
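
The shrinker walks free_dquots while other CPUs add and remove entries, so the scan must hold dq_list_lock like every other accessor; without it the list can change underfoot mid-walk. A userspace analogue of scanning shared state under the same lock its writers take (a counter stands in for the list; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int free_count = 1000;

static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		pthread_mutex_lock(&list_lock);
		free_count++;
		pthread_mutex_unlock(&list_lock);
	}
	return NULL;
}

static int shrink_scan(int nr_to_scan)
{
	int freed = 0;

	pthread_mutex_lock(&list_lock);		/* the previously missing lock */
	while (free_count > 0 && nr_to_scan--) {
		free_count--;
		freed++;
	}
	pthread_mutex_unlock(&list_lock);
	return freed;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, producer, NULL);
	int freed = shrink_scan(500);
	pthread_join(t, NULL);
	printf("freed %d, remaining %d\n", freed, free_count);
	return 0;
}
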
 
index 1d641bb..3857b72 100644 (file)
@@ -8,8 +8,10 @@
 #include <linux/fs.h>
 #include <linux/export.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/cred.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -30,6 +32,16 @@ static void seq_set_overflow(struct seq_file *m)
        m->count = m->size;
 }
 
+static void *seq_buf_alloc(unsigned long size)
+{
+       void *buf;
+
+       buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+       if (!buf && size > PAGE_SIZE)
+               buf = vmalloc(size);
+       return buf;
+}
+
 /**
  *     seq_open -      initialize sequential file
  *     @file: file we initialize
@@ -96,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset)
                return 0;
        }
        if (!m->buf) {
-               m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+               m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
                if (!m->buf)
                        return -ENOMEM;
        }
@@ -135,9 +147,9 @@ static int traverse(struct seq_file *m, loff_t offset)
 
 Eoverflow:
        m->op->stop(m, p);
-       kfree(m->buf);
+       kvfree(m->buf);
        m->count = 0;
-       m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+       m->buf = seq_buf_alloc(m->size <<= 1);
        return !m->buf ? -ENOMEM : -EAGAIN;
 }
 
@@ -192,7 +204,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
 
        /* grab buffer if we didn't have one */
        if (!m->buf) {
-               m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+               m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
                if (!m->buf)
                        goto Enomem;
        }
@@ -232,9 +244,9 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
                if (m->count < m->size)
                        goto Fill;
                m->op->stop(m, p);
-               kfree(m->buf);
+               kvfree(m->buf);
                m->count = 0;
-               m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+               m->buf = seq_buf_alloc(m->size <<= 1);
                if (!m->buf)
                        goto Enomem;
                m->version = 0;
@@ -350,7 +362,7 @@ EXPORT_SYMBOL(seq_lseek);
 int seq_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = file->private_data;
-       kfree(m->buf);
+       kvfree(m->buf);
        kfree(m);
        return 0;
 }
@@ -605,13 +617,13 @@ EXPORT_SYMBOL(single_open);
 int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
                void *data, size_t size)
 {
-       char *buf = kmalloc(size, GFP_KERNEL);
+       char *buf = seq_buf_alloc(size);
        int ret;
        if (!buf)
                return -ENOMEM;
        ret = single_open(file, show, data);
        if (ret) {
-               kfree(buf);
+               kvfree(buf);
                return ret;
        }
        ((struct seq_file *)file->private_data)->buf = buf;
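
seq_buf_alloc() tries kmalloc() first, silenced with __GFP_NOWARN, and falls back to vmalloc() above a page, which is why every free site in the file switches to kvfree(), the helper that dispatches on the pointer's origin. A userspace analogue of the dual-allocator pattern; lacking is_vmalloc_addr(), it records the allocator in a header ahead of the buffer:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#define THRESHOLD 4096

struct hdr { size_t size; int mmapped; };

static void *buf_alloc(size_t size)
{
	size_t total = sizeof(struct hdr) + size;
	struct hdr *h;

	if (total <= THRESHOLD) {		/* small: heap allocator */
		h = malloc(total);
		if (!h)
			return NULL;
		h->mmapped = 0;
	} else {				/* large: fall back to mmap */
		h = mmap(NULL, total, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (h == MAP_FAILED)
			return NULL;
		h->mmapped = 1;
	}
	h->size = total;
	return h + 1;
}

/* one free routine for both origins, the role kvfree() plays above */
static void buf_free(void *p)
{
	struct hdr *h;

	if (!p)
		return;
	h = (struct hdr *)p - 1;
	if (h->mmapped)
		munmap(h, h->size);
	else
		free(h);
}

int main(void)
{
	void *small = buf_alloc(64), *big = buf_alloc(1 << 20);

	printf("small=%p big=%p\n", small, big);
	buf_free(small);
	buf_free(big);
	return 0;
}
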
index 3377dff..c69e6d4 100644 (file)
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
 
        /* wrap around? */
        len = sizeof(*new_xattr) + size;
-       if (len <= sizeof(*new_xattr))
+       if (len < sizeof(*new_xattr))
                return NULL;
 
        new_xattr = kmalloc(len, GFP_KERNEL);
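
The <= to < change is about zero-length values: len == sizeof(*new_xattr) is exactly what a legal size == 0 xattr produces, so only strict wrap-around (len < sizeof(*new_xattr)) should be rejected. A quick demonstration of the arithmetic:

#include <stdio.h>
#include <stddef.h>

struct xattr_hdr { char *name; size_t size; };

int main(void)
{
	size_t sizes[] = { 0, 16, (size_t)-1 };

	for (int i = 0; i < 3; i++) {
		size_t len = sizeof(struct xattr_hdr) + sizes[i];
		int wrapped = len < sizeof(struct xattr_hdr);

		printf("size=%zu -> len=%zu wrapped=%d\n",
		       sizes[i], len, wrapped);
	}
	return 0;
}
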
index 96175df..75c3fe5 100644 (file)
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
 }
 
 
-int
-__xfs_bmapi_allocate(
+static int
+xfs_bmapi_allocate(
        struct xfs_bmalloca     *bma)
 {
        struct xfs_mount        *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
        bma.flist = flist;
        bma.firstblock = firstblock;
 
-       if (flags & XFS_BMAPI_STACK_SWITCH)
-               bma.stack_switch = 1;
-
        while (bno < end && n < *nmap) {
                inhole = eof || bma.got.br_startoff > bno;
                wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
index 38ba36e..b879ca5 100644 (file)
@@ -77,7 +77,6 @@ typedef       struct xfs_bmap_free
  * from written to unwritten, otherwise convert from unwritten to written.
  */
 #define XFS_BMAPI_CONVERT      0x040
-#define XFS_BMAPI_STACK_SWITCH 0x080
 
 #define XFS_BMAPI_FLAGS \
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef       struct xfs_bmap_free
        { XFS_BMAPI_PREALLOC,   "PREALLOC" }, \
        { XFS_BMAPI_IGSTATE,    "IGSTATE" }, \
        { XFS_BMAPI_CONTIG,     "CONTIG" }, \
-       { XFS_BMAPI_CONVERT,    "CONVERT" }, \
-       { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
+       { XFS_BMAPI_CONVERT,    "CONVERT" }
 
 
 static inline int xfs_bmapi_aflag(int w)
index 703b3ec..64731ef 100644 (file)
@@ -248,59 +248,6 @@ xfs_bmap_rtalloc(
        return 0;
 }
 
-/*
- * Stack switching interfaces for allocation
- */
-static void
-xfs_bmapi_allocate_worker(
-       struct work_struct      *work)
-{
-       struct xfs_bmalloca     *args = container_of(work,
-                                               struct xfs_bmalloca, work);
-       unsigned long           pflags;
-       unsigned long           new_pflags = PF_FSTRANS;
-
-       /*
-        * we are in a transaction context here, but may also be doing work
-        * in kswapd context, and hence we may need to inherit that state
-        * temporarily to ensure that we don't block waiting for memory reclaim
-        * in any way.
-        */
-       if (args->kswapd)
-               new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
-
-       current_set_flags_nested(&pflags, new_pflags);
-
-       args->result = __xfs_bmapi_allocate(args);
-       complete(args->done);
-
-       current_restore_flags_nested(&pflags, new_pflags);
-}
-
-/*
- * Some allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Otherwise just
- * call directly to avoid the context switch overhead here.
- */
-int
-xfs_bmapi_allocate(
-       struct xfs_bmalloca     *args)
-{
-       DECLARE_COMPLETION_ONSTACK(done);
-
-       if (!args->stack_switch)
-               return __xfs_bmapi_allocate(args);
-
-
-       args->done = &done;
-       args->kswapd = current_is_kswapd();
-       INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
-       queue_work(xfs_alloc_wq, &args->work);
-       wait_for_completion(&done);
-       destroy_work_on_stack(&args->work);
-       return args->result;
-}
-
 /*
  * Check if the endoff is outside the last extent. If so the caller will grow
  * the allocation to a stripe unit boundary.  All offsets are considered outside
index 075f722..2fdb72d 100644 (file)
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
        bool                    userdata;/* set if is user data */
        bool                    aeof;   /* allocated space at eof */
        bool                    conv;   /* overwriting unwritten extents */
-       bool                    stack_switch;
-       bool                    kswapd; /* allocation in kswapd context */
        int                     flags;
        struct completion       *done;
        struct work_struct      work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
 int    xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
                        int *committed);
 int    xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
-int    xfs_bmapi_allocate(struct xfs_bmalloca *args);
-int    __xfs_bmapi_allocate(struct xfs_bmalloca *args);
 int    xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
                     int whichfork, int *eof);
 int    xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
index bf810c6..cf893bc 100644 (file)
@@ -33,6 +33,7 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_alloc.h"
 
 /*
  * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
  * record (to be inserted into parent).
  */
 STATIC int                                     /* error */
-xfs_btree_split(
+__xfs_btree_split(
        struct xfs_btree_cur    *cur,
        int                     level,
        union xfs_btree_ptr     *ptrp,
@@ -2503,6 +2504,85 @@ error0:
        return error;
 }
 
+struct xfs_btree_split_args {
+       struct xfs_btree_cur    *cur;
+       int                     level;
+       union xfs_btree_ptr     *ptrp;
+       union xfs_btree_key     *key;
+       struct xfs_btree_cur    **curp;
+       int                     *stat;          /* success/failure */
+       int                     result;
+       bool                    kswapd; /* allocation in kswapd context */
+       struct completion       *done;
+       struct work_struct      work;
+};
+
+/*
+ * Stack switching interfaces for allocation
+ */
+static void
+xfs_btree_split_worker(
+       struct work_struct      *work)
+{
+       struct xfs_btree_split_args     *args = container_of(work,
+                                               struct xfs_btree_split_args, work);
+       unsigned long           pflags;
+       unsigned long           new_pflags = PF_FSTRANS;
+
+       /*
+        * we are in a transaction context here, but may also be doing work
+        * in kswapd context, and hence we may need to inherit that state
+        * temporarily to ensure that we don't block waiting for memory reclaim
+        * in any way.
+        */
+       if (args->kswapd)
+               new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+
+       current_set_flags_nested(&pflags, new_pflags);
+
+       args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
+                                        args->key, args->curp, args->stat);
+       complete(args->done);
+
+       current_restore_flags_nested(&pflags, new_pflags);
+}
+
+/*
+ * BMBT split requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. For the other
+ * btree types, just call directly to avoid the context switch overhead here.
+ */
+STATIC int                                     /* error */
+xfs_btree_split(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       union xfs_btree_ptr     *ptrp,
+       union xfs_btree_key     *key,
+       struct xfs_btree_cur    **curp,
+       int                     *stat)          /* success/failure */
+{
+       struct xfs_btree_split_args     args;
+       DECLARE_COMPLETION_ONSTACK(done);
+
+       if (cur->bc_btnum != XFS_BTNUM_BMAP)
+               return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
+
+       args.cur = cur;
+       args.level = level;
+       args.ptrp = ptrp;
+       args.key = key;
+       args.curp = curp;
+       args.stat = stat;
+       args.done = &done;
+       args.kswapd = current_is_kswapd();
+       INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
+       queue_work(xfs_alloc_wq, &args.work);
+       wait_for_completion(&done);
+       destroy_work_on_stack(&args.work);
+       return args.result;
+}
+
+
 /*
  * Copy the old inode root contents into a real block and make the
  * broot point to it.
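
The series moves the stack-switching machinery from xfs_bmapi_allocate() down into the btree code, so only BMBT splits, the one path that genuinely arrives with little stack left, pay for the worker hand-off. A userspace analogue of running the deep step on a worker with its own generous stack and waiting for the result, with pthread_join() standing in for the kernel's completion; build with cc -pthread:

#include <pthread.h>
#include <stdio.h>

struct split_args { int level; int result; };

static void *split_worker(void *p)
{
	struct split_args *args = p;

	args->result = args->level + 1;	/* stand-in for the deep split work */
	return NULL;
}

static int do_split(int level)
{
	struct split_args args = { .level = level };
	pthread_attr_t attr;
	pthread_t t;

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, 1 << 20);	/* fresh 1 MiB stack */
	pthread_create(&t, &attr, split_worker, &args);
	pthread_join(t, NULL);		/* wait_for_completion() in the kernel */
	pthread_attr_destroy(&attr);
	return args.result;
}

int main(void)
{
	printf("split result: %d\n", do_split(0));
	return 0;
}
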
index 6c5eb4c..6d3ec2b 100644 (file)
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
                         * pointer that the caller gave to us.
                         */
                        error = xfs_bmapi_write(tp, ip, map_start_fsb,
-                                               count_fsb,
-                                               XFS_BMAPI_STACK_SWITCH,
+                                               count_fsb, 0,
                                                &first_block, 1,
                                                imap, &nimaps, &free_list);
                        if (error)
index c3453b1..7703fa6 100644 (file)
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
        }
 
        /*
-        * GQUOTINO and PQUOTINO cannot be used together in versions
-        * of superblock that do not have pquotino. from->sb_flags
-        * tells us which quota is active and should be copied to
-        * disk.
+        * GQUOTINO and PQUOTINO cannot be used together in versions of
+        * superblock that do not have pquotino. from->sb_flags tells us which
+        * quota is active and should be copied to disk. If neither are active,
+        * make sure we write NULLFSINO to the sb_gquotino field as a quota
+        * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
+        * bit is set.
+        *
+        * Note that we don't need to handle the sb_uquotino or sb_pquotino here
+        * as they do not require any translation. Hence the main sb field loop
+        * will write them appropriately from the in-core superblock.
         */
        if ((*fields & XFS_SB_GQUOTINO) &&
                                (from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
        else if ((*fields & XFS_SB_PQUOTINO) &&
                                (from->sb_qflags & XFS_PQUOTA_ACCT))
                to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
+       else {
+               /*
+                * We can't rely on just the fields being logged to tell us
+                * that it is safe to write NULLFSINO - we should only do that
+                * if quotas are not actually enabled. Hence only write
+                * NULLFSINO if both in-core quota inodes are NULL.
+                */
+               if (from->sb_gquotino == NULLFSINO &&
+                   from->sb_pquotino == NULLFSINO)
+                       to->sb_gquotino = cpu_to_be64(NULLFSINO);
+       }
 
        *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
 }
index 6eb1d3c..9b9b6f2 100644 (file)
@@ -53,7 +53,7 @@ struct acpi_power_register {
        u8 bit_offset;
        u8 access_size;
        u64 address;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_processor_cx {
        u8 valid;
@@ -83,7 +83,7 @@ struct acpi_psd_package {
        u64 domain;
        u64 coord_type;
        u64 num_processors;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_pct_register {
        u8 descriptor;
@@ -93,7 +93,7 @@ struct acpi_pct_register {
        u8 bit_offset;
        u8 reserved;
        u64 address;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_processor_px {
        u64 core_frequency;     /* megahertz */
@@ -124,7 +124,7 @@ struct acpi_tsd_package {
        u64 domain;
        u64 coord_type;
        u64 num_processors;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_ptc_register {
        u8 descriptor;
@@ -134,7 +134,7 @@ struct acpi_ptc_register {
        u8 bit_offset;
        u8 reserved;
        u64 address;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_processor_tx_tss {
        u64 freqpercentage;     /* */
index ea4c7bb..843ef1a 100644 (file)
@@ -22,6 +22,7 @@ extern void acpi_video_unregister(void);
 extern void acpi_video_unregister_backlight(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
                               int device_id, void **edid);
+extern bool acpi_video_verify_backlight_support(void);
 #else
 static inline int acpi_video_register(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
@@ -31,6 +32,7 @@ static inline int acpi_video_get_edid(struct acpi_device *device, int type,
 {
        return -ENODEV;
 }
+static inline bool acpi_video_verify_backlight_support(void) { return false; }
 #endif
 
 #endif
index a6806a9..2e29d13 100644 (file)
@@ -4,8 +4,7 @@
 #include <linux/io.h>
 #include <asm-generic/int-ll64.h>
 
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
+static inline __u64 hi_lo_readq(const volatile void __iomem *addr)
 {
        const volatile u32 __iomem *p = addr;
        u32 low, high;
@@ -15,14 +14,19 @@ static inline __u64 readq(const volatile void __iomem *addr)
 
        return low + ((u64)high << 32);
 }
-#endif
 
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
+static inline void hi_lo_writeq(__u64 val, volatile void __iomem *addr)
 {
        writel(val >> 32, addr + 4);
        writel(val, addr);
 }
+
+#ifndef readq
+#define readq hi_lo_readq
+#endif
+
+#ifndef writeq
+#define writeq hi_lo_writeq
 #endif
 
 #endif /* _ASM_IO_64_NONATOMIC_HI_LO_H_ */
index ca546b1..0efacff 100644 (file)
@@ -4,8 +4,7 @@
 #include <linux/io.h>
 #include <asm-generic/int-ll64.h>
 
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
+static inline __u64 lo_hi_readq(const volatile void __iomem *addr)
 {
        const volatile u32 __iomem *p = addr;
        u32 low, high;
@@ -15,14 +14,19 @@ static inline __u64 readq(const volatile void __iomem *addr)
 
        return low + ((u64)high << 32);
 }
-#endif
 
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
+static inline void lo_hi_writeq(__u64 val, volatile void __iomem *addr)
 {
        writel(val, addr);
        writel(val >> 32, addr + 4);
 }
+
+#ifndef readq
+#define readq lo_hi_readq
+#endif
+
+#ifndef writeq
+#define writeq lo_hi_writeq
 #endif
 
 #endif /* _ASM_IO_64_NONATOMIC_LO_HI_H_ */
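
Turning the static inlines into named hi_lo_/lo_hi_ helpers plus #define fallbacks lets other code reach a specific half-ordering even on architectures that provide native readq/writeq. A userspace sketch of what the helpers do, emulating one 64-bit access as two 32-bit halves (only the order of the halves differs between the variants):

#include <stdint.h>
#include <stdio.h>

static void writel(uint32_t val, volatile uint32_t *addr) { *addr = val; }

static void lo_hi_writeq(uint64_t val, volatile uint32_t *addr)
{
	writel((uint32_t)val, addr);		/* low half first */
	writel((uint32_t)(val >> 32), addr + 1);
}

static uint64_t lo_hi_readq(const volatile uint32_t *addr)
{
	uint32_t low = addr[0], high = addr[1];	/* low half first */

	return low + ((uint64_t)high << 32);
}

int main(void)
{
	uint32_t reg[2];

	lo_hi_writeq(0x1122334455667788ULL, reg);
	printf("0x%llx\n", (unsigned long long)lo_hi_readq(reg));
	return 0;
}
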
index 0703aa7..4d9f233 100644 (file)
@@ -36,93 +36,385 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #endif
 
 /*
- * Add a offset to a pointer but keep the pointer as is.
- *
- * Only S390 provides its own means of moving the pointer.
+ * Arch may define arch_raw_cpu_ptr() to provide more efficient address
+ * translations for raw_cpu_ptr().
  */
-#ifndef SHIFT_PERCPU_PTR
-/* Weird cast keeps both GCC and sparse happy. */
-#define SHIFT_PERCPU_PTR(__p, __offset)        ({                              \
-       __verify_pcpu_ptr((__p));                                       \
-       RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)); \
-})
+#ifndef arch_raw_cpu_ptr
+#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
 #endif
 
-/*
- * A percpu variable may point to a discarded regions. The following are
- * established ways to produce a usable pointer from the percpu variable
- * offset.
- */
-#define per_cpu(var, cpu) \
-       (*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-
-#ifndef raw_cpu_ptr
-#define raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
 #endif
-#ifdef CONFIG_DEBUG_PREEMPT
-#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+
+#endif /* SMP */
+
+#ifndef PER_CPU_BASE_SECTION
+#ifdef CONFIG_SMP
+#define PER_CPU_BASE_SECTION ".data..percpu"
 #else
-#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+#define PER_CPU_BASE_SECTION ".data"
+#endif
 #endif
 
-#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
-#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
 
-#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
-extern void setup_per_cpu_areas(void);
+#ifndef PER_CPU_DEF_ATTRIBUTES
+#define PER_CPU_DEF_ATTRIBUTES
 #endif
 
-#else /* ! SMP */
+#define raw_cpu_generic_to_op(pcp, val, op)                            \
+do {                                                                   \
+       *raw_cpu_ptr(&(pcp)) op val;                                    \
+} while (0)
 
-#define VERIFY_PERCPU_PTR(__p) ({                      \
-       __verify_pcpu_ptr((__p));                       \
-       (typeof(*(__p)) __kernel __force *)(__p);       \
+#define raw_cpu_generic_add_return(pcp, val)                           \
+({                                                                     \
+       raw_cpu_add(pcp, val);                                          \
+       raw_cpu_read(pcp);                                              \
 })
 
-#define per_cpu(var, cpu)      (*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
-#define __get_cpu_var(var)     (*VERIFY_PERCPU_PTR(&(var)))
-#define __raw_get_cpu_var(var) (*VERIFY_PERCPU_PTR(&(var)))
-#define this_cpu_ptr(ptr)      per_cpu_ptr(ptr, 0)
-#define raw_cpu_ptr(ptr)       this_cpu_ptr(ptr)
+#define raw_cpu_generic_xchg(pcp, nval)                                        \
+({                                                                     \
+       typeof(pcp) __ret;                                              \
+       __ret = raw_cpu_read(pcp);                                      \
+       raw_cpu_write(pcp, nval);                                       \
+       __ret;                                                          \
+})
 
-#endif /* SMP */
+#define raw_cpu_generic_cmpxchg(pcp, oval, nval)                       \
+({                                                                     \
+       typeof(pcp) __ret;                                              \
+       __ret = raw_cpu_read(pcp);                                      \
+       if (__ret == (oval))                                            \
+               raw_cpu_write(pcp, nval);                               \
+       __ret;                                                          \
+})
 
-#ifndef PER_CPU_BASE_SECTION
-#ifdef CONFIG_SMP
-#define PER_CPU_BASE_SECTION ".data..percpu"
-#else
-#define PER_CPU_BASE_SECTION ".data"
+#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({                                                                     \
+       int __ret = 0;                                                  \
+       if (raw_cpu_read(pcp1) == (oval1) &&                            \
+                        raw_cpu_read(pcp2)  == (oval2)) {              \
+               raw_cpu_write(pcp1, nval1);                             \
+               raw_cpu_write(pcp2, nval2);                             \
+               __ret = 1;                                              \
+       }                                                               \
+       (__ret);                                                        \
+})
+
+#define this_cpu_generic_read(pcp)                                     \
+({                                                                     \
+       typeof(pcp) __ret;                                              \
+       preempt_disable();                                              \
+       __ret = *this_cpu_ptr(&(pcp));                                  \
+       preempt_enable();                                               \
+       __ret;                                                          \
+})
+
+#define this_cpu_generic_to_op(pcp, val, op)                           \
+do {                                                                   \
+       unsigned long __flags;                                          \
+       raw_local_irq_save(__flags);                                    \
+       *raw_cpu_ptr(&(pcp)) op val;                                    \
+       raw_local_irq_restore(__flags);                                 \
+} while (0)
+
+#define this_cpu_generic_add_return(pcp, val)                          \
+({                                                                     \
+       typeof(pcp) __ret;                                              \
+       unsigned long __flags;                                          \
+       raw_local_irq_save(__flags);                                    \
+       raw_cpu_add(pcp, val);                                          \
+       __ret = raw_cpu_read(pcp);                                      \
+       raw_local_irq_restore(__flags);                                 \
+       __ret;                                                          \
+})
+
+#define this_cpu_generic_xchg(pcp, nval)                               \
+({                                                                     \
+       typeof(pcp) __ret;                                              \
+       unsigned long __flags;                                          \
+       raw_local_irq_save(__flags);                                    \
+       __ret = raw_cpu_read(pcp);                                      \
+       raw_cpu_write(pcp, nval);                                       \
+       raw_local_irq_restore(__flags);                                 \
+       __ret;                                                          \
+})
+
+#define this_cpu_generic_cmpxchg(pcp, oval, nval)                      \
+({                                                                     \
+       typeof(pcp) __ret;                                              \
+       unsigned long __flags;                                          \
+       raw_local_irq_save(__flags);                                    \
+       __ret = raw_cpu_read(pcp);                                      \
+       if (__ret == (oval))                                            \
+               raw_cpu_write(pcp, nval);                               \
+       raw_local_irq_restore(__flags);                                 \
+       __ret;                                                          \
+})
+
+#define this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)        \
+({                                                                     \
+       int __ret;                                                      \
+       unsigned long __flags;                                          \
+       raw_local_irq_save(__flags);                                    \
+       __ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,              \
+                       oval1, oval2, nval1, nval2);                    \
+       raw_local_irq_restore(__flags);                                 \
+       __ret;                                                          \
+})
+
+#ifndef raw_cpu_read_1
+#define raw_cpu_read_1(pcp)            (*raw_cpu_ptr(&(pcp)))
 #endif
+#ifndef raw_cpu_read_2
+#define raw_cpu_read_2(pcp)            (*raw_cpu_ptr(&(pcp)))
+#endif
+#ifndef raw_cpu_read_4
+#define raw_cpu_read_4(pcp)            (*raw_cpu_ptr(&(pcp)))
+#endif
+#ifndef raw_cpu_read_8
+#define raw_cpu_read_8(pcp)            (*raw_cpu_ptr(&(pcp)))
 #endif
 
-#ifdef CONFIG_SMP
+#ifndef raw_cpu_write_1
+#define raw_cpu_write_1(pcp, val)      raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_2
+#define raw_cpu_write_2(pcp, val)      raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_4
+#define raw_cpu_write_4(pcp, val)      raw_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef raw_cpu_write_8
+#define raw_cpu_write_8(pcp, val)      raw_cpu_generic_to_op(pcp, val, =)
+#endif
 
-#ifdef MODULE
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION ""
-#else
-#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#ifndef raw_cpu_add_1
+#define raw_cpu_add_1(pcp, val)                raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_2
+#define raw_cpu_add_2(pcp, val)                raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_4
+#define raw_cpu_add_4(pcp, val)                raw_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef raw_cpu_add_8
+#define raw_cpu_add_8(pcp, val)                raw_cpu_generic_to_op(pcp, val, +=)
 #endif
-#define PER_CPU_FIRST_SECTION "..first"
 
-#else
+#ifndef raw_cpu_and_1
+#define raw_cpu_and_1(pcp, val)                raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_2
+#define raw_cpu_and_2(pcp, val)                raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_4
+#define raw_cpu_and_4(pcp, val)                raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef raw_cpu_and_8
+#define raw_cpu_and_8(pcp, val)                raw_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef raw_cpu_or_1
+#define raw_cpu_or_1(pcp, val)         raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_2
+#define raw_cpu_or_2(pcp, val)         raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_4
+#define raw_cpu_or_4(pcp, val)         raw_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef raw_cpu_or_8
+#define raw_cpu_or_8(pcp, val)         raw_cpu_generic_to_op(pcp, val, |=)
+#endif
 
-#define PER_CPU_SHARED_ALIGNED_SECTION ""
-#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
-#define PER_CPU_FIRST_SECTION ""
+#ifndef raw_cpu_add_return_1
+#define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_2
+#define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_4
+#define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef raw_cpu_add_return_8
+#define raw_cpu_add_return_8(pcp, val) raw_cpu_generic_add_return(pcp, val)
+#endif
 
+#ifndef raw_cpu_xchg_1
+#define raw_cpu_xchg_1(pcp, nval)      raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_2
+#define raw_cpu_xchg_2(pcp, nval)      raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_4
+#define raw_cpu_xchg_4(pcp, nval)      raw_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef raw_cpu_xchg_8
+#define raw_cpu_xchg_8(pcp, nval)      raw_cpu_generic_xchg(pcp, nval)
 #endif
 
-#ifndef PER_CPU_ATTRIBUTES
-#define PER_CPU_ATTRIBUTES
+#ifndef raw_cpu_cmpxchg_1
+#define raw_cpu_cmpxchg_1(pcp, oval, nval) \
+       raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_2
+#define raw_cpu_cmpxchg_2(pcp, oval, nval) \
+       raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_4
+#define raw_cpu_cmpxchg_4(pcp, oval, nval) \
+       raw_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef raw_cpu_cmpxchg_8
+#define raw_cpu_cmpxchg_8(pcp, oval, nval) \
+       raw_cpu_generic_cmpxchg(pcp, oval, nval)
 #endif
 
-#ifndef PER_CPU_DEF_ATTRIBUTES
-#define PER_CPU_DEF_ATTRIBUTES
+#ifndef raw_cpu_cmpxchg_double_1
+#define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_2
+#define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_4
+#define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef raw_cpu_cmpxchg_double_8
+#define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+
+#ifndef this_cpu_read_1
+#define this_cpu_read_1(pcp)           this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_2
+#define this_cpu_read_2(pcp)           this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_4
+#define this_cpu_read_4(pcp)           this_cpu_generic_read(pcp)
+#endif
+#ifndef this_cpu_read_8
+#define this_cpu_read_8(pcp)           this_cpu_generic_read(pcp)
 #endif
 
-/* Keep until we have removed all uses of __this_cpu_ptr */
-#define __this_cpu_ptr raw_cpu_ptr
+#ifndef this_cpu_write_1
+#define this_cpu_write_1(pcp, val)     this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_2
+#define this_cpu_write_2(pcp, val)     this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_4
+#define this_cpu_write_4(pcp, val)     this_cpu_generic_to_op(pcp, val, =)
+#endif
+#ifndef this_cpu_write_8
+#define this_cpu_write_8(pcp, val)     this_cpu_generic_to_op(pcp, val, =)
+#endif
+
+#ifndef this_cpu_add_1
+#define this_cpu_add_1(pcp, val)       this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_2
+#define this_cpu_add_2(pcp, val)       this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_4
+#define this_cpu_add_4(pcp, val)       this_cpu_generic_to_op(pcp, val, +=)
+#endif
+#ifndef this_cpu_add_8
+#define this_cpu_add_8(pcp, val)       this_cpu_generic_to_op(pcp, val, +=)
+#endif
+
+#ifndef this_cpu_and_1
+#define this_cpu_and_1(pcp, val)       this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_2
+#define this_cpu_and_2(pcp, val)       this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_4
+#define this_cpu_and_4(pcp, val)       this_cpu_generic_to_op(pcp, val, &=)
+#endif
+#ifndef this_cpu_and_8
+#define this_cpu_and_8(pcp, val)       this_cpu_generic_to_op(pcp, val, &=)
+#endif
+
+#ifndef this_cpu_or_1
+#define this_cpu_or_1(pcp, val)                this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_2
+#define this_cpu_or_2(pcp, val)                this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_4
+#define this_cpu_or_4(pcp, val)                this_cpu_generic_to_op(pcp, val, |=)
+#endif
+#ifndef this_cpu_or_8
+#define this_cpu_or_8(pcp, val)                this_cpu_generic_to_op(pcp, val, |=)
+#endif
+
+#ifndef this_cpu_add_return_1
+#define this_cpu_add_return_1(pcp, val)        this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_2
+#define this_cpu_add_return_2(pcp, val)        this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_4
+#define this_cpu_add_return_4(pcp, val)        this_cpu_generic_add_return(pcp, val)
+#endif
+#ifndef this_cpu_add_return_8
+#define this_cpu_add_return_8(pcp, val)        this_cpu_generic_add_return(pcp, val)
+#endif
+
+#ifndef this_cpu_xchg_1
+#define this_cpu_xchg_1(pcp, nval)     this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_2
+#define this_cpu_xchg_2(pcp, nval)     this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_4
+#define this_cpu_xchg_4(pcp, nval)     this_cpu_generic_xchg(pcp, nval)
+#endif
+#ifndef this_cpu_xchg_8
+#define this_cpu_xchg_8(pcp, nval)     this_cpu_generic_xchg(pcp, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_1
+#define this_cpu_cmpxchg_1(pcp, oval, nval) \
+       this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_2
+#define this_cpu_cmpxchg_2(pcp, oval, nval) \
+       this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_4
+#define this_cpu_cmpxchg_4(pcp, oval, nval) \
+       this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+#ifndef this_cpu_cmpxchg_8
+#define this_cpu_cmpxchg_8(pcp, oval, nval) \
+       this_cpu_generic_cmpxchg(pcp, oval, nval)
+#endif
+
+#ifndef this_cpu_cmpxchg_double_1
+#define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_2
+#define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_4
+#define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
+#ifndef this_cpu_cmpxchg_double_8
+#define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
+#endif
 
 #endif /* _ASM_GENERIC_PERCPU_H_ */
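
Each size-specific per-cpu operation now has a generic fallback an architecture can override one at a time: the raw_* forms leave protection to the caller, while the this_cpu_* forms wrap the same body in raw_local_irq_save()/restore so the read-modify-write cannot be torn by an interrupt. The cmpxchg fallback, lifted into a standalone (GNU C, statement-expression) illustration on an ordinary variable:

#include <stdio.h>

#define raw_cpu_generic_cmpxchg(pcp, oval, nval)	\
({							\
	__typeof__(pcp) __ret = (pcp);			\
	if (__ret == (oval))				\
		(pcp) = (nval);				\
	__ret;						\
})

int main(void)
{
	int slot = 5;

	int old = raw_cpu_generic_cmpxchg(slot, 5, 9);	/* hits: 5 -> 9 */
	printf("old=%d slot=%d\n", old, slot);
	old = raw_cpu_generic_cmpxchg(slot, 5, 7);	/* misses: unchanged */
	printf("old=%d slot=%d\n", old, slot);
	return 0;
}
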
index 471ba48..c1c0b0c 100644 (file)
        . = ALIGN(PAGE_SIZE);                                           \
        *(.data..percpu..page_aligned)                                  \
        . = ALIGN(cacheline);                                           \
-       *(.data..percpu..readmostly)                                    \
+       *(.data..percpu..read_mostly)                                   \
        . = ALIGN(cacheline);                                           \
        *(.data..percpu)                                                \
        *(.data..percpu..shared_aligned)                                \
index 0edf949..94b19be 100644 (file)
@@ -75,9 +75,9 @@ static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
 
 static inline void aead_givcrypt_set_callback(
        struct aead_givcrypt_request *req, u32 flags,
-       crypto_completion_t complete, void *data)
+       crypto_completion_t compl, void *data)
 {
-       aead_request_set_callback(&req->areq, flags, complete, data);
+       aead_request_set_callback(&req->areq, flags, compl, data);
 }
 
 static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
index 016c2f1..623a59c 100644 (file)
@@ -410,4 +410,10 @@ static inline int crypto_memneq(const void *a, const void *b, size_t size)
        return __crypto_memneq(a, b, size) != 0UL ? 1 : 0;
 }
 
+static inline void crypto_yield(u32 flags)
+{
+       if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
+               cond_resched();
+}
+
 #endif /* _CRYPTO_ALGAPI_H */
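
The crypto_yield() helper moved here is a conditional reschedule point: a sleep-capable request may give up the CPU between chunks of work, while atomic callers see a no-op. A minimal sketch of the intended call pattern (the loop and names are illustrative, not from this patch):

    static void example_process_blocks(unsigned int nblocks, u32 flags)
    {
            unsigned int i;

            for (i = 0; i < nblocks; i++) {
                    /* ... transform one cipher block ... */

                    /* cond_resched() only if CRYPTO_TFM_REQ_MAY_SLEEP is set */
                    if ((i & 255) == 255)
                            crypto_yield(flags);
            }
    }
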
index 2971c63..fc6274c 100644 (file)
@@ -16,4 +16,7 @@
 
 extern unsigned long des_ekey(u32 *pe, const u8 *k);
 
+extern int __des3_ede_setkey(u32 *expkey, u32 *flags, const u8 *key,
+                            unsigned int keylen);
+
 #endif /* __CRYPTO_DES_H */
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
new file mode 100644 (file)
index 0000000..831d786
--- /dev/null
@@ -0,0 +1,290 @@
+/*
+ * DRBG based on NIST SP800-90A
+ *
+ * Copyright Stephan Mueller <smueller@chronox.de>, 2014
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, and the entire permission notice in its entirety,
+ *    including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ *    products derived from this software without specific prior
+ *    written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions.  (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+#ifndef _DRBG_H
+#define _DRBG_H
+
+#include <linux/random.h>
+#include <linux/scatterlist.h>
+#include <crypto/hash.h>
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/slab.h>
+#include <crypto/internal/rng.h>
+#include <crypto/rng.h>
+#include <linux/fips.h>
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+/*
+ * Concatenation helper and string operation helper
+ *
+ * SP800-90A requires the concatenation of different data. To avoid copying
+ * buffers around or allocating additional memory, the following data
+ * structure is used to point to the original memory with its size. In
+ * addition, it is used to build a linked list. The linked list defines the
+ * concatenation of individual buffers. The order of the memory blocks
+ * referenced in that linked list determines the order of concatenation.
+ */
+struct drbg_string {
+       const unsigned char *buf;
+       size_t len;
+       struct list_head list;
+};
+
+static inline void drbg_string_fill(struct drbg_string *string,
+                                   const unsigned char *buf, size_t len)
+{
+       string->buf = buf;
+       string->len = len;
+       INIT_LIST_HEAD(&string->list);
+}
+
+struct drbg_state;
+typedef uint32_t drbg_flag_t;
+
+struct drbg_core {
+       drbg_flag_t flags;      /* flags for the cipher */
+       __u8 statelen;          /* maximum state length */
+       /*
+        * maximum length of personalization string or additional input
+        * string -- exponent for base 2
+        */
+       __u8 max_addtllen;
+       /* maximum bits per RNG request -- exponent for base 2 */
+       __u8 max_bits;
+       /* maximum number of requests -- exponent for base 2 */
+       __u8 max_req;
+       __u8 blocklen_bytes;    /* block size of output in bytes */
+       char cra_name[CRYPTO_MAX_ALG_NAME]; /* mapping to kernel crypto API */
+        /* kernel crypto API backend cipher name */
+       char backend_cra_name[CRYPTO_MAX_ALG_NAME];
+};
+
+struct drbg_state_ops {
+       int (*update)(struct drbg_state *drbg, struct list_head *seed,
+                     int reseed);
+       int (*generate)(struct drbg_state *drbg,
+                       unsigned char *buf, unsigned int buflen,
+                       struct list_head *addtl);
+       int (*crypto_init)(struct drbg_state *drbg);
+       int (*crypto_fini)(struct drbg_state *drbg);
+};
+
+struct drbg_test_data {
+       struct drbg_string *testentropy; /* TEST PARAMETER: test entropy */
+};
+
+struct drbg_state {
+       spinlock_t drbg_lock;   /* lock around DRBG */
+       unsigned char *V;       /* internal state 10.1.1.1 1a) */
+       /* hash: static value 10.1.1.1 1b) hmac / ctr: key */
+       unsigned char *C;
+       /* Number of RNG requests since last reseed -- 10.1.1.1 1c) */
+       size_t reseed_ctr;
+        /* some memory the DRBG can use for its operation */
+       unsigned char *scratchpad;
+       void *priv_data;        /* Cipher handle */
+       bool seeded;            /* DRBG fully seeded? */
+       bool pr;                /* Prediction resistance enabled? */
+#ifdef CONFIG_CRYPTO_FIPS
+       bool fips_primed;       /* Continuous test primed? */
+       unsigned char *prev;    /* FIPS 140-2 continuous test value */
+#endif
+       const struct drbg_state_ops *d_ops;
+       const struct drbg_core *core;
+       struct drbg_test_data *test_data;
+};
+
+static inline __u8 drbg_statelen(struct drbg_state *drbg)
+{
+       if (drbg && drbg->core)
+               return drbg->core->statelen;
+       return 0;
+}
+
+static inline __u8 drbg_blocklen(struct drbg_state *drbg)
+{
+       if (drbg && drbg->core)
+               return drbg->core->blocklen_bytes;
+       return 0;
+}
+
+static inline __u8 drbg_keylen(struct drbg_state *drbg)
+{
+       if (drbg && drbg->core)
+               return (drbg->core->statelen - drbg->core->blocklen_bytes);
+       return 0;
+}
+
+static inline size_t drbg_max_request_bytes(struct drbg_state *drbg)
+{
+       /* max_bits is in bits, but buflen is in bytes */
+       return (1 << (drbg->core->max_bits - 3));
+}
+
+static inline size_t drbg_max_addtl(struct drbg_state *drbg)
+{
+       return (1UL<<(drbg->core->max_addtllen));
+}
+
+static inline size_t drbg_max_requests(struct drbg_state *drbg)
+{
+       return (1UL<<(drbg->core->max_req));
+}
+
+/*
+ * Kernel crypto API input data structure for the DRBG generate operation
+ * in case dlen is set to 0.
+ */
+struct drbg_gen {
+       unsigned char *outbuf;  /* output buffer for random numbers */
+       unsigned int outlen;    /* size of output buffer */
+       struct drbg_string *addtl;      /* additional information string */
+       struct drbg_test_data *test_data;       /* test data */
+};
+
+/*
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_get_bytes() which allows the caller to provide additional
+ * data.
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string (struct drbg_string)
+ *
+ * return
+ *     see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl(struct crypto_rng *drng,
+                       unsigned char *outbuf, unsigned int outlen,
+                       struct drbg_string *addtl)
+{
+       int ret;
+       struct drbg_gen genbuf;
+       genbuf.outbuf = outbuf;
+       genbuf.outlen = outlen;
+       genbuf.addtl = addtl;
+       genbuf.test_data = NULL;
+       ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
+       return ret;
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_get_bytes() which allows the caller to provide additional
+ * data as well as test data.
+ *
+ * @drng DRBG handle -- see crypto_rng_get_bytes
+ * @outbuf output buffer -- see crypto_rng_get_bytes
+ * @outlen length of output buffer -- see crypto_rng_get_bytes
+ * @addtl additional information string (struct drbg_string)
+ * @test_data filled test data
+ *
+ * return
+ *     see crypto_rng_get_bytes
+ */
+static inline int crypto_drbg_get_bytes_addtl_test(struct crypto_rng *drng,
+                       unsigned char *outbuf, unsigned int outlen,
+                       struct drbg_string *addtl,
+                       struct drbg_test_data *test_data)
+{
+       int ret;
+       struct drbg_gen genbuf;
+       genbuf.outbuf = outbuf;
+       genbuf.outlen = outlen;
+       genbuf.addtl = addtl;
+       genbuf.test_data = test_data;
+       ret = crypto_rng_get_bytes(drng, (u8 *)&genbuf, 0);
+       return ret;
+}
+
+/*
+ * TEST code
+ *
+ * This is a wrapper around the kernel crypto API function
+ * crypto_rng_reset() which allows the caller to provide test data.
+ *
+ * @drng DRBG handle -- see crypto_rng_reset
+ * @pers personalization string input buffer (struct drbg_string)
+ * @test_data filled test data
+ *
+ * return
+ *     see crypto_rng_reset
+ */
+static inline int crypto_drbg_reset_test(struct crypto_rng *drng,
+                                        struct drbg_string *pers,
+                                        struct drbg_test_data *test_data)
+{
+       int ret;
+       struct drbg_gen genbuf;
+       genbuf.outbuf = NULL;
+       genbuf.outlen = 0;
+       genbuf.addtl = pers;
+       genbuf.test_data = test_data;
+       ret = crypto_rng_reset(drng, (u8 *)&genbuf, 0);
+       return ret;
+}
+
+/* DRBG type flags */
+#define DRBG_CTR       ((drbg_flag_t)1<<0)
+#define DRBG_HMAC      ((drbg_flag_t)1<<1)
+#define DRBG_HASH      ((drbg_flag_t)1<<2)
+#define DRBG_TYPE_MASK (DRBG_CTR | DRBG_HMAC | DRBG_HASH)
+/* DRBG strength flags */
+#define DRBG_STRENGTH128       ((drbg_flag_t)1<<3)
+#define DRBG_STRENGTH192       ((drbg_flag_t)1<<4)
+#define DRBG_STRENGTH256       ((drbg_flag_t)1<<5)
+#define DRBG_STRENGTH_MASK     (DRBG_STRENGTH128 | DRBG_STRENGTH192 | \
+                                DRBG_STRENGTH256)
+
+enum drbg_prefixes {
+       DRBG_PREFIX0 = 0x00,
+       DRBG_PREFIX1,
+       DRBG_PREFIX2,
+       DRBG_PREFIX3
+};
+
+#endif /* _DRBG_H */
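
Putting the pieces above together, a hedged usage sketch: allocate a DRBG instance through the RNG API, build an additional-input string with drbg_string_fill(), and generate bytes via the wrapper. The instance name follows the drbg_<pr|nopr>_<cipher> convention used by the DRBG implementation; treat the exact name and the error handling here as illustrative.

    #include <linux/err.h>
    #include <crypto/drbg.h>

    static int example_drbg_read(u8 *out, unsigned int outlen)
    {
            static const unsigned char label[] = "example-addtl-input";
            struct crypto_rng *drng;
            struct drbg_string addtl;
            int ret;

            drng = crypto_alloc_rng("drbg_nopr_hmac_sha256", 0, 0);
            if (IS_ERR(drng))
                    return PTR_ERR(drng);

            drbg_string_fill(&addtl, label, sizeof(label) - 1);
            ret = crypto_drbg_get_bytes_addtl(drng, out, outlen, &addtl);

            crypto_free_rng(drng);
            return ret;
    }
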
index 26cb1eb..a391955 100644 (file)
@@ -238,10 +238,10 @@ static inline struct ahash_request *ahash_request_cast(
 
 static inline void ahash_request_set_callback(struct ahash_request *req,
                                              u32 flags,
-                                             crypto_completion_t complete,
+                                             crypto_completion_t compl,
                                              void *data)
 {
-       req->base.complete = complete;
+       req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
 }
index 06e8b32..b3a46c5 100644 (file)
@@ -81,8 +81,7 @@ static inline int skcipher_enqueue_givcrypt(
 static inline struct skcipher_givcrypt_request *skcipher_dequeue_givcrypt(
        struct crypto_queue *queue)
 {
-       return __crypto_dequeue_request(
-               queue, offsetof(struct skcipher_givcrypt_request, creq.base));
+       return skcipher_givcrypt_cast(crypto_dequeue_request(queue));
 }
 
 static inline void *skcipher_givcrypt_reqctx(
index 6a626a5..7ef512f 100644 (file)
 #include <linux/scatterlist.h>
 #include <linux/sched.h>
 
-static inline void crypto_yield(u32 flags)
-{
-       if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
-               cond_resched();
-}
-
 static inline void scatterwalk_sg_chain(struct scatterlist *sg1, int num,
                                        struct scatterlist *sg2)
 {
index 25fd612..07d245f 100644 (file)
@@ -86,9 +86,9 @@ static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req)
 
 static inline void skcipher_givcrypt_set_callback(
        struct skcipher_givcrypt_request *req, u32 flags,
-       crypto_completion_t complete, void *data)
+       crypto_completion_t compl, void *data)
 {
-       ablkcipher_request_set_callback(&req->creq, flags, complete, data);
+       ablkcipher_request_set_callback(&req->creq, flags, compl, data);
 }
 
 static inline void skcipher_givcrypt_set_crypt(
index 0572035..a70d456 100644 (file)
 #define INTEL_BDW_GT3D_IDS(info) \
        _INTEL_BDW_D_IDS(3, info)
 
+#define INTEL_BDW_RSVDM_IDS(info) \
+       _INTEL_BDW_M_IDS(4, info)
+
+#define INTEL_BDW_RSVDD_IDS(info) \
+       _INTEL_BDW_D_IDS(4, info)
+
 #define INTEL_BDW_M_IDS(info) \
        INTEL_BDW_GT12M_IDS(info), \
-       INTEL_BDW_GT3M_IDS(info)
+       INTEL_BDW_GT3M_IDS(info), \
+       INTEL_BDW_RSVDM_IDS(info)
 
 #define INTEL_BDW_D_IDS(info) \
        INTEL_BDW_GT12D_IDS(info), \
-       INTEL_BDW_GT3D_IDS(info)
+       INTEL_BDW_GT3D_IDS(info), \
+       INTEL_BDW_RSVDD_IDS(info)
 
 #define INTEL_CHV_IDS(info) \
        INTEL_VGA_DEVICE(0x22b0, info), \
index cfdc884..baa6f11 100644 (file)
@@ -30,7 +30,8 @@
 #define _I915_POWERWELL_H_
 
 /* For use by hda_i915 driver */
-extern void i915_request_power_well(void);
-extern void i915_release_power_well(void);
+extern int i915_request_power_well(void);
+extern int i915_release_power_well(void);
+extern int i915_get_cdclk_freq(void);
 
 #endif                         /* _I915_POWERWELL_H_ */
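
With the hooks now returning int, the audio side can propagate failure instead of assuming the power well came up. A hypothetical consumer (the function body is a sketch, not from the i915 or HDA code):

    static int example_enable_display_audio(void)
    {
            int ret;

            ret = i915_request_power_well();        /* may fail, e.g. i915 absent */
            if (ret < 0)
                    return ret;

            /* ... program the HDMI/DP audio hardware ... */

            i915_release_power_well();              /* result ignored on teardown */
            return 0;
    }
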
diff --git a/include/dt-bindings/clock/clps711x-clock.h b/include/dt-bindings/clock/clps711x-clock.h
new file mode 100644 (file)
index 0000000..0c4c80b
--- /dev/null
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_CLPS711X_H
+#define __DT_BINDINGS_CLOCK_CLPS711X_H
+
+#define CLPS711X_CLK_DUMMY     0
+#define CLPS711X_CLK_CPU       1
+#define CLPS711X_CLK_BUS       2
+#define CLPS711X_CLK_PLL       3
+#define CLPS711X_CLK_TIMERREF  4
+#define CLPS711X_CLK_TIMER1    5
+#define CLPS711X_CLK_TIMER2    6
+#define CLPS711X_CLK_PWM       7
+#define CLPS711X_CLK_SPIREF    8
+#define CLPS711X_CLK_SPI       9
+#define CLPS711X_CLK_UART      10
+#define CLPS711X_CLK_TICK      11
+#define CLPS711X_CLK_MAX       12
+
+#endif
index 1106ca5..459bd2b 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Haja <a.hajda@samsung.com>
+ * Author: Andrzej Hajda <a.hajda@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #define CLK_MOUT_CORE          19
 #define CLK_MOUT_APLL          20
 #define CLK_SCLK_HDMIPHY       22
+#define CLK_OUT_DMC            23
+#define CLK_OUT_TOP            24
+#define CLK_OUT_LEFTBUS                25
+#define CLK_OUT_RIGHTBUS       26
+#define CLK_OUT_CPU            27
 
 /* gate for special clocks (sclk) */
 #define CLK_SCLK_FIMC0         128
 #define CLK_MOUT_G3D           394
 #define CLK_ACLK400_MCUISP     395 /* Exynos4x12 only */
 
+/* gate clocks - ppmu */
+#define CLK_PPMULEFT           400
+#define CLK_PPMURIGHT          401
+#define CLK_PPMUCAMIF          402
+#define CLK_PPMUTV             403
+#define CLK_PPMUMFC_L          404
+#define CLK_PPMUMFC_R          405
+#define CLK_PPMUG3D            406
+#define CLK_PPMUIMAGE          407
+#define CLK_PPMULCD0           408
+#define CLK_PPMULCD1           409 /* Exynos4210 only */
+#define CLK_PPMUFILE           410
+#define CLK_PPMUGPS            411
+#define CLK_PPMUDMC0           412
+#define CLK_PPMUDMC1           413
+#define CLK_PPMUCPU            414
+#define CLK_PPMUACP            415
+
 /* div clocks */
 #define CLK_DIV_ISP0           450 /* Exynos4x12 only */
 #define CLK_DIV_ISP1           451 /* Exynos4x12 only */
index be6e97c..4273891 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Haja <a.hajda@samsung.com>
+ * Author: Andrzej Hajda <a.hajda@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
index 97dcb89..8dc0913 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Haja <a.hajda@samsung.com>
+ * Author: Andrzej Hajda <a.hajda@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -63,7 +63,6 @@
 #define CLK_SCLK_MPHY_IXTAL24  161
 
 /* gate clocks */
-#define CLK_ACLK66_PERIC       256
 #define CLK_UART0              257
 #define CLK_UART1              258
 #define CLK_UART2              259
 #define CLK_MOUT_G3D           641
 #define CLK_MOUT_VPLL          642
 #define CLK_MOUT_MAUDIO0       643
+#define CLK_MOUT_USER_ACLK333  644
+#define CLK_MOUT_SW_ACLK333    645
 
 /* divider clocks */
 #define CLK_DOUT_PIXEL         768
index 70cd850..c66fc40 100644 (file)
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Haja <a.hajda-Sze3O3UU22JBDgjK7y7TUQ@public.gmane.org>
+ * Author: Andrzej Hajda <a.hajda@samsung.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
index 7cf5c99..b91dd46 100644 (file)
 #define IMX6SL_CLK_USDHC4              132
 #define IMX6SL_CLK_PLL4_AUDIO_DIV      133
 #define IMX6SL_CLK_SPBA                        134
-#define IMX6SL_CLK_END                 135
+#define IMX6SL_CLK_ENET                        135
+#define IMX6SL_CLK_END                 136
 
 #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-apq8084.h b/include/dt-bindings/clock/qcom,gcc-apq8084.h
new file mode 100644 (file)
index 0000000..2c0da56
--- /dev/null
@@ -0,0 +1,351 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_APQ_GCC_8084_H
+#define _DT_BINDINGS_CLK_APQ_GCC_8084_H
+
+#define GPLL0                                          0
+#define GPLL0_VOTE                                     1
+#define GPLL1                                          2
+#define GPLL1_VOTE                                     3
+#define GPLL2                                          4
+#define GPLL2_VOTE                                     5
+#define GPLL3                                          6
+#define GPLL3_VOTE                                     7
+#define GPLL4                                          8
+#define GPLL4_VOTE                                     9
+#define CONFIG_NOC_CLK_SRC                             10
+#define PERIPH_NOC_CLK_SRC                             11
+#define SYSTEM_NOC_CLK_SRC                             12
+#define BLSP_UART_SIM_CLK_SRC                          13
+#define QDSS_TSCTR_CLK_SRC                             14
+#define UFS_AXI_CLK_SRC                                        15
+#define RPM_CLK_SRC                                    16
+#define KPSS_AHB_CLK_SRC                               17
+#define QDSS_AT_CLK_SRC                                        18
+#define BIMC_DDR_CLK_SRC                               19
+#define USB30_MASTER_CLK_SRC                           20
+#define USB30_SEC_MASTER_CLK_SRC                       21
+#define USB_HSIC_AHB_CLK_SRC                           22
+#define MMSS_BIMC_GFX_CLK_SRC                          23
+#define QDSS_STM_CLK_SRC                               24
+#define ACC_CLK_SRC                                    25
+#define SEC_CTRL_CLK_SRC                               26
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC                    27
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC                    28
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC                    29
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC                    30
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC                    31
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC                    32
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC                    33
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC                    34
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC                    35
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC                    36
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC                    37
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC                    38
+#define BLSP1_UART1_APPS_CLK_SRC                       39
+#define BLSP1_UART2_APPS_CLK_SRC                       40
+#define BLSP1_UART3_APPS_CLK_SRC                       41
+#define BLSP1_UART4_APPS_CLK_SRC                       42
+#define BLSP1_UART5_APPS_CLK_SRC                       43
+#define BLSP1_UART6_APPS_CLK_SRC                       44
+#define BLSP2_QUP1_I2C_APPS_CLK_SRC                    45
+#define BLSP2_QUP1_SPI_APPS_CLK_SRC                    46
+#define BLSP2_QUP2_I2C_APPS_CLK_SRC                    47
+#define BLSP2_QUP2_SPI_APPS_CLK_SRC                    48
+#define BLSP2_QUP3_I2C_APPS_CLK_SRC                    49
+#define BLSP2_QUP3_SPI_APPS_CLK_SRC                    50
+#define BLSP2_QUP4_I2C_APPS_CLK_SRC                    51
+#define BLSP2_QUP4_SPI_APPS_CLK_SRC                    52
+#define BLSP2_QUP5_I2C_APPS_CLK_SRC                    53
+#define BLSP2_QUP5_SPI_APPS_CLK_SRC                    54
+#define BLSP2_QUP6_I2C_APPS_CLK_SRC                    55
+#define BLSP2_QUP6_SPI_APPS_CLK_SRC                    56
+#define BLSP2_UART1_APPS_CLK_SRC                       57
+#define BLSP2_UART2_APPS_CLK_SRC                       58
+#define BLSP2_UART3_APPS_CLK_SRC                       59
+#define BLSP2_UART4_APPS_CLK_SRC                       60
+#define BLSP2_UART5_APPS_CLK_SRC                       61
+#define BLSP2_UART6_APPS_CLK_SRC                       62
+#define CE1_CLK_SRC                                    63
+#define CE2_CLK_SRC                                    64
+#define CE3_CLK_SRC                                    65
+#define GP1_CLK_SRC                                    66
+#define GP2_CLK_SRC                                    67
+#define GP3_CLK_SRC                                    68
+#define PDM2_CLK_SRC                                   69
+#define QDSS_TRACECLKIN_CLK_SRC                                70
+#define RBCPR_CLK_SRC                                  71
+#define SATA_ASIC0_CLK_SRC                             72
+#define SATA_PMALIVE_CLK_SRC                           73
+#define SATA_RX_CLK_SRC                                        74
+#define SATA_RX_OOB_CLK_SRC                            75
+#define SDCC1_APPS_CLK_SRC                             76
+#define SDCC2_APPS_CLK_SRC                             77
+#define SDCC3_APPS_CLK_SRC                             78
+#define SDCC4_APPS_CLK_SRC                             79
+#define GCC_SNOC_BUS_TIMEOUT0_AHB_CLK                  80
+#define SPMI_AHB_CLK_SRC                               81
+#define SPMI_SER_CLK_SRC                               82
+#define TSIF_REF_CLK_SRC                               83
+#define USB30_MOCK_UTMI_CLK_SRC                                84
+#define USB30_SEC_MOCK_UTMI_CLK_SRC                    85
+#define USB_HS_SYSTEM_CLK_SRC                          86
+#define USB_HSIC_CLK_SRC                               87
+#define USB_HSIC_IO_CAL_CLK_SRC                                88
+#define USB_HSIC_MOCK_UTMI_CLK_SRC                     89
+#define USB_HSIC_SYSTEM_CLK_SRC                                90
+#define GCC_BAM_DMA_AHB_CLK                            91
+#define GCC_BAM_DMA_INACTIVITY_TIMERS_CLK              92
+#define DDR_CLK_SRC                                    93
+#define GCC_BIMC_CFG_AHB_CLK                           94
+#define GCC_BIMC_CLK                                   95
+#define GCC_BIMC_KPSS_AXI_CLK                          96
+#define GCC_BIMC_SLEEP_CLK                             97
+#define GCC_BIMC_SYSNOC_AXI_CLK                                98
+#define GCC_BIMC_XO_CLK                                        99
+#define GCC_BLSP1_AHB_CLK                              100
+#define GCC_BLSP1_SLEEP_CLK                            101
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK                    102
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK                    103
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK                    104
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK                    105
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK                    106
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK                    107
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK                    108
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK                    109
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK                    110
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK                    111
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK                    112
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK                    113
+#define GCC_BLSP1_UART1_APPS_CLK                       114
+#define GCC_BLSP1_UART1_SIM_CLK                                115
+#define GCC_BLSP1_UART2_APPS_CLK                       116
+#define GCC_BLSP1_UART2_SIM_CLK                                117
+#define GCC_BLSP1_UART3_APPS_CLK                       118
+#define GCC_BLSP1_UART3_SIM_CLK                                119
+#define GCC_BLSP1_UART4_APPS_CLK                       120
+#define GCC_BLSP1_UART4_SIM_CLK                                121
+#define GCC_BLSP1_UART5_APPS_CLK                       122
+#define GCC_BLSP1_UART5_SIM_CLK                                123
+#define GCC_BLSP1_UART6_APPS_CLK                       124
+#define GCC_BLSP1_UART6_SIM_CLK                                125
+#define GCC_BLSP2_AHB_CLK                              126
+#define GCC_BLSP2_SLEEP_CLK                            127
+#define GCC_BLSP2_QUP1_I2C_APPS_CLK                    128
+#define GCC_BLSP2_QUP1_SPI_APPS_CLK                    129
+#define GCC_BLSP2_QUP2_I2C_APPS_CLK                    130
+#define GCC_BLSP2_QUP2_SPI_APPS_CLK                    131
+#define GCC_BLSP2_QUP3_I2C_APPS_CLK                    132
+#define GCC_BLSP2_QUP3_SPI_APPS_CLK                    133
+#define GCC_BLSP2_QUP4_I2C_APPS_CLK                    134
+#define GCC_BLSP2_QUP4_SPI_APPS_CLK                    135
+#define GCC_BLSP2_QUP5_I2C_APPS_CLK                    136
+#define GCC_BLSP2_QUP5_SPI_APPS_CLK                    137
+#define GCC_BLSP2_QUP6_I2C_APPS_CLK                    138
+#define GCC_BLSP2_QUP6_SPI_APPS_CLK                    139
+#define GCC_BLSP2_UART1_APPS_CLK                       140
+#define GCC_BLSP2_UART1_SIM_CLK                                141
+#define GCC_BLSP2_UART2_APPS_CLK                       142
+#define GCC_BLSP2_UART2_SIM_CLK                                143
+#define GCC_BLSP2_UART3_APPS_CLK                       144
+#define GCC_BLSP2_UART3_SIM_CLK                                145
+#define GCC_BLSP2_UART4_APPS_CLK                       146
+#define GCC_BLSP2_UART4_SIM_CLK                                147
+#define GCC_BLSP2_UART5_APPS_CLK                       148
+#define GCC_BLSP2_UART5_SIM_CLK                                149
+#define GCC_BLSP2_UART6_APPS_CLK                       150
+#define GCC_BLSP2_UART6_SIM_CLK                                151
+#define GCC_BOOT_ROM_AHB_CLK                           152
+#define GCC_CE1_AHB_CLK                                        153
+#define GCC_CE1_AXI_CLK                                        154
+#define GCC_CE1_CLK                                    155
+#define GCC_CE2_AHB_CLK                                        156
+#define GCC_CE2_AXI_CLK                                        157
+#define GCC_CE2_CLK                                    158
+#define GCC_CE3_AHB_CLK                                        159
+#define GCC_CE3_AXI_CLK                                        160
+#define GCC_CE3_CLK                                    161
+#define GCC_CNOC_BUS_TIMEOUT0_AHB_CLK                  162
+#define GCC_CNOC_BUS_TIMEOUT1_AHB_CLK                  163
+#define GCC_CNOC_BUS_TIMEOUT2_AHB_CLK                  164
+#define GCC_CNOC_BUS_TIMEOUT3_AHB_CLK                  165
+#define GCC_CNOC_BUS_TIMEOUT4_AHB_CLK                  166
+#define GCC_CNOC_BUS_TIMEOUT5_AHB_CLK                  167
+#define GCC_CNOC_BUS_TIMEOUT6_AHB_CLK                  168
+#define GCC_CNOC_BUS_TIMEOUT7_AHB_CLK                  169
+#define GCC_CFG_NOC_AHB_CLK                            170
+#define GCC_CFG_NOC_DDR_CFG_CLK                                171
+#define GCC_CFG_NOC_RPM_AHB_CLK                                172
+#define GCC_COPSS_SMMU_AHB_CLK                         173
+#define GCC_COPSS_SMMU_AXI_CLK                         174
+#define GCC_DCD_XO_CLK                                 175
+#define GCC_BIMC_DDR_CH0_CLK                           176
+#define GCC_BIMC_DDR_CH1_CLK                           177
+#define GCC_BIMC_DDR_CPLL0_CLK                         178
+#define GCC_BIMC_DDR_CPLL1_CLK                         179
+#define GCC_BIMC_GFX_CLK                               180
+#define GCC_DDR_DIM_CFG_CLK                            181
+#define GCC_DDR_DIM_SLEEP_CLK                          182
+#define GCC_DEHR_CLK                                   183
+#define GCC_AHB_CLK                                    184
+#define GCC_IM_SLEEP_CLK                               185
+#define GCC_XO_CLK                                     186
+#define GCC_XO_DIV4_CLK                                        187
+#define GCC_GP1_CLK                                    188
+#define GCC_GP2_CLK                                    189
+#define GCC_GP3_CLK                                    190
+#define GCC_IMEM_AXI_CLK                               191
+#define GCC_IMEM_CFG_AHB_CLK                           192
+#define GCC_KPSS_AHB_CLK                               193
+#define GCC_KPSS_AXI_CLK                               194
+#define GCC_LPASS_MPORT_AXI_CLK                                195
+#define GCC_LPASS_Q6_AXI_CLK                           196
+#define GCC_LPASS_SWAY_CLK                             197
+#define GCC_MMSS_BIMC_GFX_CLK                          198
+#define GCC_MMSS_NOC_AT_CLK                            199
+#define GCC_MMSS_NOC_CFG_AHB_CLK                       200
+#define GCC_MMSS_VPU_MAPLE_SYS_NOC_AXI_CLK             201
+#define GCC_OCMEM_NOC_CFG_AHB_CLK                      202
+#define GCC_OCMEM_SYS_NOC_AXI_CLK                      203
+#define GCC_MPM_AHB_CLK                                        204
+#define GCC_MSG_RAM_AHB_CLK                            205
+#define GCC_NOC_CONF_XPU_AHB_CLK                       206
+#define GCC_PDM2_CLK                                   207
+#define GCC_PDM_AHB_CLK                                        208
+#define GCC_PDM_XO4_CLK                                        209
+#define GCC_PERIPH_NOC_AHB_CLK                         210
+#define GCC_PERIPH_NOC_AT_CLK                          211
+#define GCC_PERIPH_NOC_CFG_AHB_CLK                     212
+#define GCC_PERIPH_NOC_USB_HSIC_AHB_CLK                        213
+#define GCC_PERIPH_NOC_MPU_CFG_AHB_CLK                 214
+#define GCC_PERIPH_XPU_AHB_CLK                         215
+#define GCC_PNOC_BUS_TIMEOUT0_AHB_CLK                  216
+#define GCC_PNOC_BUS_TIMEOUT1_AHB_CLK                  217
+#define GCC_PNOC_BUS_TIMEOUT2_AHB_CLK                  218
+#define GCC_PNOC_BUS_TIMEOUT3_AHB_CLK                  219
+#define GCC_PNOC_BUS_TIMEOUT4_AHB_CLK                  220
+#define GCC_PRNG_AHB_CLK                               221
+#define GCC_QDSS_AT_CLK                                        222
+#define GCC_QDSS_CFG_AHB_CLK                           223
+#define GCC_QDSS_DAP_AHB_CLK                           224
+#define GCC_QDSS_DAP_CLK                               225
+#define GCC_QDSS_ETR_USB_CLK                           226
+#define GCC_QDSS_STM_CLK                               227
+#define GCC_QDSS_TRACECLKIN_CLK                                228
+#define GCC_QDSS_TSCTR_DIV16_CLK                       229
+#define GCC_QDSS_TSCTR_DIV2_CLK                                230
+#define GCC_QDSS_TSCTR_DIV3_CLK                                231
+#define GCC_QDSS_TSCTR_DIV4_CLK                                232
+#define GCC_QDSS_TSCTR_DIV8_CLK                                233
+#define GCC_QDSS_RBCPR_XPU_AHB_CLK                     234
+#define GCC_RBCPR_AHB_CLK                              235
+#define GCC_RBCPR_CLK                                  236
+#define GCC_RPM_BUS_AHB_CLK                            237
+#define GCC_RPM_PROC_HCLK                              238
+#define GCC_RPM_SLEEP_CLK                              239
+#define GCC_RPM_TIMER_CLK                              240
+#define GCC_SATA_ASIC0_CLK                             241
+#define GCC_SATA_AXI_CLK                               242
+#define GCC_SATA_CFG_AHB_CLK                           243
+#define GCC_SATA_PMALIVE_CLK                           244
+#define GCC_SATA_RX_CLK                                        245
+#define GCC_SATA_RX_OOB_CLK                            246
+#define GCC_SDCC1_AHB_CLK                              247
+#define GCC_SDCC1_APPS_CLK                             248
+#define GCC_SDCC1_CDCCAL_FF_CLK                                249
+#define GCC_SDCC1_CDCCAL_SLEEP_CLK                     250
+#define GCC_SDCC2_AHB_CLK                              251
+#define GCC_SDCC2_APPS_CLK                             252
+#define GCC_SDCC2_INACTIVITY_TIMERS_CLK                        253
+#define GCC_SDCC3_AHB_CLK                              254
+#define GCC_SDCC3_APPS_CLK                             255
+#define GCC_SDCC3_INACTIVITY_TIMERS_CLK                        256
+#define GCC_SDCC4_AHB_CLK                              257
+#define GCC_SDCC4_APPS_CLK                             258
+#define GCC_SDCC4_INACTIVITY_TIMERS_CLK                        259
+#define GCC_SEC_CTRL_ACC_CLK                           260
+#define GCC_SEC_CTRL_AHB_CLK                           261
+#define GCC_SEC_CTRL_BOOT_ROM_PATCH_CLK                        262
+#define GCC_SEC_CTRL_CLK                               263
+#define GCC_SEC_CTRL_SENSE_CLK                         264
+#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK                  265
+#define GCC_SNOC_BUS_TIMEOUT3_AHB_CLK                  266
+#define GCC_SPDM_BIMC_CY_CLK                           267
+#define GCC_SPDM_CFG_AHB_CLK                           268
+#define GCC_SPDM_DEBUG_CY_CLK                          269
+#define GCC_SPDM_FF_CLK                                        270
+#define GCC_SPDM_MSTR_AHB_CLK                          271
+#define GCC_SPDM_PNOC_CY_CLK                           272
+#define GCC_SPDM_RPM_CY_CLK                            273
+#define GCC_SPDM_SNOC_CY_CLK                           274
+#define GCC_SPMI_AHB_CLK                               275
+#define GCC_SPMI_CNOC_AHB_CLK                          276
+#define GCC_SPMI_SER_CLK                               277
+#define GCC_SPSS_AHB_CLK                               278
+#define GCC_SNOC_CNOC_AHB_CLK                          279
+#define GCC_SNOC_PNOC_AHB_CLK                          280
+#define GCC_SYS_NOC_AT_CLK                             281
+#define GCC_SYS_NOC_AXI_CLK                            282
+#define GCC_SYS_NOC_KPSS_AHB_CLK                       283
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK                   284
+#define GCC_SYS_NOC_UFS_AXI_CLK                                285
+#define GCC_SYS_NOC_USB3_AXI_CLK                       286
+#define GCC_SYS_NOC_USB3_SEC_AXI_CLK                   287
+#define GCC_TCSR_AHB_CLK                               288
+#define GCC_TLMM_AHB_CLK                               289
+#define GCC_TLMM_CLK                                   290
+#define GCC_TSIF_AHB_CLK                               291
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK                 292
+#define GCC_TSIF_REF_CLK                               293
+#define GCC_UFS_AHB_CLK                                        294
+#define GCC_UFS_AXI_CLK                                        295
+#define GCC_UFS_RX_CFG_CLK                             296
+#define GCC_UFS_RX_SYMBOL_0_CLK                                297
+#define GCC_UFS_RX_SYMBOL_1_CLK                                298
+#define GCC_UFS_TX_CFG_CLK                             299
+#define GCC_UFS_TX_SYMBOL_0_CLK                                300
+#define GCC_UFS_TX_SYMBOL_1_CLK                                301
+#define GCC_USB2A_PHY_SLEEP_CLK                                302
+#define GCC_USB2B_PHY_SLEEP_CLK                                303
+#define GCC_USB30_MASTER_CLK                           304
+#define GCC_USB30_MOCK_UTMI_CLK                                305
+#define GCC_USB30_SLEEP_CLK                            306
+#define GCC_USB30_SEC_MASTER_CLK                       307
+#define GCC_USB30_SEC_MOCK_UTMI_CLK                    308
+#define GCC_USB30_SEC_SLEEP_CLK                                309
+#define GCC_USB_HS_AHB_CLK                             310
+#define GCC_USB_HS_INACTIVITY_TIMERS_CLK               311
+#define GCC_USB_HS_SYSTEM_CLK                          312
+#define GCC_USB_HSIC_AHB_CLK                           313
+#define GCC_USB_HSIC_CLK                               314
+#define GCC_USB_HSIC_IO_CAL_CLK                                315
+#define GCC_USB_HSIC_IO_CAL_SLEEP_CLK                  316
+#define GCC_USB_HSIC_MOCK_UTMI_CLK                     317
+#define GCC_USB_HSIC_SYSTEM_CLK                                318
+#define PCIE_0_AUX_CLK_SRC                             319
+#define PCIE_0_PIPE_CLK_SRC                            320
+#define PCIE_1_AUX_CLK_SRC                             321
+#define PCIE_1_PIPE_CLK_SRC                            322
+#define GCC_PCIE_0_AUX_CLK                             323
+#define GCC_PCIE_0_CFG_AHB_CLK                         324
+#define GCC_PCIE_0_MSTR_AXI_CLK                                325
+#define GCC_PCIE_0_PIPE_CLK                            326
+#define GCC_PCIE_0_SLV_AXI_CLK                         327
+#define GCC_PCIE_1_AUX_CLK                             328
+#define GCC_PCIE_1_CFG_AHB_CLK                         329
+#define GCC_PCIE_1_MSTR_AXI_CLK                                330
+#define GCC_PCIE_1_PIPE_CLK                            331
+#define GCC_PCIE_1_SLV_AXI_CLK                         332
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h
new file mode 100644 (file)
index 0000000..b857cad
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_GCC_IPQ806X_H
+#define _DT_BINDINGS_CLK_GCC_IPQ806X_H
+
+#define AFAB_CLK_SRC                           0
+#define QDSS_STM_CLK                           1
+#define SCSS_A_CLK                             2
+#define SCSS_H_CLK                             3
+#define AFAB_CORE_CLK                          4
+#define SCSS_XO_SRC_CLK                                5
+#define AFAB_EBI1_CH0_A_CLK                    6
+#define AFAB_EBI1_CH1_A_CLK                    7
+#define AFAB_AXI_S0_FCLK                       8
+#define AFAB_AXI_S1_FCLK                       9
+#define AFAB_AXI_S2_FCLK                       10
+#define AFAB_AXI_S3_FCLK                       11
+#define AFAB_AXI_S4_FCLK                       12
+#define SFAB_CORE_CLK                          13
+#define SFAB_AXI_S0_FCLK                       14
+#define SFAB_AXI_S1_FCLK                       15
+#define SFAB_AXI_S2_FCLK                       16
+#define SFAB_AXI_S3_FCLK                       17
+#define SFAB_AXI_S4_FCLK                       18
+#define SFAB_AXI_S5_FCLK                       19
+#define SFAB_AHB_S0_FCLK                       20
+#define SFAB_AHB_S1_FCLK                       21
+#define SFAB_AHB_S2_FCLK                       22
+#define SFAB_AHB_S3_FCLK                       23
+#define SFAB_AHB_S4_FCLK                       24
+#define SFAB_AHB_S5_FCLK                       25
+#define SFAB_AHB_S6_FCLK                       26
+#define SFAB_AHB_S7_FCLK                       27
+#define QDSS_AT_CLK_SRC                                28
+#define QDSS_AT_CLK                            29
+#define QDSS_TRACECLKIN_CLK_SRC                        30
+#define QDSS_TRACECLKIN_CLK                    31
+#define QDSS_TSCTR_CLK_SRC                     32
+#define QDSS_TSCTR_CLK                         33
+#define SFAB_ADM0_M0_A_CLK                     34
+#define SFAB_ADM0_M1_A_CLK                     35
+#define SFAB_ADM0_M2_H_CLK                     36
+#define ADM0_CLK                               37
+#define ADM0_PBUS_CLK                          38
+#define IMEM0_A_CLK                            39
+#define QDSS_H_CLK                             40
+#define PCIE_A_CLK                             41
+#define PCIE_AUX_CLK                           42
+#define PCIE_H_CLK                             43
+#define PCIE_PHY_CLK                           44
+#define SFAB_CLK_SRC                           45
+#define SFAB_LPASS_Q6_A_CLK                    46
+#define SFAB_AFAB_M_A_CLK                      47
+#define AFAB_SFAB_M0_A_CLK                     48
+#define AFAB_SFAB_M1_A_CLK                     49
+#define SFAB_SATA_S_H_CLK                      50
+#define DFAB_CLK_SRC                           51
+#define DFAB_CLK                               52
+#define SFAB_DFAB_M_A_CLK                      53
+#define DFAB_SFAB_M_A_CLK                      54
+#define DFAB_SWAY0_H_CLK                       55
+#define DFAB_SWAY1_H_CLK                       56
+#define DFAB_ARB0_H_CLK                                57
+#define DFAB_ARB1_H_CLK                                58
+#define PPSS_H_CLK                             59
+#define PPSS_PROC_CLK                          60
+#define PPSS_TIMER0_CLK                                61
+#define PPSS_TIMER1_CLK                                62
+#define PMEM_A_CLK                             63
+#define DMA_BAM_H_CLK                          64
+#define SIC_H_CLK                              65
+#define SPS_TIC_H_CLK                          66
+#define CFPB_2X_CLK_SRC                                67
+#define CFPB_CLK                               68
+#define CFPB0_H_CLK                            69
+#define CFPB1_H_CLK                            70
+#define CFPB2_H_CLK                            71
+#define SFAB_CFPB_M_H_CLK                      72
+#define CFPB_MASTER_H_CLK                      73
+#define SFAB_CFPB_S_H_CLK                      74
+#define CFPB_SPLITTER_H_CLK                    75
+#define TSIF_H_CLK                             76
+#define TSIF_INACTIVITY_TIMERS_CLK             77
+#define TSIF_REF_SRC                           78
+#define TSIF_REF_CLK                           79
+#define CE1_H_CLK                              80
+#define CE1_CORE_CLK                           81
+#define CE1_SLEEP_CLK                          82
+#define CE2_H_CLK                              83
+#define CE2_CORE_CLK                           84
+#define SFPB_H_CLK_SRC                         85
+#define SFPB_H_CLK                             86
+#define SFAB_SFPB_M_H_CLK                      87
+#define SFAB_SFPB_S_H_CLK                      88
+#define RPM_PROC_CLK                           89
+#define RPM_BUS_H_CLK                          90
+#define RPM_SLEEP_CLK                          91
+#define RPM_TIMER_CLK                          92
+#define RPM_MSG_RAM_H_CLK                      93
+#define PMIC_ARB0_H_CLK                                94
+#define PMIC_ARB1_H_CLK                                95
+#define PMIC_SSBI2_SRC                         96
+#define PMIC_SSBI2_CLK                         97
+#define SDC1_H_CLK                             98
+#define SDC2_H_CLK                             99
+#define SDC3_H_CLK                             100
+#define SDC4_H_CLK                             101
+#define SDC1_SRC                               102
+#define SDC1_CLK                               103
+#define SDC2_SRC                               104
+#define SDC2_CLK                               105
+#define SDC3_SRC                               106
+#define SDC3_CLK                               107
+#define SDC4_SRC                               108
+#define SDC4_CLK                               109
+#define USB_HS1_H_CLK                          110
+#define USB_HS1_XCVR_SRC                       111
+#define USB_HS1_XCVR_CLK                       112
+#define USB_HSIC_H_CLK                         113
+#define USB_HSIC_XCVR_SRC                      114
+#define USB_HSIC_XCVR_CLK                      115
+#define USB_HSIC_SYSTEM_CLK_SRC                        116
+#define USB_HSIC_SYSTEM_CLK                    117
+#define CFPB0_C0_H_CLK                         118
+#define CFPB0_D0_H_CLK                         119
+#define CFPB0_C1_H_CLK                         120
+#define CFPB0_D1_H_CLK                         121
+#define USB_FS1_H_CLK                          122
+#define USB_FS1_XCVR_SRC                       123
+#define USB_FS1_XCVR_CLK                       124
+#define USB_FS1_SYSTEM_CLK                     125
+#define GSBI_COMMON_SIM_SRC                    126
+#define GSBI1_H_CLK                            127
+#define GSBI2_H_CLK                            128
+#define GSBI3_H_CLK                            129
+#define GSBI4_H_CLK                            130
+#define GSBI5_H_CLK                            131
+#define GSBI6_H_CLK                            132
+#define GSBI7_H_CLK                            133
+#define GSBI1_QUP_SRC                          134
+#define GSBI1_QUP_CLK                          135
+#define GSBI2_QUP_SRC                          136
+#define GSBI2_QUP_CLK                          137
+#define GSBI3_QUP_SRC                          138
+#define GSBI3_QUP_CLK                          139
+#define GSBI4_QUP_SRC                          140
+#define GSBI4_QUP_CLK                          141
+#define GSBI5_QUP_SRC                          142
+#define GSBI5_QUP_CLK                          143
+#define GSBI6_QUP_SRC                          144
+#define GSBI6_QUP_CLK                          145
+#define GSBI7_QUP_SRC                          146
+#define GSBI7_QUP_CLK                          147
+#define GSBI1_UART_SRC                         148
+#define GSBI1_UART_CLK                         149
+#define GSBI2_UART_SRC                         150
+#define GSBI2_UART_CLK                         151
+#define GSBI3_UART_SRC                         152
+#define GSBI3_UART_CLK                         153
+#define GSBI4_UART_SRC                         154
+#define GSBI4_UART_CLK                         155
+#define GSBI5_UART_SRC                         156
+#define GSBI5_UART_CLK                         157
+#define GSBI6_UART_SRC                         158
+#define GSBI6_UART_CLK                         159
+#define GSBI7_UART_SRC                         160
+#define GSBI7_UART_CLK                         161
+#define GSBI1_SIM_CLK                          162
+#define GSBI2_SIM_CLK                          163
+#define GSBI3_SIM_CLK                          164
+#define GSBI4_SIM_CLK                          165
+#define GSBI5_SIM_CLK                          166
+#define GSBI6_SIM_CLK                          167
+#define GSBI7_SIM_CLK                          168
+#define USB_HSIC_HSIC_CLK_SRC                  169
+#define USB_HSIC_HSIC_CLK                      170
+#define USB_HSIC_HSIO_CAL_CLK                  171
+#define SPDM_CFG_H_CLK                         172
+#define SPDM_MSTR_H_CLK                                173
+#define SPDM_FF_CLK_SRC                                174
+#define SPDM_FF_CLK                            175
+#define SEC_CTRL_CLK                           176
+#define SEC_CTRL_ACC_CLK_SRC                   177
+#define SEC_CTRL_ACC_CLK                       178
+#define TLMM_H_CLK                             179
+#define TLMM_CLK                               180
+#define SATA_H_CLK                             181
+#define SATA_CLK_SRC                           182
+#define SATA_RXOOB_CLK                         183
+#define SATA_PMALIVE_CLK                       184
+#define SATA_PHY_REF_CLK                       185
+#define SATA_A_CLK                             186
+#define SATA_PHY_CFG_CLK                       187
+#define TSSC_CLK_SRC                           188
+#define TSSC_CLK                               189
+#define PDM_SRC                                        190
+#define PDM_CLK                                        191
+#define GP0_SRC                                        192
+#define GP0_CLK                                        193
+#define GP1_SRC                                        194
+#define GP1_CLK                                        195
+#define GP2_SRC                                        196
+#define GP2_CLK                                        197
+#define MPM_CLK                                        198
+#define EBI1_CLK_SRC                           199
+#define EBI1_CH0_CLK                           200
+#define EBI1_CH1_CLK                           201
+#define EBI1_2X_CLK                            202
+#define EBI1_CH0_DQ_CLK                                203
+#define EBI1_CH1_DQ_CLK                                204
+#define EBI1_CH0_CA_CLK                                205
+#define EBI1_CH1_CA_CLK                                206
+#define EBI1_XO_CLK                            207
+#define SFAB_SMPSS_S_H_CLK                     208
+#define PRNG_SRC                               209
+#define PRNG_CLK                               210
+#define PXO_SRC                                        211
+#define SPDM_CY_PORT0_CLK                      212
+#define SPDM_CY_PORT1_CLK                      213
+#define SPDM_CY_PORT2_CLK                      214
+#define SPDM_CY_PORT3_CLK                      215
+#define SPDM_CY_PORT4_CLK                      216
+#define SPDM_CY_PORT5_CLK                      217
+#define SPDM_CY_PORT6_CLK                      218
+#define SPDM_CY_PORT7_CLK                      219
+#define PLL0                                   220
+#define PLL0_VOTE                              221
+#define PLL3                                   222
+#define PLL3_VOTE                              223
+#define PLL4                                   224
+#define PLL4_VOTE                              225
+#define PLL8                                   226
+#define PLL8_VOTE                              227
+#define PLL9                                   228
+#define PLL10                                  229
+#define PLL11                                  230
+#define PLL12                                  231
+#define PLL14                                  232
+#define PLL14_VOTE                             233
+#define PLL18                                  234
+#define CE5_SRC                                        235
+#define CE5_H_CLK                              236
+#define CE5_CORE_CLK                           237
+#define CE3_SLEEP_CLK                          238
+#define SFAB_AHB_S8_FCLK                       239
+#define SPDM_CY_PORT8_CLK                      246
+#define PCIE_ALT_REF_SRC                       247
+#define PCIE_ALT_REF_CLK                       248
+#define PCIE_1_A_CLK                           249
+#define PCIE_1_AUX_CLK                         250
+#define PCIE_1_H_CLK                           251
+#define PCIE_1_PHY_CLK                         252
+#define PCIE_1_ALT_REF_SRC                     253
+#define PCIE_1_ALT_REF_CLK                     254
+#define PCIE_2_A_CLK                           255
+#define PCIE_2_AUX_CLK                         256
+#define PCIE_2_H_CLK                           257
+#define PCIE_2_PHY_CLK                         258
+#define PCIE_2_ALT_REF_SRC                     259
+#define PCIE_2_ALT_REF_CLK                     260
+#define EBI2_CLK                               261
+#define USB30_SLEEP_CLK                                262
+#define USB30_UTMI_SRC                         263
+#define USB30_0_UTMI_CLK                       264
+#define USB30_1_UTMI_CLK                       265
+#define USB30_MASTER_SRC                       266
+#define USB30_0_MASTER_CLK                     267
+#define USB30_1_MASTER_CLK                     268
+#define GMAC_CORE1_CLK_SRC                     269
+#define GMAC_CORE2_CLK_SRC                     270
+#define GMAC_CORE3_CLK_SRC                     271
+#define GMAC_CORE4_CLK_SRC                     272
+#define GMAC_CORE1_CLK                         273
+#define GMAC_CORE2_CLK                         274
+#define GMAC_CORE3_CLK                         275
+#define GMAC_CORE4_CLK                         276
+#define UBI32_CORE1_CLK_SRC                    277
+#define UBI32_CORE2_CLK_SRC                    278
+#define UBI32_CORE1_CLK                                279
+#define UBI32_CORE2_CLK                                280
+
+#endif
index f9f5471..7d20eed 100644 (file)
 #define PLL13                                  292
 #define PLL14                                  293
 #define PLL14_VOTE                             294
+#define USB_HS3_H_CLK                          295
+#define USB_HS3_XCVR_SRC                       296
+#define USB_HS3_XCVR_CLK                       297
+#define USB_HS4_H_CLK                          298
+#define USB_HS4_XCVR_SRC                       299
+#define USB_HS4_XCVR_CLK                       300
+#define SATA_PHY_CFG_CLK                       301
+#define SATA_A_CLK                             302
+#define CE3_SRC                                        303
+#define CE3_CORE_CLK                           304
+#define CE3_H_CLK                              305
 
 #endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
new file mode 100644 (file)
index 0000000..a929f86
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_APQ_MMCC_8084_H
+#define _DT_BINDINGS_CLK_APQ_MMCC_8084_H
+
+#define MMSS_AHB_CLK_SRC               0
+#define MMSS_AXI_CLK_SRC               1
+#define MMPLL0                         2
+#define MMPLL0_VOTE                    3
+#define MMPLL1                         4
+#define MMPLL1_VOTE                    5
+#define MMPLL2                         6
+#define MMPLL3                         7
+#define MMPLL4                         8
+#define CSI0_CLK_SRC                   9
+#define CSI1_CLK_SRC                   10
+#define CSI2_CLK_SRC                   11
+#define CSI3_CLK_SRC                   12
+#define VCODEC0_CLK_SRC                        13
+#define VFE0_CLK_SRC                   14
+#define VFE1_CLK_SRC                   15
+#define MDP_CLK_SRC                    16
+#define PCLK0_CLK_SRC                  17
+#define PCLK1_CLK_SRC                  18
+#define OCMEMNOC_CLK_SRC               19
+#define GFX3D_CLK_SRC                  20
+#define JPEG0_CLK_SRC                  21
+#define JPEG1_CLK_SRC                  22
+#define JPEG2_CLK_SRC                  23
+#define EDPPIXEL_CLK_SRC               24
+#define EXTPCLK_CLK_SRC                        25
+#define VP_CLK_SRC                     26
+#define CCI_CLK_SRC                    27
+#define CAMSS_GP0_CLK_SRC              28
+#define CAMSS_GP1_CLK_SRC              29
+#define MCLK0_CLK_SRC                  30
+#define MCLK1_CLK_SRC                  31
+#define MCLK2_CLK_SRC                  32
+#define MCLK3_CLK_SRC                  33
+#define CSI0PHYTIMER_CLK_SRC           34
+#define CSI1PHYTIMER_CLK_SRC           35
+#define CSI2PHYTIMER_CLK_SRC           36
+#define CPP_CLK_SRC                    37
+#define BYTE0_CLK_SRC                  38
+#define BYTE1_CLK_SRC                  39
+#define EDPAUX_CLK_SRC                 40
+#define EDPLINK_CLK_SRC                        41
+#define ESC0_CLK_SRC                   42
+#define ESC1_CLK_SRC                   43
+#define HDMI_CLK_SRC                   44
+#define VSYNC_CLK_SRC                  45
+#define RBCPR_CLK_SRC                  46
+#define RBBMTIMER_CLK_SRC              47
+#define MAPLE_CLK_SRC                  48
+#define VDP_CLK_SRC                    49
+#define VPU_BUS_CLK_SRC                        50
+#define MMSS_CXO_CLK                   51
+#define MMSS_SLEEPCLK_CLK              52
+#define AVSYNC_AHB_CLK                 53
+#define AVSYNC_EDPPIXEL_CLK            54
+#define AVSYNC_EXTPCLK_CLK             55
+#define AVSYNC_PCLK0_CLK               56
+#define AVSYNC_PCLK1_CLK               57
+#define AVSYNC_VP_CLK                  58
+#define CAMSS_AHB_CLK                  59
+#define CAMSS_CCI_CCI_AHB_CLK          60
+#define CAMSS_CCI_CCI_CLK              61
+#define CAMSS_CSI0_AHB_CLK             62
+#define CAMSS_CSI0_CLK                 63
+#define CAMSS_CSI0PHY_CLK              64
+#define CAMSS_CSI0PIX_CLK              65
+#define CAMSS_CSI0RDI_CLK              66
+#define CAMSS_CSI1_AHB_CLK             67
+#define CAMSS_CSI1_CLK                 68
+#define CAMSS_CSI1PHY_CLK              69
+#define CAMSS_CSI1PIX_CLK              70
+#define CAMSS_CSI1RDI_CLK              71
+#define CAMSS_CSI2_AHB_CLK             72
+#define CAMSS_CSI2_CLK                 73
+#define CAMSS_CSI2PHY_CLK              74
+#define CAMSS_CSI2PIX_CLK              75
+#define CAMSS_CSI2RDI_CLK              76
+#define CAMSS_CSI3_AHB_CLK             77
+#define CAMSS_CSI3_CLK                 78
+#define CAMSS_CSI3PHY_CLK              79
+#define CAMSS_CSI3PIX_CLK              80
+#define CAMSS_CSI3RDI_CLK              81
+#define CAMSS_CSI_VFE0_CLK             82
+#define CAMSS_CSI_VFE1_CLK             83
+#define CAMSS_GP0_CLK                  84
+#define CAMSS_GP1_CLK                  85
+#define CAMSS_ISPIF_AHB_CLK            86
+#define CAMSS_JPEG_JPEG0_CLK           87
+#define CAMSS_JPEG_JPEG1_CLK           88
+#define CAMSS_JPEG_JPEG2_CLK           89
+#define CAMSS_JPEG_JPEG_AHB_CLK                90
+#define CAMSS_JPEG_JPEG_AXI_CLK                91
+#define CAMSS_MCLK0_CLK                        92
+#define CAMSS_MCLK1_CLK                        93
+#define CAMSS_MCLK2_CLK                        94
+#define CAMSS_MCLK3_CLK                        95
+#define CAMSS_MICRO_AHB_CLK            96
+#define CAMSS_PHY0_CSI0PHYTIMER_CLK    97
+#define CAMSS_PHY1_CSI1PHYTIMER_CLK    98
+#define CAMSS_PHY2_CSI2PHYTIMER_CLK    99
+#define CAMSS_TOP_AHB_CLK              100
+#define CAMSS_VFE_CPP_AHB_CLK          101
+#define CAMSS_VFE_CPP_CLK              102
+#define CAMSS_VFE_VFE0_CLK             103
+#define CAMSS_VFE_VFE1_CLK             104
+#define CAMSS_VFE_VFE_AHB_CLK          105
+#define CAMSS_VFE_VFE_AXI_CLK          106
+#define MDSS_AHB_CLK                   107
+#define MDSS_AXI_CLK                   108
+#define MDSS_BYTE0_CLK                 109
+#define MDSS_BYTE1_CLK                 110
+#define MDSS_EDPAUX_CLK                        111
+#define MDSS_EDPLINK_CLK               112
+#define MDSS_EDPPIXEL_CLK              113
+#define MDSS_ESC0_CLK                  114
+#define MDSS_ESC1_CLK                  115
+#define MDSS_EXTPCLK_CLK               116
+#define MDSS_HDMI_AHB_CLK              117
+#define MDSS_HDMI_CLK                  118
+#define MDSS_MDP_CLK                   119
+#define MDSS_MDP_LUT_CLK               120
+#define MDSS_PCLK0_CLK                 121
+#define MDSS_PCLK1_CLK                 122
+#define MDSS_VSYNC_CLK                 123
+#define MMSS_RBCPR_AHB_CLK             124
+#define MMSS_RBCPR_CLK                 125
+#define MMSS_SPDM_AHB_CLK              126
+#define MMSS_SPDM_AXI_CLK              127
+#define MMSS_SPDM_CSI0_CLK             128
+#define MMSS_SPDM_GFX3D_CLK            129
+#define MMSS_SPDM_JPEG0_CLK            130
+#define MMSS_SPDM_JPEG1_CLK            131
+#define MMSS_SPDM_JPEG2_CLK            132
+#define MMSS_SPDM_MDP_CLK              133
+#define MMSS_SPDM_PCLK0_CLK            134
+#define MMSS_SPDM_PCLK1_CLK            135
+#define MMSS_SPDM_VCODEC0_CLK          136
+#define MMSS_SPDM_VFE0_CLK             137
+#define MMSS_SPDM_VFE1_CLK             138
+#define MMSS_SPDM_RM_AXI_CLK           139
+#define MMSS_SPDM_RM_OCMEMNOC_CLK      140
+#define MMSS_MISC_AHB_CLK              141
+#define MMSS_MMSSNOC_AHB_CLK           142
+#define MMSS_MMSSNOC_BTO_AHB_CLK       143
+#define MMSS_MMSSNOC_AXI_CLK           144
+#define MMSS_S0_AXI_CLK                        145
+#define OCMEMCX_AHB_CLK                        146
+#define OCMEMCX_OCMEMNOC_CLK           147
+#define OXILI_OCMEMGX_CLK              148
+#define OXILI_GFX3D_CLK                        149
+#define OXILI_RBBMTIMER_CLK            150
+#define OXILICX_AHB_CLK                        151
+#define VENUS0_AHB_CLK                 152
+#define VENUS0_AXI_CLK                 153
+#define VENUS0_CORE0_VCODEC_CLK                154
+#define VENUS0_CORE1_VCODEC_CLK                155
+#define VENUS0_OCMEMNOC_CLK            156
+#define VENUS0_VCODEC0_CLK             157
+#define VPU_AHB_CLK                    158
+#define VPU_AXI_CLK                    159
+#define VPU_BUS_CLK                    160
+#define VPU_CXO_CLK                    161
+#define VPU_MAPLE_CLK                  162
+#define VPU_SLEEP_CLK                  163
+#define VPU_VDP_CLK                    164
+
+#endif
index 5868ef1..85041b2 100644 (file)
 #define CSIPHY0_TIMER_CLK                              116
 #define PLL1                                           117
 #define PLL2                                           118
+#define RGB_TV_CLK                                     119
+#define NPL_TV_CLK                                     120
+#define VCAP_AHB_CLK                                   121
+#define VCAP_AXI_CLK                                   122
+#define VCAP_SRC                                       123
+#define VCAP_CLK                                       124
+#define VCAP_NPL_CLK                                   125
+#define PLL15                                          126
 
 #endif
diff --git a/include/dt-bindings/clock/rk3066a-cru.h b/include/dt-bindings/clock/rk3066a-cru.h
new file mode 100644 (file)
index 0000000..bc1ed1d
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/rk3188-cru-common.h>
+
+/* soft-reset indices */
+#define SRST_SRST1             0
+#define SRST_SRST2             1
+
+#define SRST_L2MEM             18
+#define SRST_I2S0              23
+#define SRST_I2S1              24
+#define SRST_I2S2              25
+#define SRST_TIMER2            29
+
+#define SRST_GPIO4             36
+#define SRST_GPIO6             38
+
+#define SRST_TSADC             92
+
+#define SRST_HDMI              96
+#define SRST_HDMI_APB          97
+#define SRST_CIF1              111
diff --git a/include/dt-bindings/clock/rk3188-cru-common.h b/include/dt-bindings/clock/rk3188-cru-common.h
new file mode 100644 (file)
index 0000000..750ee60
--- /dev/null
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* core clocks */
+#define PLL_APLL               1
+#define PLL_DPLL               2
+#define PLL_CPLL               3
+#define PLL_GPLL               4
+#define CORE_PERI              5
+#define CORE_L2C               6
+
+/* sclk gates (special clocks) */
+#define SCLK_UART0             64
+#define SCLK_UART1             65
+#define SCLK_UART2             66
+#define SCLK_UART3             67
+#define SCLK_MAC               68
+#define SCLK_SPI0              69
+#define SCLK_SPI1              70
+#define SCLK_SARADC            71
+#define SCLK_SDMMC             72
+#define SCLK_SDIO              73
+#define SCLK_EMMC              74
+#define SCLK_I2S0              75
+#define SCLK_I2S1              76
+#define SCLK_I2S2              77
+#define SCLK_SPDIF             78
+#define SCLK_CIF0              79
+#define SCLK_CIF1              80
+#define SCLK_OTGPHY0           81
+#define SCLK_OTGPHY1           82
+#define SCLK_HSADC             83
+#define SCLK_TIMER0            84
+#define SCLK_TIMER1            85
+#define SCLK_TIMER2            86
+#define SCLK_TIMER3            87
+#define SCLK_TIMER4            88
+#define SCLK_TIMER5            89
+#define SCLK_TIMER6            90
+#define SCLK_JTAG              91
+#define SCLK_SMC               92
+
+#define DCLK_LCDC0             190
+#define DCLK_LCDC1             191
+
+/* aclk gates */
+#define ACLK_DMA1              192
+#define ACLK_DMA2              193
+#define ACLK_GPS               194
+#define ACLK_LCDC0             195
+#define ACLK_LCDC1             196
+#define ACLK_GPU               197
+#define ACLK_SMC               198
+#define ACLK_CIF               199
+#define ACLK_IPP               200
+#define ACLK_RGA               201
+#define ACLK_CIF0              202
+
+/* pclk gates */
+#define PCLK_GRF               320
+#define PCLK_PMU               321
+#define PCLK_TIMER0            322
+#define PCLK_TIMER1            323
+#define PCLK_TIMER2            324
+#define PCLK_TIMER3            325
+#define PCLK_PWM01             326
+#define PCLK_PWM23             327
+#define PCLK_SPI0              328
+#define PCLK_SPI1              329
+#define PCLK_SARADC            330
+#define PCLK_WDT               331
+#define PCLK_UART0             332
+#define PCLK_UART1             333
+#define PCLK_UART2             334
+#define PCLK_UART3             335
+#define PCLK_I2C0              336
+#define PCLK_I2C1              337
+#define PCLK_I2C2              338
+#define PCLK_I2C3              339
+#define PCLK_I2C4              340
+#define PCLK_GPIO0             341
+#define PCLK_GPIO1             342
+#define PCLK_GPIO2             343
+#define PCLK_GPIO3             344
+#define PCLK_GPIO4             345
+#define PCLK_GPIO6             346
+#define PCLK_EFUSE             347
+#define PCLK_TZPC              348
+#define PCLK_TSADC             349
+
+/* hclk gates */
+#define HCLK_SDMMC             448
+#define HCLK_SDIO              449
+#define HCLK_EMMC              450
+#define HCLK_OTG0              451
+#define HCLK_EMAC              452
+#define HCLK_SPDIF             453
+#define HCLK_I2S0              454
+#define HCLK_I2S1              455
+#define HCLK_I2S2              456
+#define HCLK_OTG1              457
+#define HCLK_HSIC              458
+#define HCLK_HSADC             459
+#define HCLK_PIDF              460
+#define HCLK_LCDC0             461
+#define HCLK_LCDC1             462
+#define HCLK_ROM               463
+#define HCLK_CIF0              464
+#define HCLK_IPP               465
+#define HCLK_RGA               466
+#define HCLK_NANDC0            467
+
+#define CLK_NR_CLKS            (HCLK_NANDC0 + 1)
+
+/* soft-reset indices */
+#define SRST_MCORE             2
+#define SRST_CORE0             3
+#define SRST_CORE1             4
+#define SRST_MCORE_DBG         7
+#define SRST_CORE0_DBG         8
+#define SRST_CORE1_DBG         9
+#define SRST_CORE0_WDT         12
+#define SRST_CORE1_WDT         13
+#define SRST_STRC_SYS          14
+#define SRST_L2C               15
+
+#define SRST_CPU_AHB           17
+#define SRST_AHB2APB           19
+#define SRST_DMA1              20
+#define SRST_INTMEM            21
+#define SRST_ROM               22
+#define SRST_SPDIF             26
+#define SRST_TIMER0            27
+#define SRST_TIMER1            28
+#define SRST_EFUSE             30
+
+#define SRST_GPIO0             32
+#define SRST_GPIO1             33
+#define SRST_GPIO2             34
+#define SRST_GPIO3             35
+
+#define SRST_UART0             39
+#define SRST_UART1             40
+#define SRST_UART2             41
+#define SRST_UART3             42
+#define SRST_I2C0              43
+#define SRST_I2C1              44
+#define SRST_I2C2              45
+#define SRST_I2C3              46
+#define SRST_I2C4              47
+
+#define SRST_PWM0              48
+#define SRST_PWM1              49
+#define SRST_DAP_PO            50
+#define SRST_DAP               51
+#define SRST_DAP_SYS           52
+#define SRST_TPIU_ATB          53
+#define SRST_PMU_APB           54
+#define SRST_GRF               55
+#define SRST_PMU               56
+#define SRST_PERI_AXI          57
+#define SRST_PERI_AHB          58
+#define SRST_PERI_APB          59
+#define SRST_PERI_NIU          60
+#define SRST_CPU_PERI          61
+#define SRST_EMEM_PERI         62
+#define SRST_USB_PERI          63
+
+#define SRST_DMA2              64
+#define SRST_SMC               65
+#define SRST_MAC               66
+#define SRST_NANC0             68
+#define SRST_USBOTG0           69
+#define SRST_USBPHY0           70
+#define SRST_OTGC0             71
+#define SRST_USBOTG1           72
+#define SRST_USBPHY1           73
+#define SRST_OTGC1             74
+#define SRST_HSADC             76
+#define SRST_PIDFILTER         77
+#define SRST_DDR_MSCH          79
+
+#define SRST_TZPC              80
+#define SRST_SDMMC             81
+#define SRST_SDIO              82
+#define SRST_EMMC              83
+#define SRST_SPI0              84
+#define SRST_SPI1              85
+#define SRST_WDT               86
+#define SRST_SARADC            87
+#define SRST_DDRPHY            88
+#define SRST_DDRPHY_APB                89
+#define SRST_DDRCTL            90
+#define SRST_DDRCTL_APB                91
+#define SRST_DDRPUB            93
+
+#define SRST_VIO0_AXI          98
+#define SRST_VIO0_AHB          99
+#define SRST_LCDC0_AXI         100
+#define SRST_LCDC0_AHB         101
+#define SRST_LCDC0_DCLK                102
+#define SRST_LCDC1_AXI         103
+#define SRST_LCDC1_AHB         104
+#define SRST_LCDC1_DCLK                105
+#define SRST_IPP_AXI           106
+#define SRST_IPP_AHB           107
+#define SRST_RGA_AXI           108
+#define SRST_RGA_AHB           109
+#define SRST_CIF0              110
+
+#define SRST_VCODEC_AXI                112
+#define SRST_VCODEC_AHB                113
+#define SRST_VIO1_AXI          114
+#define SRST_VCODEC_CPU                115
+#define SRST_VCODEC_NIU                116
+#define SRST_GPU               120
+#define SRST_GPU_NIU           122
+#define SRST_TFUN_ATB          125
+#define SRST_TFUN_APB          126
+#define SRST_CTI4_APB          127
+
+#define SRST_TPIU_APB          128
+#define SRST_TRACE             129
+#define SRST_CORE_DBG          130
+#define SRST_DBG_APB           131
+#define SRST_CTI0              132
+#define SRST_CTI0_APB          133
+#define SRST_CTI1              134
+#define SRST_CTI1_APB          135
+#define SRST_PTM_CORE0         136
+#define SRST_PTM_CORE1         137
+#define SRST_PTM0              138
+#define SRST_PTM0_ATB          139
+#define SRST_PTM1              140
+#define SRST_PTM1_ATB          141
+#define SRST_CTM               142
+#define SRST_TS                        143
diff --git a/include/dt-bindings/clock/rk3188-cru.h b/include/dt-bindings/clock/rk3188-cru.h
new file mode 100644 (file)
index 0000000..9fac8ed
--- /dev/null
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/rk3188-cru-common.h>
+
+/* soft-reset indices */
+#define SRST_PTM_CORE2         0
+#define SRST_PTM_CORE3         1
+#define SRST_CORE2             5
+#define SRST_CORE3             6
+#define SRST_CORE2_DBG         10
+#define SRST_CORE3_DBG         11
+
+#define SRST_TIMER2            16
+#define SRST_TIMER4            23
+#define SRST_I2S0              24
+#define SRST_TIMER5            25
+#define SRST_TIMER3            29
+#define SRST_TIMER6            31
+
+#define SRST_PTM3              36
+#define SRST_PTM3_ATB          37
+
+#define SRST_GPS               67
+#define SRST_HSICPHY           75
+#define SRST_TIMER             78
+
+#define SRST_PTM2              92
+#define SRST_CORE2_WDT         94
+#define SRST_CORE3_WDT         95
+
+#define SRST_PTM2_ATB          111
+
+#define SRST_HSIC              117
+#define SRST_CTI2              118
+#define SRST_CTI2_APB          119
+#define SRST_GPU_BRIDGE                121
+#define SRST_CTI3              123
+#define SRST_CTI3_APB          124
diff --git a/include/dt-bindings/clock/rk3288-cru.h b/include/dt-bindings/clock/rk3288-cru.h
new file mode 100644 (file)
index 0000000..ebcb460
--- /dev/null
@@ -0,0 +1,278 @@
+/*
+ * Copyright (c) 2014 MundoReader S.L.
+ * Author: Heiko Stuebner <heiko@sntech.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* core clocks */
+#define PLL_APLL               1
+#define PLL_DPLL               2
+#define PLL_CPLL               3
+#define PLL_GPLL               4
+#define PLL_NPLL               5
+
+/* sclk gates (special clocks) */
+#define SCLK_GPU               64
+#define SCLK_SPI0              65
+#define SCLK_SPI1              66
+#define SCLK_SPI2              67
+#define SCLK_SDMMC             68
+#define SCLK_SDIO0             69
+#define SCLK_SDIO1             70
+#define SCLK_EMMC              71
+#define SCLK_TSADC             72
+#define SCLK_SARADC            73
+#define SCLK_PS2C              74
+#define SCLK_NANDC0            75
+#define SCLK_NANDC1            76
+#define SCLK_UART0             77
+#define SCLK_UART1             78
+#define SCLK_UART2             79
+#define SCLK_UART3             80
+#define SCLK_UART4             81
+#define SCLK_I2S0              82
+#define SCLK_SPDIF             83
+#define SCLK_SPDIF8CH          84
+#define SCLK_TIMER0            85
+#define SCLK_TIMER1            86
+#define SCLK_TIMER2            87
+#define SCLK_TIMER3            88
+#define SCLK_TIMER4            89
+#define SCLK_TIMER5            90
+#define SCLK_TIMER6            91
+#define SCLK_HSADC             92
+#define SCLK_OTGPHY0           93
+#define SCLK_OTGPHY1           94
+#define SCLK_OTGPHY2           95
+#define SCLK_OTG_ADP           96
+#define SCLK_HSICPHY480M       97
+#define SCLK_HSICPHY12M                98
+#define SCLK_MACREF            99
+#define SCLK_LCDC_PWM0         100
+#define SCLK_LCDC_PWM1         101
+#define SCLK_MAC_RX            102
+#define SCLK_MAC_TX            103
+
+#define DCLK_VOP0              190
+#define DCLK_VOP1              191
+
+/* aclk gates */
+#define ACLK_GPU               192
+#define ACLK_DMAC1             193
+#define ACLK_DMAC2             194
+#define ACLK_MMU               195
+#define ACLK_GMAC              196
+#define ACLK_VOP0              197
+#define ACLK_VOP1              198
+#define ACLK_CRYPTO            199
+#define ACLK_RGA               200
+
+/* pclk gates */
+#define PCLK_GPIO0             320
+#define PCLK_GPIO1             321
+#define PCLK_GPIO2             322
+#define PCLK_GPIO3             323
+#define PCLK_GPIO4             324
+#define PCLK_GPIO5             325
+#define PCLK_GPIO6             326
+#define PCLK_GPIO7             327
+#define PCLK_GPIO8             328
+#define PCLK_GRF               329
+#define PCLK_SGRF              330
+#define PCLK_PMU               331
+#define PCLK_I2C0              332
+#define PCLK_I2C1              333
+#define PCLK_I2C2              334
+#define PCLK_I2C3              335
+#define PCLK_I2C4              336
+#define PCLK_I2C5              337
+#define PCLK_SPI0              338
+#define PCLK_SPI1              339
+#define PCLK_SPI2              340
+#define PCLK_UART0             341
+#define PCLK_UART1             342
+#define PCLK_UART2             343
+#define PCLK_UART3             344
+#define PCLK_UART4             345
+#define PCLK_TSADC             346
+#define PCLK_SARADC            347
+#define PCLK_SIM               348
+#define PCLK_GMAC              349
+#define PCLK_PWM               350
+#define PCLK_RKPWM             351
+#define PCLK_PS2C              352
+#define PCLK_TIMER             353
+#define PCLK_TZPC              354
+
+/* hclk gates */
+#define HCLK_GPS               448
+#define HCLK_OTG0              449
+#define HCLK_USBHOST0          450
+#define HCLK_USBHOST1          451
+#define HCLK_HSIC              452
+#define HCLK_NANDC0            453
+#define HCLK_NANDC1            454
+#define HCLK_TSP               455
+#define HCLK_SDMMC             456
+#define HCLK_SDIO0             457
+#define HCLK_SDIO1             458
+#define HCLK_EMMC              459
+#define HCLK_HSADC             460
+#define HCLK_CRYPTO            461
+#define HCLK_I2S0              462
+#define HCLK_SPDIF             463
+#define HCLK_SPDIF8CH          464
+#define HCLK_VOP0              465
+#define HCLK_VOP1              466
+#define HCLK_ROM               467
+#define HCLK_IEP               468
+#define HCLK_ISP               469
+#define HCLK_RGA               470
+
+#define CLK_NR_CLKS            (HCLK_RGA + 1)
+
+/* soft-reset indices */
+#define SRST_CORE0             0
+#define SRST_CORE1             1
+#define SRST_CORE2             2
+#define SRST_CORE3             3
+#define SRST_CORE0_PO          4
+#define SRST_CORE1_PO          5
+#define SRST_CORE2_PO          6
+#define SRST_CORE3_PO          7
+#define SRST_PDCORE_STRSYS     8
+#define SRST_PDBUS_STRSYS      9
+#define SRST_L2C               10
+#define SRST_TOPDBG            11
+#define SRST_CORE0_DBG         12
+#define SRST_CORE1_DBG         13
+#define SRST_CORE2_DBG         14
+#define SRST_CORE3_DBG         15
+
+#define SRST_PDBUG_AHB_ARBITOR 16
+#define SRST_EFUSE256          17
+#define SRST_DMAC1             18
+#define SRST_INTMEM            19
+#define SRST_ROM               20
+#define SRST_SPDIF8CH          21
+#define SRST_TIMER             22
+#define SRST_I2S0              23
+#define SRST_SPDIF             24
+#define SRST_TIMER0            25
+#define SRST_TIMER1            26
+#define SRST_TIMER2            27
+#define SRST_TIMER3            28
+#define SRST_TIMER4            29
+#define SRST_TIMER5            30
+#define SRST_EFUSE             31
+
+#define SRST_GPIO0             32
+#define SRST_GPIO1             33
+#define SRST_GPIO2             34
+#define SRST_GPIO3             35
+#define SRST_GPIO4             36
+#define SRST_GPIO5             37
+#define SRST_GPIO6             38
+#define SRST_GPIO7             39
+#define SRST_GPIO8             40
+#define SRST_I2C0              42
+#define SRST_I2C1              43
+#define SRST_I2C2              44
+#define SRST_I2C3              45
+#define SRST_I2C4              46
+#define SRST_I2C5              47
+
+#define SRST_DWPWM             48
+#define SRST_MMC_PERI          49
+#define SRST_PERIPH_MMU                50
+#define SRST_DAP               51
+#define SRST_DAP_SYS           52
+#define SRST_TPIU              53
+#define SRST_PMU_APB           54
+#define SRST_GRF               55
+#define SRST_PMU               56
+#define SRST_PERIPH_AXI                57
+#define SRST_PERIPH_AHB                58
+#define SRST_PERIPH_APB                59
+#define SRST_PERIPH_NIU                60
+#define SRST_PDPERI_AHB_ARBI   61
+#define SRST_EMEM              62
+#define SRST_USB_PERI          63
+
+#define SRST_DMAC2             64
+#define SRST_MAC               66
+#define SRST_GPS               67
+#define SRST_RKPWM             69
+#define SRST_CCP               71
+#define SRST_USBHOST0          72
+#define SRST_HSIC              73
+#define SRST_HSIC_AUX          74
+#define SRST_HSIC_PHY          75
+#define SRST_HSADC             76
+#define SRST_NANDC0            77
+#define SRST_NANDC1            78
+
+#define SRST_TZPC              80
+#define SRST_SPI0              83
+#define SRST_SPI1              84
+#define SRST_SPI2              85
+#define SRST_SARADC            87
+#define SRST_PDALIVE_NIU       88
+#define SRST_PDPMU_INTMEM      89
+#define SRST_PDPMU_NIU         90
+#define SRST_SGRF              91
+
+#define SRST_VIO_ARBI          96
+#define SRST_RGA_NIU           97
+#define SRST_VIO0_NIU_AXI      98
+#define SRST_VIO_NIU_AHB       99
+#define SRST_LCDC0_AXI         100
+#define SRST_LCDC0_AHB         101
+#define SRST_LCDC0_DCLK                102
+#define SRST_VIO1_NIU_AXI      103
+#define SRST_VIP               104
+#define SRST_RGA_CORE          105
+#define SRST_IEP_AXI           106
+#define SRST_IEP_AHB           107
+#define SRST_RGA_AXI           108
+#define SRST_RGA_AHB           109
+#define SRST_ISP               110
+#define SRST_EDP               111
+
+#define SRST_VCODEC_AXI                112
+#define SRST_VCODEC_AHB                113
+#define SRST_VIO_H2P           114
+#define SRST_MIPIDSI0          115
+#define SRST_MIPIDSI1          116
+#define SRST_MIPICSI           117
+#define SRST_LVDS_PHY          118
+#define SRST_LVDS_CON          119
+#define SRST_GPU               120
+#define SRST_HDMI              121
+#define SRST_CORE_PVTM         124
+#define SRST_GPU_PVTM          125
+
+#define SRST_MMC0              128
+#define SRST_SDIO0             129
+#define SRST_SDIO1             130
+#define SRST_EMMC              131
+#define SRST_USBOTG_AHB                132
+#define SRST_USBOTG_PHY                133
+#define SRST_USBOTG_CON                134
+#define SRST_USBHOST0_AHB      135
+#define SRST_USBHOST0_PHY      136
+#define SRST_USBHOST0_CON      137
+#define SRST_USBHOST1_AHB      138
+#define SRST_USBHOST1_PHY      139
+#define SRST_USBHOST1_CON      140
+#define SRST_USB_ADP           141
+#define SRST_ACC_EFUSE         142
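
A note on how these soft-reset indices are typically consumed: in the Rockchip CRU the reset IDs usually decode into a SOFTRST_CON register index and a bit position, 16 resets per 32-bit register with a write-enable mask in the upper half. The decode below is a standalone sketch of that convention, an assumption about the driver side rather than anything in this commit:

#include <stdio.h>

#define SRST_LCDC0_DCLK 102     /* mirrors the rk3288 definition above */

int main(void)
{
        unsigned int id  = SRST_LCDC0_DCLK;
        unsigned int reg = id / 16;     /* SOFTRST_CON register index */
        unsigned int bit = id % 16;     /* bit within that register */
        /* value in the low half, write-enable mask in the high half */
        unsigned int val = (1u << bit) | (1u << (bit + 16));

        printf("reg %u bit %u write 0x%08x\n", reg, bit, val);
        return 0;
}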
index 0d2c739..d80caa6 100644 (file)
@@ -10,6 +10,7 @@
 #define CLK_ETH1_PHY           4
 
 /* CLOCKGEN A1 */
+#define CLK_ICN_IF_2           0
 #define CLK_GMAC0_PHY          3
 
 #endif
index 552c779..f9bdbd1 100644 (file)
@@ -10,6 +10,7 @@
 #define CLK_ETH1_PHY           4
 
 /* CLOCKGEN A1 */
+#define CLK_ICN_IF_2           0
 #define CLK_GMAC0_PHY          3
 
 #endif
diff --git a/include/dt-bindings/mfd/palmas.h b/include/dt-bindings/mfd/palmas.h
new file mode 100644 (file)
index 0000000..2c8ac48
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * This header provides macros for Palmas device bindings.
+ *
+ * Copyright (c) 2013, NVIDIA Corporation.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ */
+
+#ifndef __DT_BINDINGS_PALMAS_H__
+#define __DT_BINDINGS_PALMAS_H__
+
+/* External control pins */
+#define PALMAS_EXT_CONTROL_PIN_ENABLE1 1
+#define PALMAS_EXT_CONTROL_PIN_ENABLE2 2
+#define PALMAS_EXT_CONTROL_PIN_NSLEEP  3
+
+#endif /* __DT_BINDINGS_PALMAS_H__ */
index 002a285..3d33794 100644 (file)
@@ -30,7 +30,8 @@
 #define MUX_MODE14     0xe
 #define MUX_MODE15     0xf
 
-#define PULL_ENA               (1 << 16)
+#define PULL_ENA               (0 << 16)
+#define PULL_DIS               (1 << 16)
 #define PULL_UP                        (1 << 17)
 #define INPUT_EN               (1 << 18)
 #define SLEWCONTROL            (1 << 19)
 #define WAKEUP_EVENT           (1 << 25)
 
 /* Active pin states */
-#define PIN_OUTPUT             0
+#define PIN_OUTPUT             (0 | PULL_DIS)
 #define PIN_OUTPUT_PULLUP      (PIN_OUTPUT | PULL_ENA | PULL_UP)
 #define PIN_OUTPUT_PULLDOWN    (PIN_OUTPUT | PULL_ENA)
-#define PIN_INPUT              INPUT_EN
+#define PIN_INPUT              (INPUT_EN | PULL_DIS)
 #define PIN_INPUT_SLEW         (INPUT_EN | SLEWCONTROL)
 #define PIN_INPUT_PULLUP       (PULL_ENA | INPUT_EN | PULL_UP)
 #define PIN_INPUT_PULLDOWN     (PULL_ENA | INPUT_EN)
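
The hunk above inverts the pull semantics: on this SoC the bit at position 16 disables the pull resistor, so PULL_ENA is now that bit cleared, the new PULL_DIS sets it, and PIN_OUTPUT/PIN_INPUT gain an explicit PULL_DIS so pulls are off unless requested. A standalone sketch of the resulting bit patterns, using the macros exactly as defined above:

#include <stdio.h>

#define PULL_ENA        (0 << 16)
#define PULL_DIS        (1 << 16)
#define PULL_UP         (1 << 17)
#define INPUT_EN        (1 << 18)

#define PIN_OUTPUT              (0 | PULL_DIS)
#define PIN_INPUT_PULLUP        (PULL_ENA | INPUT_EN | PULL_UP)

int main(void)
{
        printf("PIN_OUTPUT       = 0x%x\n", PIN_OUTPUT);       /* 0x10000 */
        printf("PIN_INPUT_PULLUP = 0x%x\n", PIN_INPUT_PULLUP); /* 0x60000: bit 16 clear, pull enabled */
        return 0;
}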
diff --git a/include/dt-bindings/reset/qcom,gcc-apq8084.h b/include/dt-bindings/reset/qcom,gcc-apq8084.h
new file mode 100644 (file)
index 0000000..527caaf
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_APQ_GCC_8084_H
+#define _DT_BINDINGS_RESET_APQ_GCC_8084_H
+
+#define GCC_SYSTEM_NOC_BCR             0
+#define GCC_CONFIG_NOC_BCR             1
+#define GCC_PERIPH_NOC_BCR             2
+#define GCC_IMEM_BCR                   3
+#define GCC_MMSS_BCR                   4
+#define GCC_QDSS_BCR                   5
+#define GCC_USB_30_BCR                 6
+#define GCC_USB3_PHY_BCR               7
+#define GCC_USB_HS_HSIC_BCR            8
+#define GCC_USB_HS_BCR                 9
+#define GCC_USB2A_PHY_BCR              10
+#define GCC_USB2B_PHY_BCR              11
+#define GCC_SDCC1_BCR                  12
+#define GCC_SDCC2_BCR                  13
+#define GCC_SDCC3_BCR                  14
+#define GCC_SDCC4_BCR                  15
+#define GCC_BLSP1_BCR                  16
+#define GCC_BLSP1_QUP1_BCR             17
+#define GCC_BLSP1_UART1_BCR            18
+#define GCC_BLSP1_QUP2_BCR             19
+#define GCC_BLSP1_UART2_BCR            20
+#define GCC_BLSP1_QUP3_BCR             21
+#define GCC_BLSP1_UART3_BCR            22
+#define GCC_BLSP1_QUP4_BCR             23
+#define GCC_BLSP1_UART4_BCR            24
+#define GCC_BLSP1_QUP5_BCR             25
+#define GCC_BLSP1_UART5_BCR            26
+#define GCC_BLSP1_QUP6_BCR             27
+#define GCC_BLSP1_UART6_BCR            28
+#define GCC_BLSP2_BCR                  29
+#define GCC_BLSP2_QUP1_BCR             30
+#define GCC_BLSP2_UART1_BCR            31
+#define GCC_BLSP2_QUP2_BCR             32
+#define GCC_BLSP2_UART2_BCR            33
+#define GCC_BLSP2_QUP3_BCR             34
+#define GCC_BLSP2_UART3_BCR            35
+#define GCC_BLSP2_QUP4_BCR             36
+#define GCC_BLSP2_UART4_BCR            37
+#define GCC_BLSP2_QUP5_BCR             38
+#define GCC_BLSP2_UART5_BCR            39
+#define GCC_BLSP2_QUP6_BCR             40
+#define GCC_BLSP2_UART6_BCR            41
+#define GCC_PDM_BCR                    42
+#define GCC_PRNG_BCR                   43
+#define GCC_BAM_DMA_BCR                        44
+#define GCC_TSIF_BCR                   45
+#define GCC_TCSR_BCR                   46
+#define GCC_BOOT_ROM_BCR               47
+#define GCC_MSG_RAM_BCR                        48
+#define GCC_TLMM_BCR                   49
+#define GCC_MPM_BCR                    50
+#define GCC_MPM_AHB_RESET              51
+#define GCC_MPM_NON_AHB_RESET          52
+#define GCC_SEC_CTRL_BCR               53
+#define GCC_SPMI_BCR                   54
+#define GCC_SPDM_BCR                   55
+#define GCC_CE1_BCR                    56
+#define GCC_CE2_BCR                    57
+#define GCC_BIMC_BCR                   58
+#define GCC_SNOC_BUS_TIMEOUT0_BCR      59
+#define GCC_SNOC_BUS_TIMEOUT2_BCR      60
+#define GCC_PNOC_BUS_TIMEOUT0_BCR      61
+#define GCC_PNOC_BUS_TIMEOUT1_BCR      62
+#define GCC_PNOC_BUS_TIMEOUT2_BCR      63
+#define GCC_PNOC_BUS_TIMEOUT3_BCR      64
+#define GCC_PNOC_BUS_TIMEOUT4_BCR      65
+#define GCC_CNOC_BUS_TIMEOUT0_BCR      66
+#define GCC_CNOC_BUS_TIMEOUT1_BCR      67
+#define GCC_CNOC_BUS_TIMEOUT2_BCR      68
+#define GCC_CNOC_BUS_TIMEOUT3_BCR      69
+#define GCC_CNOC_BUS_TIMEOUT4_BCR      70
+#define GCC_CNOC_BUS_TIMEOUT5_BCR      71
+#define GCC_CNOC_BUS_TIMEOUT6_BCR      72
+#define GCC_DEHR_BCR                   73
+#define GCC_RBCPR_BCR                  74
+#define GCC_MSS_RESTART                        75
+#define GCC_LPASS_RESTART              76
+#define GCC_WCSS_RESTART               77
+#define GCC_VENUS_RESTART              78
+#define GCC_COPSS_SMMU_BCR             79
+#define GCC_SPSS_BCR                   80
+#define GCC_PCIE_0_BCR                 81
+#define GCC_PCIE_0_PHY_BCR             82
+#define GCC_PCIE_1_BCR                 83
+#define GCC_PCIE_1_PHY_BCR             84
+#define GCC_USB_30_SEC_BCR             85
+#define GCC_USB3_SEC_PHY_BCR           86
+#define GCC_SATA_BCR                   87
+#define GCC_CE3_BCR                    88
+#define GCC_UFS_BCR                    89
+#define GCC_USB30_PHY_COM_BCR          90
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq806x.h b/include/dt-bindings/reset/qcom,gcc-ipq806x.h
new file mode 100644 (file)
index 0000000..0ad5ef9
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_806X_H
+#define _DT_BINDINGS_RESET_IPQ_806X_H
+
+#define QDSS_STM_RESET                                 0
+#define AFAB_SMPSS_S_RESET                             1
+#define AFAB_SMPSS_M1_RESET                            2
+#define AFAB_SMPSS_M0_RESET                            3
+#define AFAB_EBI1_CH0_RESET                            4
+#define AFAB_EBI1_CH1_RESET                            5
+#define SFAB_ADM0_M0_RESET                             6
+#define SFAB_ADM0_M1_RESET                             7
+#define SFAB_ADM0_M2_RESET                             8
+#define ADM0_C2_RESET                                  9
+#define ADM0_C1_RESET                                  10
+#define ADM0_C0_RESET                                  11
+#define ADM0_PBUS_RESET                                        12
+#define ADM0_RESET                                     13
+#define QDSS_CLKS_SW_RESET                             14
+#define QDSS_POR_RESET                                 15
+#define QDSS_TSCTR_RESET                               16
+#define QDSS_HRESET_RESET                              17
+#define QDSS_AXI_RESET                                 18
+#define QDSS_DBG_RESET                                 19
+#define SFAB_PCIE_M_RESET                              20
+#define SFAB_PCIE_S_RESET                              21
+#define PCIE_EXT_RESET                                 22
+#define PCIE_PHY_RESET                                 23
+#define PCIE_PCI_RESET                                 24
+#define PCIE_POR_RESET                                 25
+#define PCIE_HCLK_RESET                                        26
+#define PCIE_ACLK_RESET                                        27
+#define SFAB_LPASS_RESET                               28
+#define SFAB_AFAB_M_RESET                              29
+#define AFAB_SFAB_M0_RESET                             30
+#define AFAB_SFAB_M1_RESET                             31
+#define SFAB_SATA_S_RESET                              32
+#define SFAB_DFAB_M_RESET                              33
+#define DFAB_SFAB_M_RESET                              34
+#define DFAB_SWAY0_RESET                               35
+#define DFAB_SWAY1_RESET                               36
+#define DFAB_ARB0_RESET                                        37
+#define DFAB_ARB1_RESET                                        38
+#define PPSS_PROC_RESET                                        39
+#define PPSS_RESET                                     40
+#define DMA_BAM_RESET                                  41
+#define SPS_TIC_H_RESET                                        42
+#define SFAB_CFPB_M_RESET                              43
+#define SFAB_CFPB_S_RESET                              44
+#define TSIF_H_RESET                                   45
+#define CE1_H_RESET                                    46
+#define CE1_CORE_RESET                                 47
+#define CE1_SLEEP_RESET                                        48
+#define CE2_H_RESET                                    49
+#define CE2_CORE_RESET                                 50
+#define SFAB_SFPB_M_RESET                              51
+#define SFAB_SFPB_S_RESET                              52
+#define RPM_PROC_RESET                                 53
+#define PMIC_SSBI2_RESET                               54
+#define SDC1_RESET                                     55
+#define SDC2_RESET                                     56
+#define SDC3_RESET                                     57
+#define SDC4_RESET                                     58
+#define USB_HS1_RESET                                  59
+#define USB_HSIC_RESET                                 60
+#define USB_FS1_XCVR_RESET                             61
+#define USB_FS1_RESET                                  62
+#define GSBI1_RESET                                    63
+#define GSBI2_RESET                                    64
+#define GSBI3_RESET                                    65
+#define GSBI4_RESET                                    66
+#define GSBI5_RESET                                    67
+#define GSBI6_RESET                                    68
+#define GSBI7_RESET                                    69
+#define SPDM_RESET                                     70
+#define SEC_CTRL_RESET                                 71
+#define TLMM_H_RESET                                   72
+#define SFAB_SATA_M_RESET                              73
+#define SATA_RESET                                     74
+#define TSSC_RESET                                     75
+#define PDM_RESET                                      76
+#define MPM_H_RESET                                    77
+#define MPM_RESET                                      78
+#define SFAB_SMPSS_S_RESET                             79
+#define PRNG_RESET                                     80
+#define SFAB_CE3_M_RESET                               81
+#define SFAB_CE3_S_RESET                               82
+#define CE3_SLEEP_RESET                                        83
+#define PCIE_1_M_RESET                                 84
+#define PCIE_1_S_RESET                                 85
+#define PCIE_1_EXT_RESET                               86
+#define PCIE_1_PHY_RESET                               87
+#define PCIE_1_PCI_RESET                               88
+#define PCIE_1_POR_RESET                               89
+#define PCIE_1_HCLK_RESET                              90
+#define PCIE_1_ACLK_RESET                              91
+#define PCIE_2_M_RESET                                 92
+#define PCIE_2_S_RESET                                 93
+#define PCIE_2_EXT_RESET                               94
+#define PCIE_2_PHY_RESET                               95
+#define PCIE_2_PCI_RESET                               96
+#define PCIE_2_POR_RESET                               97
+#define PCIE_2_HCLK_RESET                              98
+#define PCIE_2_ACLK_RESET                              99
+#define SFAB_USB30_S_RESET                             100
+#define SFAB_USB30_M_RESET                             101
+#define USB30_0_PORT2_HS_PHY_RESET                     102
+#define USB30_0_MASTER_RESET                           103
+#define USB30_0_SLEEP_RESET                            104
+#define USB30_0_UTMI_PHY_RESET                         105
+#define USB30_0_POWERON_RESET                          106
+#define USB30_0_PHY_RESET                              107
+#define USB30_1_MASTER_RESET                           108
+#define USB30_1_SLEEP_RESET                            109
+#define USB30_1_UTMI_PHY_RESET                         110
+#define USB30_1_POWERON_RESET                          111
+#define USB30_1_PHY_RESET                              112
+#define NSSFB0_RESET                                   113
+#define NSSFB1_RESET                                   114
+#endif
index 07edd0e..47c8686 100644 (file)
 #define SFAB_SMPSS_S_RESET                             97
 #define PRNG_RESET                                     98
 #define RIVA_RESET                                     99
+#define USB_HS3_RESET                                  100
+#define USB_HS4_RESET                                  101
+#define CE3_RESET                                      102
+#define PCIE_EXT_PCI_RESET                             103
+#define PCIE_PHY_RESET                                 104
+#define PCIE_PCI_RESET                                 105
+#define PCIE_POR_RESET                                 106
+#define PCIE_HCLK_RESET                                        107
+#define PCIE_ACLK_RESET                                        108
+#define CE3_H_RESET                                    109
+#define SFAB_CE3_M_RESET                               110
+#define SFAB_CE3_S_RESET                               111
+#define SATA_RESET                                     112
+#define CE3_SLEEP_RESET                                        113
+#define GSS_SLP_RESET                                  114
+#define GSS_RESET                                      115
 
 #endif
diff --git a/include/dt-bindings/reset/qcom,mmcc-apq8084.h b/include/dt-bindings/reset/qcom,mmcc-apq8084.h
new file mode 100644 (file)
index 0000000..c167139
--- /dev/null
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_APQ_MMCC_8084_H
+#define _DT_BINDINGS_RESET_APQ_MMCC_8084_H
+
+#define MMSS_SPDM_RESET                        0
+#define MMSS_SPDM_RM_RESET             1
+#define VENUS0_RESET                   2
+#define VPU_RESET                      3
+#define MDSS_RESET                     4
+#define AVSYNC_RESET                   5
+#define CAMSS_PHY0_RESET               6
+#define CAMSS_PHY1_RESET               7
+#define CAMSS_PHY2_RESET               8
+#define CAMSS_CSI0_RESET               9
+#define CAMSS_CSI0PHY_RESET            10
+#define CAMSS_CSI0RDI_RESET            11
+#define CAMSS_CSI0PIX_RESET            12
+#define CAMSS_CSI1_RESET               13
+#define CAMSS_CSI1PHY_RESET            14
+#define CAMSS_CSI1RDI_RESET            15
+#define CAMSS_CSI1PIX_RESET            16
+#define CAMSS_CSI2_RESET               17
+#define CAMSS_CSI2PHY_RESET            18
+#define CAMSS_CSI2RDI_RESET            19
+#define CAMSS_CSI2PIX_RESET            20
+#define CAMSS_CSI3_RESET               21
+#define CAMSS_CSI3PHY_RESET            22
+#define CAMSS_CSI3RDI_RESET            23
+#define CAMSS_CSI3PIX_RESET            24
+#define CAMSS_ISPIF_RESET              25
+#define CAMSS_CCI_RESET                        26
+#define CAMSS_MCLK0_RESET              27
+#define CAMSS_MCLK1_RESET              28
+#define CAMSS_MCLK2_RESET              29
+#define CAMSS_MCLK3_RESET              30
+#define CAMSS_GP0_RESET                        31
+#define CAMSS_GP1_RESET                        32
+#define CAMSS_TOP_RESET                        33
+#define CAMSS_AHB_RESET                        34
+#define CAMSS_MICRO_RESET              35
+#define CAMSS_JPEG_RESET               36
+#define CAMSS_VFE_RESET                        37
+#define CAMSS_CSI_VFE0_RESET           38
+#define CAMSS_CSI_VFE1_RESET           39
+#define OXILI_RESET                    40
+#define OXILICX_RESET                  41
+#define OCMEMCX_RESET                  42
+#define MMSS_RBCRP_RESET               43
+#define MMSSNOCAHB_RESET               44
+#define MMSSNOCAXI_RESET               45
+
+#endif
index ba36ec6..1174111 100644 (file)
 #define CSI2_RESET                                     72
 #define CSI_RDI1_RESET                                 73
 #define CSI_RDI2_RESET                                 74
+#define GFX3D_AXI_RESET                                        75
+#define VCAP_AXI_RESET                                 76
+#define SMMU_VCAP_AHB_RESET                            77
+#define VCAP_AHB_RESET                                 78
+#define CSI_RDI_RESET                                  79
+#define CSI_PIX_RESET                                  80
+#define VCAP_NPL_RESET                                 81
+#define VCAP_RESET                                     82
 
 #endif
index 6dfd51a..09a947e 100644 (file)
@@ -43,10 +43,7 @@ struct ahci_host_priv *ahci_platform_get_resources(
        struct platform_device *pdev);
 int ahci_platform_init_host(struct platform_device *pdev,
                            struct ahci_host_priv *hpriv,
-                           const struct ata_port_info *pi_template,
-                           unsigned long host_flags,
-                           unsigned int force_port_map,
-                           unsigned int mask_port_map);
+                           const struct ata_port_info *pi_template);
 
 int ahci_platform_suspend_host(struct device *dev);
 int ahci_platform_resume_host(struct device *dev);
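
With the host_flags and the force/mask port-map arguments dropped, ahci_platform_init_host() takes only the platform device, the host-private data and the port-info template. A hedged call-site sketch (not from this commit; my_port_info is a hypothetical driver-local template and error handling is abbreviated):

static int my_ahci_probe(struct platform_device *pdev)
{
        struct ahci_host_priv *hpriv;

        hpriv = ahci_platform_get_resources(pdev);
        if (IS_ERR(hpriv))
                return PTR_ERR(hpriv);

        return ahci_platform_init_host(pdev, hpriv, &my_port_info);
}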
index 5a64576..d2633ee 100644 (file)
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
+{
+       return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
+}
+
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
@@ -644,10 +653,6 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-
-
-#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
-
 #define bip_for_each_vec(bvl, bip, iter)                               \
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
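
The new bvec_gap_to_prev() helper above encodes a simple rule: a following vector can be appended without a scatter-gather gap only if it starts at offset 0 and the previous vector ends exactly on a page boundary (queues that care set the new QUEUE_FLAG_SG_GAPS, below). A standalone sketch of the same check, with PAGE_SIZE assumed to be 4096 and a local stand-in for struct bio_vec:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct bio_vec_demo {
        unsigned int bv_offset;
        unsigned int bv_len;
};

static bool gap_to_prev(const struct bio_vec_demo *bprv, unsigned int offset)
{
        return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
}

int main(void)
{
        struct bio_vec_demo full_page = { .bv_offset = 0, .bv_len = PAGE_SIZE };
        struct bio_vec_demo partial   = { .bv_offset = 0, .bv_len = 512 };

        printf("%d\n", gap_to_prev(&full_page, 0)); /* 0: ends on page boundary */
        printf("%d\n", gap_to_prev(&partial, 0));   /* 1: gap in the SG list */
        return 0;
}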
index a002cf1..eb726b9 100644 (file)
@@ -42,7 +42,7 @@ struct blk_mq_hw_ctx {
        unsigned int            nr_ctx;
        struct blk_mq_ctx       **ctxs;
 
-       unsigned int            wait_index;
+       atomic_t                wait_index;
 
        struct blk_mq_tags      *tags;
 
index 31e1105..8699bcf 100644 (file)
@@ -512,6 +512,7 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19      /* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20      /* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21      /* don't attempt to merge SG segments */
+#define QUEUE_FLAG_SG_GAPS     22      /* queue doesn't support SG gaps */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -920,7 +921,7 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
                                               sector_t offset)
 {
        if (!q->limits.chunk_sectors)
-               return q->limits.max_hw_sectors;
+               return q->limits.max_sectors;
 
        return q->limits.chunk_sectors -
                        (offset & (q->limits.chunk_sectors - 1));
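
The fix above caps queues without chunking at max_sectors rather than max_hw_sectors; for chunked queues the retained arithmetic applies. Assuming chunk_sectors is a power of two (which the mask arithmetic requires), chunk_sectors - (offset & (chunk_sectors - 1)) is the number of sectors left before the next chunk boundary, so a request starting at offset never crosses one. A standalone sketch of that computation:

#include <stdio.h>

typedef unsigned long long sector_t;

static unsigned int max_size_at(unsigned int chunk_sectors, sector_t offset)
{
        return chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
}

int main(void)
{
        /* 8-sector chunks: at offset 5, only 3 sectors fit before the boundary */
        printf("%u\n", max_size_at(8, 5));  /* prints 3 */
        printf("%u\n", max_size_at(8, 8));  /* prints 8: aligned start */
        return 0;
}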
index 8a111dd..b5223c5 100644 (file)
@@ -203,7 +203,15 @@ struct cgroup {
        struct kernfs_node *kn;         /* cgroup kernfs entry */
        struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */
 
-       /* the bitmask of subsystems enabled on the child cgroups */
+       /*
+        * The bitmask of subsystems enabled on the child cgroups.
+        * ->subtree_control is the one configured through
+        * "cgroup.subtree_control" while ->child_subsys_mask is the
+        * effective one which may have more subsystems enabled.
+        * Controller knobs are made available iff it's enabled in
+        * ->subtree_control.
+        */
+       unsigned int subtree_control;
        unsigned int child_subsys_mask;
 
        /* Private pointers for each registered subsystem */
@@ -248,73 +256,9 @@ struct cgroup {
 
 /* cgroup_root->flags */
 enum {
-       /*
-        * Unfortunately, cgroup core and various controllers are riddled
-        * with idiosyncrasies and pointless options.  The following flag,
-        * when set, will force sane behavior - some options are forced on,
-        * others are disallowed, and some controllers will change their
-        * hierarchical or other behaviors.
-        *
-        * The set of behaviors affected by this flag are still being
-        * determined and developed and the mount option for this flag is
-        * prefixed with __DEVEL__.  The prefix will be dropped once we
-        * reach the point where all behaviors are compatible with the
-        * planned unified hierarchy, which will automatically turn on this
-        * flag.
-        *
-        * The followings are the behaviors currently affected this flag.
-        *
-        * - Mount options "noprefix", "xattr", "clone_children",
-        *   "release_agent" and "name" are disallowed.
-        *
-        * - When mounting an existing superblock, mount options should
-        *   match.
-        *
-        * - Remount is disallowed.
-        *
-        * - rename(2) is disallowed.
-        *
-        * - "tasks" is removed.  Everything should be at process
-        *   granularity.  Use "cgroup.procs" instead.
-        *
-        * - "cgroup.procs" is not sorted.  pids will be unique unless they
-        *   got recycled inbetween reads.
-        *
-        * - "release_agent" and "notify_on_release" are removed.
-        *   Replacement notification mechanism will be implemented.
-        *
-        * - "cgroup.clone_children" is removed.
-        *
-        * - "cgroup.subtree_populated" is available.  Its value is 0 if
-        *   the cgroup and its descendants contain no task; otherwise, 1.
-        *   The file also generates kernfs notification which can be
-        *   monitored through poll and [di]notify when the value of the
-        *   file changes.
-        *
-        * - If mount is requested with sane_behavior but without any
-        *   subsystem, the default unified hierarchy is mounted.
-        *
-        * - cpuset: tasks will be kept in empty cpusets when hotplug happens
-        *   and take masks of ancestors with non-empty cpus/mems, instead of
-        *   being moved to an ancestor.
-        *
-        * - cpuset: a task can be moved into an empty cpuset, and again it
-        *   takes masks of ancestors.
-        *
-        * - memcg: use_hierarchy is on by default and the cgroup file for
-        *   the flag is not created.
-        *
-        * - blkcg: blk-throttle becomes properly hierarchical.
-        *
-        * - debug: disallowed on the default hierarchy.
-        */
-       CGRP_ROOT_SANE_BEHAVIOR = (1 << 0),
-
+       CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */
        CGRP_ROOT_NOPREFIX      = (1 << 1), /* mounted subsystems have no named prefix */
        CGRP_ROOT_XATTR         = (1 << 2), /* supports extended attributes */
-
-       /* mount options live below bit 16 */
-       CGRP_ROOT_OPTION_MASK   = (1 << 16) - 1,
 };
 
 /*
@@ -440,9 +384,11 @@ struct css_set {
 enum {
        CFTYPE_ONLY_ON_ROOT     = (1 << 0),     /* only create on root cgrp */
        CFTYPE_NOT_ON_ROOT      = (1 << 1),     /* don't create on root cgrp */
-       CFTYPE_INSANE           = (1 << 2),     /* don't create if sane_behavior */
        CFTYPE_NO_PREFIX        = (1 << 3),     /* (DON'T USE FOR NEW FILES) no subsys prefix */
-       CFTYPE_ONLY_ON_DFL      = (1 << 4),     /* only on default hierarchy */
+
+       /* internal flags, do not use outside cgroup core proper */
+       __CFTYPE_ONLY_ON_DFL    = (1 << 16),    /* only on default hierarchy */
+       __CFTYPE_NOT_ON_DFL     = (1 << 17),    /* not on default hierarchy */
 };
 
 #define MAX_CFTYPE_NAME                64
@@ -526,20 +472,64 @@ struct cftype {
 extern struct cgroup_root cgrp_dfl_root;
 extern struct css_set init_css_set;
 
+/**
+ * cgroup_on_dfl - test whether a cgroup is on the default hierarchy
+ * @cgrp: the cgroup of interest
+ *
+ * The default hierarchy is the v2 interface of cgroup and this function
+ * can be used to test whether a cgroup is on the default hierarchy for
+ * cases where a subsystem should behave differently depending on the
+ * interface version.
+ *
+ * The set of behaviors which change on the default hierarchy is still
+ * being determined and the mount option is prefixed with __DEVEL__.
+ *
+ * List of changed behaviors:
+ *
+ * - Mount options "noprefix", "xattr", "clone_children", "release_agent"
+ *   and "name" are disallowed.
+ *
+ * - When mounting an existing superblock, mount options should match.
+ *
+ * - Remount is disallowed.
+ *
+ * - rename(2) is disallowed.
+ *
+ * - "tasks" is removed.  Everything should be at process granularity.  Use
+ *   "cgroup.procs" instead.
+ *
+ * - "cgroup.procs" is not sorted.  pids will be unique unless they got
+ *   recycled inbetween reads.
+ *
+ * - "release_agent" and "notify_on_release" are removed.  Replacement
+ *   notification mechanism will be implemented.
+ *
+ * - "cgroup.clone_children" is removed.
+ *
+ * - "cgroup.subtree_populated" is available.  Its value is 0 if the cgroup
+ *   and its descendants contain no task; otherwise, 1.  The file also
+ *   generates kernfs notification which can be monitored through poll and
+ *   [di]notify when the value of the file changes.
+ *
+ * - cpuset: tasks will be kept in empty cpusets when hotplug happens and
+ *   take masks of ancestors with non-empty cpus/mems, instead of being
+ *   moved to an ancestor.
+ *
+ * - cpuset: a task can be moved into an empty cpuset, and again it takes
+ *   masks of ancestors.
+ *
+ * - memcg: use_hierarchy is on by default and the cgroup file for the flag
+ *   is not created.
+ *
+ * - blkcg: blk-throttle becomes properly hierarchical.
+ *
+ * - debug: disallowed on the default hierarchy.
+ */
 static inline bool cgroup_on_dfl(const struct cgroup *cgrp)
 {
        return cgrp->root == &cgrp_dfl_root;
 }
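
A hedged sketch of the intended use (the callback name and both branches are hypothetical, shown only to illustrate the version test):

static int my_css_online(struct cgroup_subsys_state *css)
{
	if (cgroup_on_dfl(css->cgroup)) {
		/* v2: process granularity only, no "tasks" file */
		return 0;
	}
	/* legacy hierarchy: historical behaviors stay in effect */
	return 0;
}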
 
-/*
- * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details.  This
- * function can be called as long as @cgrp is accessible.
- */
-static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
-{
-       return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
-}
-
 /* no synchronization, the result can only be used as a hint */
 static inline bool cgroup_has_tasks(struct cgroup *cgrp)
 {
@@ -602,7 +592,8 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 
 char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
 
-int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
 int cgroup_rm_cftypes(struct cftype *cfts);
 
 bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
@@ -634,6 +625,7 @@ struct cgroup_subsys {
        int (*css_online)(struct cgroup_subsys_state *css);
        void (*css_offline)(struct cgroup_subsys_state *css);
        void (*css_free)(struct cgroup_subsys_state *css);
+       void (*css_reset)(struct cgroup_subsys_state *css);
 
        int (*can_attach)(struct cgroup_subsys_state *css,
                          struct cgroup_taskset *tset);
@@ -682,8 +674,21 @@ struct cgroup_subsys {
         */
        struct list_head cfts;
 
-       /* base cftypes, automatically registered with subsys itself */
-       struct cftype *base_cftypes;
+       /*
+        * Base cftypes which are automatically registered.  The two can
+        * point to the same array.
+        */
+       struct cftype *dfl_cftypes;     /* for the default hierarchy */
+       struct cftype *legacy_cftypes;  /* for the legacy hierarchies */
+
+       /*
+        * A subsystem may depend on other subsystems.  When such a subsystem
+        * is enabled on a cgroup, the depended-upon subsystems are enabled
+        * together if available.  Subsystems enabled due to dependency are
+        * not visible to userland until explicitly enabled.  The following
+        * specifies the mask of subsystems that this one depends on.
+        */
+       unsigned int depends_on;
 };
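
As a hedged illustration of the new fields (the instance below is hypothetical; memory_cgrp_id is the generated id of the memory controller), one cftype array may back both interfaces while depends_on implicitly enables another subsystem:

static struct cftype my_files[] = {
	{ .name = "usage" },	/* handlers omitted for brevity */
	{ }			/* empty entry terminates the array */
};

static struct cgroup_subsys my_example_subsys = {
	.dfl_cftypes	= my_files,		/* default hierarchy */
	.legacy_cftypes	= my_files,		/* same array is allowed */
	.depends_on	= 1 << memory_cgrp_id,	/* enabled together */
};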
 
 #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
index 0c287db..411dd7e 100644 (file)
@@ -619,5 +619,10 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
 
 #endif /* platform dependent I/O accessors */
 
+#ifdef CONFIG_DEBUG_FS
+struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+                               void *data, const struct file_operations *fops);
+#endif
+
 #endif /* CONFIG_COMMON_CLK */
 #endif /* CLK_PROVIDER_H */
diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h
new file mode 100644 (file)
index 0000000..f3050e1
--- /dev/null
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Samsung Electronics Co., Ltd.
+ * Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+struct device_node;
+
+#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
+int of_clk_set_defaults(struct device_node *node, bool clk_supplier);
+#else
+static inline int of_clk_set_defaults(struct device_node *node,
+                                     bool clk_supplier)
+{
+       return 0;
+}
+#endif
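
A hedged usage sketch for the new helper (the driver shape is hypothetical): a platform driver applies the DT-assigned clock parents and rates from its node before touching the clocks; the second argument marks whether the node is itself a clock supplier:

static int my_clk_user_probe(struct platform_device *pdev)
{
	int ret;

	/* honor assigned-clocks/-clock-parents/-clock-rates properties */
	ret = of_clk_set_defaults(pdev->dev.of_node, false);
	if (ret < 0)
		return ret;

	return 0;
}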
index ec4112d..8f8ae95 100644 (file)
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
  *********************************************************************/
 
 /* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID  ~0
-#define CPUFREQ_TABLE_END      ~1
+#define CPUFREQ_ENTRY_INVALID  ~0u
+#define CPUFREQ_TABLE_END      ~1u
 /* Special Values of .flags field */
 #define CPUFREQ_BOOST_FREQ     (1 << 0)
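
The switch to unsigned literals matters because the .frequency field is an unsigned int, so comparing it against plain ~0 (a signed int) invites sign-compare warnings. A minimal sketch of the table walk these sentinels guard ("table" is an assumed, already-initialized pointer):

struct cpufreq_frequency_table *pos;

for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) {
	if (pos->frequency == CPUFREQ_ENTRY_INVALID)
		continue;
	/* pos->frequency holds a usable frequency here */
}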
 
index b92eadf..d45e949 100644 (file)
@@ -710,9 +710,9 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req)
 
 static inline void ablkcipher_request_set_callback(
        struct ablkcipher_request *req,
-       u32 flags, crypto_completion_t complete, void *data)
+       u32 flags, crypto_completion_t compl, void *data)
 {
-       req->base.complete = complete;
+       req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
 }
@@ -841,10 +841,10 @@ static inline void aead_request_free(struct aead_request *req)
 
 static inline void aead_request_set_callback(struct aead_request *req,
                                             u32 flags,
-                                            crypto_completion_t complete,
+                                            crypto_completion_t compl,
                                             void *data)
 {
-       req->base.complete = complete;
+       req->base.complete = compl;
        req->base.data = data;
        req->base.flags = flags;
 }
index 4ff262e..45a9147 100644 (file)
@@ -133,7 +133,6 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
 extern int elv_may_queue(struct request_queue *, int);
-extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
                           struct bio *bio, gfp_t gfp_mask);
@@ -144,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
  * io scheduler registration
  */
 extern void __init load_default_elevator_module(void);
-extern int __init elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*
index 338e6f7..2daccaf 100644 (file)
@@ -833,7 +833,7 @@ static inline struct file *get_file(struct file *f)
  *
  * Lockd stuffs a "host" pointer into this.
  */
-typedef struct files_struct *fl_owner_t;
+typedef void *fl_owner_t;
 
 struct file_lock_operations {
        void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
@@ -1921,6 +1921,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 
 static inline int break_deleg(struct inode *inode, unsigned int mode)
 {
+       /*
+        * Since this check is lockless, we must ensure that any refcounts
+        * taken are done before checking inode->i_flock. Otherwise, we could
+        * end up racing with tasks trying to set a new lease on this file.
+        */
+       smp_mb();
        if (inode->i_flock)
                return __break_lease(inode, mode, FL_DELEG);
        return 0;
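
A hedged sketch of the ordering the new barrier enforces (the caller below is hypothetical): the reference must be visible before i_flock is sampled, so a racing lease setter either sees the reference or this path sees the lease:

static int my_break_before_write(struct inode *inode)
{
	int error;

	ihold(inode);				/* refcount taken first ... */
	error = break_deleg(inode, O_WRONLY);	/* ... smp_mb() orders it
						 * ahead of the i_flock test */
	if (error)
		iput(inode);
	return error;
}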
index 404a686..6bb5e3f 100644 (file)
@@ -33,8 +33,7 @@
  * features, then it must call an indirect function that
  * does. Or at least does enough to prevent any unwelcome side effects.
  */
-#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
-       !ARCH_SUPPORTS_FTRACE_OPS
+#if !ARCH_SUPPORTS_FTRACE_OPS
 # define FTRACE_FORCE_LIST_FUNC 1
 #else
 # define FTRACE_FORCE_LIST_FUNC 0
@@ -118,17 +117,18 @@ struct ftrace_ops {
        ftrace_func_t                   func;
        struct ftrace_ops               *next;
        unsigned long                   flags;
-       int __percpu                    *disabled;
        void                            *private;
+       int __percpu                    *disabled;
 #ifdef CONFIG_DYNAMIC_FTRACE
+       int                             nr_trampolines;
        struct ftrace_hash              *notrace_hash;
        struct ftrace_hash              *filter_hash;
+       struct ftrace_hash              *tramp_hash;
        struct mutex                    regex_lock;
+       unsigned long                   trampoline;
 #endif
 };
 
-extern int function_trace_stop;
-
 /*
  * Type of the current tracing.
  */
@@ -140,32 +140,6 @@ enum ftrace_tracing_type_t {
 /* Current tracing type, default is FTRACE_TYPE_ENTER */
 extern enum ftrace_tracing_type_t ftrace_tracing_type;
 
-/**
- * ftrace_stop - stop function tracer.
- *
- * A quick way to stop the function tracer. Note this an on off switch,
- * it is not something that is recursive like preempt_disable.
- * This does not disable the calling of mcount, it only stops the
- * calling of functions from mcount.
- */
-static inline void ftrace_stop(void)
-{
-       function_trace_stop = 1;
-}
-
-/**
- * ftrace_start - start the function tracer.
- *
- * This function is the inverse of ftrace_stop. This does not enable
- * the function tracing if the function tracer is disabled. This only
- * sets the function tracer flag to continue calling the functions
- * from mcount.
- */
-static inline void ftrace_start(void)
-{
-       function_trace_stop = 0;
-}
-
 /*
  * The ftrace_ops must be a static and should also
  * be read_mostly.  These functions do modify read_mostly variables
@@ -242,8 +216,6 @@ static inline int ftrace_nr_registered_ops(void)
 }
 static inline void clear_ftrace_function(void) { }
 static inline void ftrace_kill(void) { }
-static inline void ftrace_stop(void) { }
-static inline void ftrace_start(void) { }
 #endif /* CONFIG_FUNCTION_TRACER */
 
 #ifdef CONFIG_STACK_TRACER
@@ -317,13 +289,20 @@ extern int ftrace_nr_registered_ops(void);
  * from tracing that function.
  */
 enum {
-       FTRACE_FL_ENABLED       = (1UL << 29),
+       FTRACE_FL_ENABLED       = (1UL << 31),
        FTRACE_FL_REGS          = (1UL << 30),
-       FTRACE_FL_REGS_EN       = (1UL << 31)
+       FTRACE_FL_REGS_EN       = (1UL << 29),
+       FTRACE_FL_TRAMP         = (1UL << 28),
+       FTRACE_FL_TRAMP_EN      = (1UL << 27),
 };
 
-#define FTRACE_FL_MASK         (0x7UL << 29)
-#define FTRACE_REF_MAX         ((1UL << 29) - 1)
+#define FTRACE_REF_MAX_SHIFT   27
+#define FTRACE_FL_BITS         5
+#define FTRACE_FL_MASKED_BITS  ((1UL << FTRACE_FL_BITS) - 1)
+#define FTRACE_FL_MASK         (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
+#define FTRACE_REF_MAX         ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
+
+#define ftrace_rec_count(rec)  ((rec)->flags & ~FTRACE_FL_MASK)
 
 struct dyn_ftrace {
        unsigned long           ip; /* address of mcount call-site */
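
Worked arithmetic for the relaid-out flags word: five flag bits (FTRACE_FL_BITS) now occupy bits 27..31, so the reference count keeps bits 0..26 and FTRACE_REF_MAX is (1UL << 27) - 1 = 134217727. A hedged sketch of splitting a record's flags (the initializer is hypothetical):

static void show_flag_layout(void)
{
	struct dyn_ftrace rec = {
		.flags = FTRACE_FL_ENABLED | FTRACE_FL_TRAMP | 3,
	};
	unsigned long refs = ftrace_rec_count(&rec);	/* low 27 bits: 3 */
	bool enabled = rec.flags & FTRACE_FL_ENABLED;	/* flag bit 31 set */

	(void)refs;
	(void)enabled;
}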
@@ -431,6 +410,10 @@ void ftrace_modify_all_code(int command);
 #define FTRACE_ADDR ((unsigned long)ftrace_caller)
 #endif
 
+#ifndef FTRACE_GRAPH_ADDR
+#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
+#endif
+
 #ifndef FTRACE_REGS_ADDR
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
 # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
@@ -439,6 +422,16 @@ void ftrace_modify_all_code(int command);
 #endif
 #endif
 
+/*
+ * If an arch would like functions that are only traced
+ * by the function graph tracer to jump directly to its own
+ * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR
+ * to be that address to jump to.
+ */
+#ifndef FTRACE_GRAPH_TRAMP_ADDR
+#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
+#endif
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern void ftrace_graph_caller(void);
 extern int ftrace_enable_ftrace_graph_caller(void);
@@ -736,6 +729,7 @@ extern char __irqentry_text_end[];
 extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
                                trace_func_graph_ent_t entryfunc);
 
+extern bool ftrace_graph_is_dead(void);
 extern void ftrace_graph_stop(void);
 
 /* The current handlers in use */
index cff3106..06c6faa 100644 (file)
@@ -272,7 +272,6 @@ struct ftrace_event_call {
        struct trace_event      event;
        const char              *print_fmt;
        struct event_filter     *filter;
-       struct list_head        *files;
        void                    *mod;
        void                    *data;
        /*
@@ -404,8 +403,6 @@ enum event_trigger_type {
        ETT_EVENT_ENABLE        = (1 << 3),
 };
 
-extern void destroy_preds(struct ftrace_event_file *file);
-extern void destroy_call_preds(struct ftrace_event_call *call);
 extern int filter_match_preds(struct event_filter *filter, void *rec);
 
 extern int filter_check_discard(struct ftrace_event_file *file, void *rec,
index 255cd5c..a23c096 100644 (file)
@@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
 bool is_hugepage_active(struct page *page);
+void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
index 6df7f9f..2bb4c4f 100644 (file)
@@ -102,12 +102,6 @@ extern struct group_info init_groups;
 #define INIT_IDS
 #endif
 
-#ifdef CONFIG_RCU_BOOST
-#define INIT_TASK_RCU_BOOST()                                          \
-       .rcu_boost_mutex = NULL,
-#else
-#define INIT_TASK_RCU_BOOST()
-#endif
 #ifdef CONFIG_TREE_PREEMPT_RCU
 #define INIT_TASK_RCU_TREE_PREEMPT()                                   \
        .rcu_blocked_node = NULL,
@@ -119,8 +113,7 @@ extern struct group_info init_groups;
        .rcu_read_lock_nesting = 0,                                     \
        .rcu_read_unlock_special = 0,                                   \
        .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),           \
-       INIT_TASK_RCU_TREE_PREEMPT()                                    \
-       INIT_TASK_RCU_BOOST()
+       INIT_TASK_RCU_TREE_PREEMPT()
 #else
 #define INIT_TASK_RCU_PREEMPT(tsk)
 #endif
index 19ae05d..bf9422c 100644 (file)
@@ -33,6 +33,11 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
 #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
 
 bool irq_work_queue(struct irq_work *work);
+
+#ifdef CONFIG_SMP
+bool irq_work_queue_on(struct irq_work *work, int cpu);
+#endif
+
 void irq_work_run(void);
 void irq_work_sync(struct irq_work *work);
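
A hedged sketch of the new remote-queueing entry point (handler and caller are hypothetical); on SMP kernels it raises the work in IRQ context on the chosen CPU:

static void my_irq_work_fn(struct irq_work *work)
{
	pr_info("irq_work on CPU %d\n", smp_processor_id());
}

static DEFINE_IRQ_WORK(my_work, my_irq_work_fn);

static void poke_cpu(int cpu)
{
#ifdef CONFIG_SMP
	if (!irq_work_queue_on(&my_work, cpu))
		pr_debug("my_work was already pending\n");
#endif
}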
 
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
new file mode 100644 (file)
index 0000000..03a4ea3
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H
+#define __LINUX_IRQCHIP_ARM_GIC_V3_H
+
+#include <asm/sysreg.h>
+
+/*
+ * Distributor registers. We assume we're running non-secure, with ARE
+ * being set. Secure-only and non-ARE registers are not described.
+ */
+#define GICD_CTLR                      0x0000
+#define GICD_TYPER                     0x0004
+#define GICD_IIDR                      0x0008
+#define GICD_STATUSR                   0x0010
+#define GICD_SETSPI_NSR                        0x0040
+#define GICD_CLRSPI_NSR                        0x0048
+#define GICD_SETSPI_SR                 0x0050
+#define GICD_CLRSPI_SR                 0x0058
+#define GICD_SEIR                      0x0068
+#define GICD_ISENABLER                 0x0100
+#define GICD_ICENABLER                 0x0180
+#define GICD_ISPENDR                   0x0200
+#define GICD_ICPENDR                   0x0280
+#define GICD_ISACTIVER                 0x0300
+#define GICD_ICACTIVER                 0x0380
+#define GICD_IPRIORITYR                        0x0400
+#define GICD_ICFGR                     0x0C00
+#define GICD_IROUTER                   0x6000
+#define GICD_PIDR2                     0xFFE8
+
+#define GICD_CTLR_RWP                  (1U << 31)
+#define GICD_CTLR_ARE_NS               (1U << 4)
+#define GICD_CTLR_ENABLE_G1A           (1U << 1)
+#define GICD_CTLR_ENABLE_G1            (1U << 0)
+
+#define GICD_IROUTER_SPI_MODE_ONE      (0U << 31)
+#define GICD_IROUTER_SPI_MODE_ANY      (1U << 31)
+
+#define GIC_PIDR2_ARCH_MASK            0xf0
+#define GIC_PIDR2_ARCH_GICv3           0x30
+#define GIC_PIDR2_ARCH_GICv4           0x40
+
+/*
+ * Re-Distributor registers, offsets from RD_base
+ */
+#define GICR_CTLR                      GICD_CTLR
+#define GICR_IIDR                      0x0004
+#define GICR_TYPER                     0x0008
+#define GICR_STATUSR                   GICD_STATUSR
+#define GICR_WAKER                     0x0014
+#define GICR_SETLPIR                   0x0040
+#define GICR_CLRLPIR                   0x0048
+#define GICR_SEIR                      GICD_SEIR
+#define GICR_PROPBASER                 0x0070
+#define GICR_PENDBASER                 0x0078
+#define GICR_INVLPIR                   0x00A0
+#define GICR_INVALLR                   0x00B0
+#define GICR_SYNCR                     0x00C0
+#define GICR_MOVLPIR                   0x0100
+#define GICR_MOVALLR                   0x0110
+#define GICR_PIDR2                     GICD_PIDR2
+
+#define GICR_WAKER_ProcessorSleep      (1U << 1)
+#define GICR_WAKER_ChildrenAsleep      (1U << 2)
+
+/*
+ * Re-Distributor registers, offsets from SGI_base
+ */
+#define GICR_ISENABLER0                        GICD_ISENABLER
+#define GICR_ICENABLER0                        GICD_ICENABLER
+#define GICR_ISPENDR0                  GICD_ISPENDR
+#define GICR_ICPENDR0                  GICD_ICPENDR
+#define GICR_ISACTIVER0                        GICD_ISACTIVER
+#define GICR_ICACTIVER0                        GICD_ICACTIVER
+#define GICR_IPRIORITYR0               GICD_IPRIORITYR
+#define GICR_ICFGR0                    GICD_ICFGR
+
+#define GICR_TYPER_VLPIS               (1U << 1)
+#define GICR_TYPER_LAST                        (1U << 4)
+
+/*
+ * CPU interface registers
+ */
+#define ICC_CTLR_EL1_EOImode_drop_dir  (0U << 1)
+#define ICC_CTLR_EL1_EOImode_drop      (1U << 1)
+#define ICC_SRE_EL1_SRE                        (1U << 0)
+
+/*
+ * Hypervisor interface registers (SRE only)
+ */
+#define ICH_LR_VIRTUAL_ID_MASK         ((1UL << 32) - 1)
+
+#define ICH_LR_EOI                     (1UL << 41)
+#define ICH_LR_GROUP                   (1UL << 60)
+#define ICH_LR_STATE                   (3UL << 62)
+#define ICH_LR_PENDING_BIT             (1UL << 62)
+#define ICH_LR_ACTIVE_BIT              (1UL << 63)
+
+#define ICH_MISR_EOI                   (1 << 0)
+#define ICH_MISR_U                     (1 << 1)
+
+#define ICH_HCR_EN                     (1 << 0)
+#define ICH_HCR_UIE                    (1 << 1)
+
+#define ICH_VMCR_CTLR_SHIFT            0
+#define ICH_VMCR_CTLR_MASK             (0x21f << ICH_VMCR_CTLR_SHIFT)
+#define ICH_VMCR_BPR1_SHIFT            18
+#define ICH_VMCR_BPR1_MASK             (7 << ICH_VMCR_BPR1_SHIFT)
+#define ICH_VMCR_BPR0_SHIFT            21
+#define ICH_VMCR_BPR0_MASK             (7 << ICH_VMCR_BPR0_SHIFT)
+#define ICH_VMCR_PMR_SHIFT             24
+#define ICH_VMCR_PMR_MASK              (0xffUL << ICH_VMCR_PMR_SHIFT)
+
+#define ICC_EOIR1_EL1                  sys_reg(3, 0, 12, 12, 1)
+#define ICC_IAR1_EL1                   sys_reg(3, 0, 12, 12, 0)
+#define ICC_SGI1R_EL1                  sys_reg(3, 0, 12, 11, 5)
+#define ICC_PMR_EL1                    sys_reg(3, 0, 4, 6, 0)
+#define ICC_CTLR_EL1                   sys_reg(3, 0, 12, 12, 4)
+#define ICC_SRE_EL1                    sys_reg(3, 0, 12, 12, 5)
+#define ICC_GRPEN1_EL1                 sys_reg(3, 0, 12, 12, 7)
+
+#define ICC_IAR1_EL1_SPURIOUS          0x3ff
+
+#define ICC_SRE_EL2                    sys_reg(3, 4, 12, 9, 5)
+
+#define ICC_SRE_EL2_SRE                        (1 << 0)
+#define ICC_SRE_EL2_ENABLE             (1 << 3)
+
+/*
+ * System register definitions
+ */
+#define ICH_VSEIR_EL2                  sys_reg(3, 4, 12, 9, 4)
+#define ICH_HCR_EL2                    sys_reg(3, 4, 12, 11, 0)
+#define ICH_VTR_EL2                    sys_reg(3, 4, 12, 11, 1)
+#define ICH_MISR_EL2                   sys_reg(3, 4, 12, 11, 2)
+#define ICH_EISR_EL2                   sys_reg(3, 4, 12, 11, 3)
+#define ICH_ELSR_EL2                   sys_reg(3, 4, 12, 11, 5)
+#define ICH_VMCR_EL2                   sys_reg(3, 4, 12, 11, 7)
+
+#define __LR0_EL2(x)                   sys_reg(3, 4, 12, 12, x)
+#define __LR8_EL2(x)                   sys_reg(3, 4, 12, 13, x)
+
+#define ICH_LR0_EL2                    __LR0_EL2(0)
+#define ICH_LR1_EL2                    __LR0_EL2(1)
+#define ICH_LR2_EL2                    __LR0_EL2(2)
+#define ICH_LR3_EL2                    __LR0_EL2(3)
+#define ICH_LR4_EL2                    __LR0_EL2(4)
+#define ICH_LR5_EL2                    __LR0_EL2(5)
+#define ICH_LR6_EL2                    __LR0_EL2(6)
+#define ICH_LR7_EL2                    __LR0_EL2(7)
+#define ICH_LR8_EL2                    __LR8_EL2(0)
+#define ICH_LR9_EL2                    __LR8_EL2(1)
+#define ICH_LR10_EL2                   __LR8_EL2(2)
+#define ICH_LR11_EL2                   __LR8_EL2(3)
+#define ICH_LR12_EL2                   __LR8_EL2(4)
+#define ICH_LR13_EL2                   __LR8_EL2(5)
+#define ICH_LR14_EL2                   __LR8_EL2(6)
+#define ICH_LR15_EL2                   __LR8_EL2(7)
+
+#define __AP0Rx_EL2(x)                 sys_reg(3, 4, 12, 8, x)
+#define ICH_AP0R0_EL2                  __AP0Rx_EL2(0)
+#define ICH_AP0R1_EL2                  __AP0Rx_EL2(1)
+#define ICH_AP0R2_EL2                  __AP0Rx_EL2(2)
+#define ICH_AP0R3_EL2                  __AP0Rx_EL2(3)
+
+#define __AP1Rx_EL2(x)                 sys_reg(3, 4, 12, 9, x)
+#define ICH_AP1R0_EL2                  __AP1Rx_EL2(0)
+#define ICH_AP1R1_EL2                  __AP1Rx_EL2(1)
+#define ICH_AP1R2_EL2                  __AP1Rx_EL2(2)
+#define ICH_AP1R3_EL2                  __AP1Rx_EL2(3)
+
+#ifndef __ASSEMBLY__
+
+#include <linux/stringify.h>
+
+static inline void gic_write_eoir(u64 irq)
+{
+       asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq));
+       isb();
+}
+
+#endif
+
+#endif
index 17aa1cc..30faf79 100644 (file)
@@ -91,6 +91,7 @@ struct kernfs_elem_attr {
        const struct kernfs_ops *ops;
        struct kernfs_open_node *open;
        loff_t                  size;
+       struct kernfs_node      *notify_next;   /* for kernfs_notify() */
 };
 
 /*
@@ -304,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
                               struct kernfs_root *root, unsigned long magic,
                               bool *new_sb_created, const void *ns);
 void kernfs_kill_sb(struct super_block *sb);
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
 
 void kernfs_init(void);
 
index 7dcef33..13d5520 100644 (file)
@@ -73,7 +73,6 @@ struct kthread_worker {
 struct kthread_work {
        struct list_head        node;
        kthread_work_func_t     func;
-       wait_queue_head_t       done;
        struct kthread_worker   *worker;
 };
 
@@ -85,7 +84,6 @@ struct kthread_work {
 #define KTHREAD_WORK_INIT(work, fn)    {                               \
        .node = LIST_HEAD_INIT((work).node),                            \
        .func = (fn),                                                   \
-       .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done),             \
        }
 
 #define DEFINE_KTHREAD_WORKER(worker)                                  \
@@ -95,22 +93,16 @@ struct kthread_work {
        struct kthread_work work = KTHREAD_WORK_INIT(work, fn)
 
 /*
- * kthread_worker.lock and kthread_work.done need their own lockdep class
- * keys if they are defined on stack with lockdep enabled.  Use the
- * following macros when defining them on stack.
+ * kthread_worker.lock needs its own lockdep class key when defined on
+ * stack with lockdep enabled.  Use the following macros in such cases.
  */
 #ifdef CONFIG_LOCKDEP
 # define KTHREAD_WORKER_INIT_ONSTACK(worker)                           \
        ({ init_kthread_worker(&worker); worker; })
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker)                         \
        struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
-# define KTHREAD_WORK_INIT_ONSTACK(work, fn)                           \
-       ({ init_kthread_work((&work), fn); work; })
-# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn)                         \
-       struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn)
 #else
 # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
-# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn)
 #endif
 
 extern void __init_kthread_worker(struct kthread_worker *worker,
@@ -127,7 +119,6 @@ extern void __init_kthread_worker(struct kthread_worker *worker,
                memset((work), 0, sizeof(struct kthread_work));         \
                INIT_LIST_HEAD(&(work)->node);                          \
                (work)->func = (fn);                                    \
-               init_waitqueue_head(&(work)->done);                     \
        } while (0)
 
 int kthread_worker_fn(void *worker_ptr);
index 5ab4e3a..92abb49 100644 (file)
@@ -593,6 +593,7 @@ struct ata_host {
        struct device           *dev;
        void __iomem * const    *iomap;
        unsigned int            n_ports;
+       unsigned int            n_tags;                 /* nr of NCQ tags */
        void                    *private_data;
        struct ata_port_operations *ops;
        unsigned long           flags;
index b12f4bb..35b51e7 100644 (file)
@@ -578,8 +578,6 @@ struct mlx4_cq {
        u32                     cons_index;
 
        u16                     irq;
-       bool                    irq_affinity_change;
-
        __be32                 *set_ci_db;
        __be32                 *arm_db;
        int                     arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                   int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
index 92a2f99..8103f32 100644 (file)
@@ -25,7 +25,8 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg);
 struct msi_desc {
        struct {
                __u8    is_msix : 1;
-               __u8    multiple: 3;    /* log2 number of messages */
+               __u8    multiple: 3;    /* log2 num of messages allocated */
+               __u8    multi_cap : 3;  /* log2 num of messages supported */
                __u8    maskbit : 1;    /* mask-pending bit supported ? */
                __u8    is_64   : 1;    /* Address size: 0=32bit 1=64bit */
                __u8    pos;            /* Location of the msi capability */
index 11692de..8d5535c 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_queue;
 struct mutex {
        /* 1: unlocked, 0: locked, negative: locked, possible waiters */
        atomic_t                count;
@@ -56,7 +56,7 @@ struct mutex {
        struct task_struct      *owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-       struct optimistic_spin_queue    *osq;   /* Spinner MCS lock */
+       struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
        const char              *name;
@@ -176,8 +176,4 @@ extern void mutex_unlock(struct mutex *lock);
 
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
-#ifndef arch_mutex_cpu_relax
-# define arch_mutex_cpu_relax() cpu_relax()
-#endif
-
 #endif /* __LINUX_MUTEX_H */
index 6a45fb5..447775e 100644 (file)
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
 #ifdef arch_trigger_all_cpu_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-       arch_trigger_all_cpu_backtrace();
+       arch_trigger_all_cpu_backtrace(true);
 
        return true;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+       arch_trigger_all_cpu_backtrace(false);
+       return true;
+}
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
        return false;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+       return false;
+}
 #endif
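
A hedged sketch of the intended caller (the watchdog wiring is illustrative, not part of this hunk): on a detected stall, dump the current CPU and then, if the new sysctl asks for it, every other CPU:

static void report_stall(void)
{
	dump_stack();			/* this CPU's own backtrace */

	if (sysctl_softlockup_all_cpu_backtrace &&
	    !trigger_allbutself_cpu_backtrace())
		pr_warn("remote backtraces unsupported on this arch\n");
}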
 
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -48,6 +57,7 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern int sysctl_softlockup_all_cpu_backtrace;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
                           void __user *, size_t *, loff_t *);
index 0511789..0ff360d 100644 (file)
@@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname,
                                   int depth, void *data);
 
 extern bool early_init_dt_scan(void *params);
+extern bool early_init_dt_verify(void *params);
+extern void early_init_dt_scan_nodes(void);
 
 extern const char *of_flat_dt_get_machine_name(void);
 extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void);
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
 extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern void of_fdt_limit_memory(int limit);
 #else /* CONFIG_OF_FLATTREE */
 static inline void early_init_fdt_scan_reserved_mem(void) {}
 static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
index a70c949..d449018 100644 (file)
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
-extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                                  struct phy_device *phydev);
-
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
        return NULL;
 }
-
-static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                                         struct phy_device *phydev)
-{
-}
 #endif /* CONFIG_OF */
 
 #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644 (file)
index 0000000..90230d5
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS-like lock especially tailored for optimistic spinning in
+ * sleeping lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+       /*
+        * Stores an encoded value of the CPU # of the tail node in the queue.
+        * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+        */
+       atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+       atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
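
A hedged sketch of how a sleeping lock embeds the queue (the struct is hypothetical; compare the mutex change above): the entire queue is one atomic_t holding the encoded CPU number of the tail spinner:

struct my_sleeping_lock {
	atomic_t			count;		/* 1 == unlocked */
	spinlock_t			wait_lock;
	struct optimistic_spin_queue	osq;		/* MCS tail, 4 bytes */
};

static void my_sleeping_lock_init(struct my_sleeping_lock *lock)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	osq_lock_init(&lock->osq);	/* tail = OSQ_UNLOCKED_VAL */
}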
index 3c545b4..8304959 100644 (file)
@@ -360,6 +360,9 @@ static inline void ClearPageCompound(struct page *page)
        ClearPageHead(page);
 }
 #endif
+
+#define PG_head_mask ((1L << PG_head))
+
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
index 0a97b58..e1474ae 100644 (file)
@@ -398,6 +398,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
        return read_cache_page(mapping, index, filler, data);
 }
 
+/*
+ * Get the page's offset in units of PAGE_SIZE.
+ * (TODO: hugepage should have ->index in PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+       if (unlikely(PageHeadHuge(page)))
+               return page->index << compound_order(page);
+       else
+               return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
 /*
  * Return byte-offset into filesystem object for page.
  */
index 466bcd1..6ed3647 100644 (file)
@@ -978,6 +978,8 @@ int pci_try_reset_slot(struct pci_slot *slot);
 int pci_probe_reset_bus(struct pci_bus *bus);
 int pci_reset_bus(struct pci_bus *bus);
 int pci_try_reset_bus(struct pci_bus *bus);
+void pci_reset_secondary_bus(struct pci_dev *dev);
+void pcibios_reset_secondary_bus(struct pci_dev *dev);
 void pci_reset_bridge_secondary_bus(struct pci_dev *dev);
 void pci_update_resource(struct pci_dev *dev, int resno);
 int __must_check pci_assign_resource(struct pci_dev *dev, int i);
@@ -1186,7 +1188,6 @@ int pci_msix_vec_count(struct pci_dev *dev);
 int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
 void pci_msix_shutdown(struct pci_dev *dev);
 void pci_disable_msix(struct pci_dev *dev);
-void msi_remove_pci_irq_vectors(struct pci_dev *dev);
 void pci_restore_msi_state(struct pci_dev *dev);
 int pci_msi_enabled(void);
 int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
@@ -1217,7 +1218,6 @@ static inline int pci_enable_msix(struct pci_dev *dev,
 { return -ENOSYS; }
 static inline void pci_msix_shutdown(struct pci_dev *dev) { }
 static inline void pci_disable_msix(struct pci_dev *dev) { }
-static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) { }
 static inline void pci_restore_msi_state(struct pci_dev *dev) { }
 static inline int pci_msi_enabled(void) { return 0; }
 static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
index 7fa3173..6ed0bb7 100644 (file)
@@ -6,6 +6,8 @@
  *     Do not add new entries to this file unless the definitions
  *     are shared between multiple drivers.
  */
+#ifndef _LINUX_PCI_IDS_H
+#define _LINUX_PCI_IDS_H
 
 /* Device classes and subclasses */
 
 #define PCI_DEVICE_ID_XEN_PLATFORM     0x0001
 
 #define PCI_VENDOR_ID_OCZ              0x1b85
+
+#endif /* _LINUX_PCI_IDS_H */
index a5fc7d0..cfd5604 100644 (file)
@@ -1,6 +1,40 @@
+/*
+ * linux/percpu-defs.h - basic definitions for percpu areas
+ *
+ * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER.
+ *
+ * This file is separate from linux/percpu.h to avoid cyclic inclusion
+ * dependency from arch header files.  Only to be included from
+ * asm/percpu.h.
+ *
+ * This file includes macros necessary to declare percpu sections and
+ * variables, and definitions of percpu accessors and operations.  It
+ * should provide enough percpu features to arch header files even when
+ * they can only include asm/percpu.h to avoid cyclic inclusion dependency.
+ */
+
 #ifndef _LINUX_PERCPU_DEFS_H
 #define _LINUX_PERCPU_DEFS_H
 
+#ifdef CONFIG_SMP
+
+#ifdef MODULE
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION ""
+#else
+#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#endif
+#define PER_CPU_FIRST_SECTION "..first"
+
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_ALIGNED_SECTION "..shared_aligned"
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
+
 /*
  * Base implementations of per-CPU variable declarations and definitions, where
  * the section in which the variable is to be placed is provided by the
 #define __PCPU_DUMMY_ATTRS                                             \
        __attribute__((section(".discard"), unused))
 
-/*
- * Macro which verifies @ptr is a percpu pointer without evaluating
- * @ptr.  This is to be used in percpu accessors to verify that the
- * input parameter is a percpu pointer.
- *
- * + 0 is required in order to convert the pointer type from a
- * potential array type to a pointer to a single item of the array.
- */
-#define __verify_pcpu_ptr(ptr) do {                                    \
-       const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;    \
-       (void)__vpp_verify;                                             \
-} while (0)
-
 /*
  * s390 and alpha modules require percpu variables to be defined as
  * weak to force the compiler to generate GOT based external
  * Declaration/definition used for per-CPU variables that must be read mostly.
  */
 #define DECLARE_PER_CPU_READ_MOSTLY(type, name)                        \
-       DECLARE_PER_CPU_SECTION(type, name, "..readmostly")
+       DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)                         \
-       DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
+       DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 /*
  * Intermodule exports for per-CPU variables.  sparse forgets about
 #define EXPORT_PER_CPU_SYMBOL_GPL(var)
 #endif
 
+/*
+ * Accessors and operations.
+ */
+#ifndef __ASSEMBLY__
+
+/*
+ * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating
+ * @ptr and is invoked once before a percpu area is accessed by all
+ * accessors and operations.  This is performed in the generic part of
+ * percpu and arch overrides don't need to worry about it; however, if an
+ * arch wants to implement an arch-specific percpu accessor or operation,
+ * it may use __verify_pcpu_ptr() to verify the parameters.
+ *
+ * + 0 is required in order to convert the pointer type from a
+ * potential array type to a pointer to a single item of the array.
+ */
+#define __verify_pcpu_ptr(ptr)                                         \
+do {                                                                   \
+       const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL;    \
+       (void)__vpp_verify;                                             \
+} while (0)
+
+#ifdef CONFIG_SMP
+
+/*
+ * Add an offset to a pointer but keep the pointer as-is.  Use RELOC_HIDE()
+ * to prevent the compiler from making incorrect assumptions about the
+ * pointer value.  The weird cast keeps both GCC and sparse happy.
+ */
+#define SHIFT_PERCPU_PTR(__p, __offset)                                        \
+       RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset))
+
+#define per_cpu_ptr(ptr, cpu)                                          \
+({                                                                     \
+       __verify_pcpu_ptr(ptr);                                         \
+       SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)));                 \
+})
+
+#define raw_cpu_ptr(ptr)                                               \
+({                                                                     \
+       __verify_pcpu_ptr(ptr);                                         \
+       arch_raw_cpu_ptr(ptr);                                          \
+})
+
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr)                                              \
+({                                                                     \
+       __verify_pcpu_ptr(ptr);                                         \
+       SHIFT_PERCPU_PTR(ptr, my_cpu_offset);                           \
+})
+#else
+#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr)
+#endif
+
+#else  /* CONFIG_SMP */
+
+#define VERIFY_PERCPU_PTR(__p)                                         \
+({                                                                     \
+       __verify_pcpu_ptr(__p);                                         \
+       (typeof(*(__p)) __kernel __force *)(__p);                       \
+})
+
+#define per_cpu_ptr(ptr, cpu)  ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
+#define raw_cpu_ptr(ptr)       per_cpu_ptr(ptr, 0)
+#define this_cpu_ptr(ptr)      raw_cpu_ptr(ptr)
+
+#endif /* CONFIG_SMP */
+
+#define per_cpu(var, cpu)      (*per_cpu_ptr(&(var), cpu))
+#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var)))
+#define __get_cpu_var(var)     (*this_cpu_ptr(&(var)))
+
+/* keep until we have removed all uses of __this_cpu_ptr */
+#define __this_cpu_ptr(ptr)    raw_cpu_ptr(ptr)
+
+/*
+ * Must be an lvalue. Since @var must be a simple identifier,
+ * we force a syntax error here if it isn't.
+ */
+#define get_cpu_var(var)                                               \
+(*({                                                                   \
+       preempt_disable();                                              \
+       this_cpu_ptr(&var);                                             \
+}))
+
+/*
+ * The weird & is necessary because sparse considers (void)(var) to be
+ * a direct dereference of percpu variable (var).
+ */
+#define put_cpu_var(var)                                               \
+do {                                                                   \
+       (void)&(var);                                                   \
+       preempt_enable();                                               \
+} while (0)
+
+#define get_cpu_ptr(var)                                               \
+({                                                                     \
+       preempt_disable();                                              \
+       this_cpu_ptr(var);                                              \
+})
+
+#define put_cpu_ptr(var)                                               \
+do {                                                                   \
+       (void)(var);                                                    \
+       preempt_enable();                                               \
+} while (0)
+
+/*
+ * Branching function to split up a function into a set of functions that
+ * are called for different scalar sizes of the objects handled.
+ */
+
+extern void __bad_size_call_parameter(void);
+
+#ifdef CONFIG_DEBUG_PREEMPT
+extern void __this_cpu_preempt_check(const char *op);
+#else
+static inline void __this_cpu_preempt_check(const char *op) { }
+#endif
+
+#define __pcpu_size_call_return(stem, variable)                                \
+({                                                                     \
+       typeof(variable) pscr_ret__;                                    \
+       __verify_pcpu_ptr(&(variable));                                 \
+       switch(sizeof(variable)) {                                      \
+       case 1: pscr_ret__ = stem##1(variable); break;                  \
+       case 2: pscr_ret__ = stem##2(variable); break;                  \
+       case 4: pscr_ret__ = stem##4(variable); break;                  \
+       case 8: pscr_ret__ = stem##8(variable); break;                  \
+       default:                                                        \
+               __bad_size_call_parameter(); break;                     \
+       }                                                               \
+       pscr_ret__;                                                     \
+})
+
+#define __pcpu_size_call_return2(stem, variable, ...)                  \
+({                                                                     \
+       typeof(variable) pscr2_ret__;                                   \
+       __verify_pcpu_ptr(&(variable));                                 \
+       switch(sizeof(variable)) {                                      \
+       case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;    \
+       case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;    \
+       case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;    \
+       case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;    \
+       default:                                                        \
+               __bad_size_call_parameter(); break;                     \
+       }                                                               \
+       pscr2_ret__;                                                    \
+})
+
+/*
+ * Special handling for cmpxchg_double.  cmpxchg_double is passed two
+ * percpu variables.  The first has to be aligned to a double word
+ * boundary and the second has to follow directly thereafter.
+ * We enforce this on all architectures even if they don't support
+ * a double cmpxchg instruction, since it's a cheap requirement, and it
+ * avoids breaking the requirement for architectures with the instruction.
+ */
+#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)          \
+({                                                                     \
+       bool pdcrb_ret__;                                               \
+       __verify_pcpu_ptr(&(pcp1));                                     \
+       BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));                     \
+       VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1)));       \
+       VM_BUG_ON((unsigned long)(&(pcp2)) !=                           \
+                 (unsigned long)(&(pcp1)) + sizeof(pcp1));             \
+       switch(sizeof(pcp1)) {                                          \
+       case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;  \
+       case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;  \
+       case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;  \
+       case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;  \
+       default:                                                        \
+               __bad_size_call_parameter(); break;                     \
+       }                                                               \
+       pdcrb_ret__;                                                    \
+})
+
+#define __pcpu_size_call(stem, variable, ...)                          \
+do {                                                                   \
+       __verify_pcpu_ptr(&(variable));                                 \
+       switch(sizeof(variable)) {                                      \
+               case 1: stem##1(variable, __VA_ARGS__);break;           \
+               case 2: stem##2(variable, __VA_ARGS__);break;           \
+               case 4: stem##4(variable, __VA_ARGS__);break;           \
+               case 8: stem##8(variable, __VA_ARGS__);break;           \
+               default:                                                \
+                       __bad_size_call_parameter();break;              \
+       }                                                               \
+} while (0)
+
+/*
+ * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
+ *
+ * Optimized manipulation for memory allocated through the per cpu
+ * allocator or for addresses of per cpu variables.
+ *
+ * These operations guarantee exclusivity of access for other operations
+ * on the *same* processor. The assumption is that per cpu data is only
+ * accessed by a single processor instance (the current one).
+ *
+ * The arch code can provide optimized implementations by defining macros
+ * for certain scalar sizes, e.g. this_cpu_add_2() for per-cpu atomic
+ * operations on 2 byte sized RMW actions. If arch code does
+ * not provide operations for a scalar size then the fallback in the
+ * generic code will be used.
+ *
+ * cmpxchg_double replaces two adjacent scalars at once.  The first two
+ * parameters are per cpu variables which have to be of the same size.  A
+ * truth value is returned to indicate success or failure (since a double
+ * register result is difficult to handle).  There is very limited hardware
+ * support for these operations, so only certain sizes may work.
+ */
+
+/*
+ * Operations for contexts where we do not want to do any checks for
+ * preemptions.  Unless strictly necessary, always use [__]this_cpu_*()
+ * instead.
+ *
+ * If there is no other protection through preempt disable and/or disabling
+ * interrupts then one of these RMW operations can show unexpected behavior
+ * because the execution thread was rescheduled on another processor or an
+ * interrupt occurred and the same percpu variable was modified from the
+ * interrupt context.
+ */
+#define raw_cpu_read(pcp)              __pcpu_size_call_return(raw_cpu_read_, pcp)
+#define raw_cpu_write(pcp, val)                __pcpu_size_call(raw_cpu_write_, pcp, val)
+#define raw_cpu_add(pcp, val)          __pcpu_size_call(raw_cpu_add_, pcp, val)
+#define raw_cpu_and(pcp, val)          __pcpu_size_call(raw_cpu_and_, pcp, val)
+#define raw_cpu_or(pcp, val)           __pcpu_size_call(raw_cpu_or_, pcp, val)
+#define raw_cpu_add_return(pcp, val)   __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
+#define raw_cpu_xchg(pcp, nval)                __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval)
+#define raw_cpu_cmpxchg(pcp, oval, nval) \
+       __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
+#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
+
+#define raw_cpu_sub(pcp, val)          raw_cpu_add(pcp, -(val))
+#define raw_cpu_inc(pcp)               raw_cpu_add(pcp, 1)
+#define raw_cpu_dec(pcp)               raw_cpu_sub(pcp, 1)
+#define raw_cpu_sub_return(pcp, val)   raw_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define raw_cpu_inc_return(pcp)                raw_cpu_add_return(pcp, 1)
+#define raw_cpu_dec_return(pcp)                raw_cpu_add_return(pcp, -1)
+
+/*
+ * Operations for contexts that are safe from preemption/interrupts.  These
+ * operations verify that preemption is disabled.
+ */
+#define __this_cpu_read(pcp)                                           \
+({                                                                     \
+       __this_cpu_preempt_check("read");                               \
+       raw_cpu_read(pcp);                                              \
+})
+
+#define __this_cpu_write(pcp, val)                                     \
+({                                                                     \
+       __this_cpu_preempt_check("write");                              \
+       raw_cpu_write(pcp, val);                                        \
+})
+
+#define __this_cpu_add(pcp, val)                                       \
+({                                                                     \
+       __this_cpu_preempt_check("add");                                \
+       raw_cpu_add(pcp, val);                                          \
+})
+
+#define __this_cpu_and(pcp, val)                                       \
+({                                                                     \
+       __this_cpu_preempt_check("and");                                \
+       raw_cpu_and(pcp, val);                                          \
+})
+
+#define __this_cpu_or(pcp, val)                                                \
+({                                                                     \
+       __this_cpu_preempt_check("or");                                 \
+       raw_cpu_or(pcp, val);                                           \
+})
+
+#define __this_cpu_add_return(pcp, val)                                        \
+({                                                                     \
+       __this_cpu_preempt_check("add_return");                         \
+       raw_cpu_add_return(pcp, val);                                   \
+})
+
+#define __this_cpu_xchg(pcp, nval)                                     \
+({                                                                     \
+       __this_cpu_preempt_check("xchg");                               \
+       raw_cpu_xchg(pcp, nval);                                        \
+})
+
+#define __this_cpu_cmpxchg(pcp, oval, nval)                            \
+({                                                                     \
+       __this_cpu_preempt_check("cmpxchg");                            \
+       raw_cpu_cmpxchg(pcp, oval, nval);                               \
+})
+
+#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+({     __this_cpu_preempt_check("cmpxchg_double");                     \
+       raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \
+})
+
+#define __this_cpu_sub(pcp, val)       __this_cpu_add(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc(pcp)            __this_cpu_add(pcp, 1)
+#define __this_cpu_dec(pcp)            __this_cpu_sub(pcp, 1)
+#define __this_cpu_sub_return(pcp, val)        __this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define __this_cpu_inc_return(pcp)     __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)     __this_cpu_add_return(pcp, -1)
+
+/*
+ * Operations with implied preemption protection.  These operations can be
+ * used without worrying about preemption.  Note that interrupts may still
+ * occur while an operation is in progress and if the interrupt modifies
+ * the variable too then RMW actions may not be reliable.
+ */
+#define this_cpu_read(pcp)             __pcpu_size_call_return(this_cpu_read_, pcp)
+#define this_cpu_write(pcp, val)       __pcpu_size_call(this_cpu_write_, pcp, val)
+#define this_cpu_add(pcp, val)         __pcpu_size_call(this_cpu_add_, pcp, val)
+#define this_cpu_and(pcp, val)         __pcpu_size_call(this_cpu_and_, pcp, val)
+#define this_cpu_or(pcp, val)          __pcpu_size_call(this_cpu_or_, pcp, val)
+#define this_cpu_add_return(pcp, val)  __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+#define this_cpu_xchg(pcp, nval)       __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval)
+#define this_cpu_cmpxchg(pcp, oval, nval) \
+       __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
+#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
+       __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2)
+
+#define this_cpu_sub(pcp, val)         this_cpu_add(pcp, -(typeof(pcp))(val))
+#define this_cpu_inc(pcp)              this_cpu_add(pcp, 1)
+#define this_cpu_dec(pcp)              this_cpu_sub(pcp, 1)
+#define this_cpu_sub_return(pcp, val)  this_cpu_add_return(pcp, -(typeof(pcp))(val))
+#define this_cpu_inc_return(pcp)       this_cpu_add_return(pcp, 1)
+#define this_cpu_dec_return(pcp)       this_cpu_add_return(pcp, -1)
+
+#endif /* __ASSEMBLY__ */
 #endif /* _LINUX_PERCPU_DEFS_H */
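
A hedged tour of the accessor families defined above (the counter and helpers are hypothetical): this_cpu_*() is safe in any context, per_cpu_ptr() reaches another CPU's copy, and get_cpu_ptr()/put_cpu_ptr() bracket a preemption-disabled section:

static DEFINE_PER_CPU(unsigned long, my_hits);

static void note_hit(void)
{
	this_cpu_inc(my_hits);			/* full preempt/IRQ safety */
}

static unsigned long peek_hits(int cpu)
{
	return *per_cpu_ptr(&my_hits, cpu);	/* unsynchronized remote read */
}

static void note_hit_pinned(void)
{
	unsigned long *p = get_cpu_ptr(&my_hits);	/* preemption off */

	(*p)++;
	put_cpu_ptr(&my_hits);				/* preemption back on */
}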
index 5d8920e..3dfbf23 100644 (file)
@@ -57,11 +57,9 @@ struct percpu_ref {
        atomic_t                count;
        /*
         * The low bit of the pointer indicates whether the ref is in percpu
-        * mode; if set, then get/put will manipulate the atomic_t (this is a
-        * hack because we need to keep the pointer around for
-        * percpu_ref_kill_rcu())
+        * mode; if set, then get/put will manipulate the atomic_t.
         */
-       unsigned __percpu       *pcpu_count;
+       unsigned long           pcpu_count_ptr;
        percpu_ref_func_t       *release;
        percpu_ref_func_t       *confirm_kill;
        struct rcu_head         rcu;
@@ -69,7 +67,8 @@ struct percpu_ref {
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
                                 percpu_ref_func_t *release);
-void percpu_ref_cancel_init(struct percpu_ref *ref);
+void percpu_ref_reinit(struct percpu_ref *ref);
+void percpu_ref_exit(struct percpu_ref *ref);
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill);
 
@@ -88,12 +87,28 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
        return percpu_ref_kill_and_confirm(ref, NULL);
 }
 
-#define PCPU_STATUS_BITS       2
-#define PCPU_STATUS_MASK       ((1 << PCPU_STATUS_BITS) - 1)
-#define PCPU_REF_PTR           0
 #define PCPU_REF_DEAD          1
 
-#define REF_STATUS(count)      (((unsigned long) count) & PCPU_STATUS_MASK)
+/*
+ * Internal helper.  Don't use outside percpu-refcount proper.  The
+ * function doesn't return the pointer and let the caller test it for NULL
+ * because doing so forces the compiler to generate two conditional
+ * branches as it can't assume that @ref->pcpu_count is not NULL.
+ */
+static inline bool __pcpu_ref_alive(struct percpu_ref *ref,
+                                   unsigned __percpu **pcpu_countp)
+{
+       unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr);
+
+       /* paired with smp_store_release() in percpu_ref_reinit() */
+       smp_read_barrier_depends();
+
+       if (unlikely(pcpu_ptr & PCPU_REF_DEAD))
+               return false;
+
+       *pcpu_countp = (unsigned __percpu *)pcpu_ptr;
+       return true;
+}
 
 /**
  * percpu_ref_get - increment a percpu refcount
@@ -107,9 +122,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 
        rcu_read_lock_sched();
 
-       pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-       if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+       if (__pcpu_ref_alive(ref, &pcpu_count))
                this_cpu_inc(*pcpu_count);
        else
                atomic_inc(&ref->count);
@@ -133,9 +146,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 
        rcu_read_lock_sched();
 
-       pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-       if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+       if (__pcpu_ref_alive(ref, &pcpu_count)) {
                this_cpu_inc(*pcpu_count);
                ret = true;
        } else {
@@ -168,9 +179,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 
        rcu_read_lock_sched();
 
-       pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-       if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) {
+       if (__pcpu_ref_alive(ref, &pcpu_count)) {
                this_cpu_inc(*pcpu_count);
                ret = true;
        }
@@ -193,9 +202,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 
        rcu_read_lock_sched();
 
-       pcpu_count = ACCESS_ONCE(ref->pcpu_count);
-
-       if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR))
+       if (__pcpu_ref_alive(ref, &pcpu_count))
                this_cpu_dec(*pcpu_count);
        else if (unlikely(atomic_dec_and_test(&ref->count)))
                ref->release(ref);
@@ -203,4 +210,19 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
        rcu_read_unlock_sched();
 }
 
+/**
+ * percpu_ref_is_zero - test whether a percpu refcount reached zero
+ * @ref: percpu_ref to test
+ *
+ * Returns %true if @ref reached zero.
+ */
+static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
+{
+       unsigned __percpu *pcpu_count;
+
+       if (__pcpu_ref_alive(ref, &pcpu_count))
+               return false;
+       return !atomic_read(&ref->count);
+}
+
 #endif
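A hedged sketch of the resulting lifecycle (my_object and its helpers are hypothetical; calling percpu_ref_exit() from the release callback assumes the freeing rules introduced by this series):

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_object {
	struct percpu_ref ref;
};

static void my_object_release(struct percpu_ref *ref)
{
	struct my_object *obj = container_of(ref, struct my_object, ref);

	percpu_ref_exit(&obj->ref);	/* frees the percpu counters */
	kfree(obj);
}

static struct my_object *my_object_create(void)
{
	struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (percpu_ref_init(&obj->ref, my_object_release)) {
		kfree(obj);
		return NULL;		/* percpu allocation failed */
	}
	return obj;			/* holds the initial reference */
}

Fast paths then use percpu_ref_get()/percpu_ref_put(); teardown calls percpu_ref_kill(), after which the release callback runs once the atomic count drops to zero.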
index 8419053..6f61b61 100644 (file)
         PERCPU_MODULE_RESERVE)
 #endif
 
-/*
- * Must be an lvalue. Since @var must be a simple identifier,
- * we force a syntax error here if it isn't.
- */
-#define get_cpu_var(var) (*({                          \
-       preempt_disable();                              \
-       this_cpu_ptr(&var); }))
-
-/*
- * The weird & is necessary because sparse considers (void)(var) to be
- * a direct dereference of percpu variable (var).
- */
-#define put_cpu_var(var) do {                          \
-       (void)&(var);                                   \
-       preempt_enable();                               \
-} while (0)
-
-#define get_cpu_ptr(var) ({                            \
-       preempt_disable();                              \
-       this_cpu_ptr(var); })
-
-#define put_cpu_ptr(var) do {                          \
-       (void)(var);                                    \
-       preempt_enable();                               \
-} while (0)
-
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE             PFN_ALIGN(32 << 10)
 
@@ -140,17 +114,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
                                pcpu_fc_populate_pte_fn_t populate_pte_fn);
 #endif
 
-/*
- * Use this to get to a cpu's version of the per-cpu object
- * dynamically allocated. Non-atomic access to the current CPU's
- * version should probably be combined with get_cpu()/put_cpu().
- */
-#ifdef CONFIG_SMP
-#define per_cpu_ptr(ptr, cpu)  SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
-#else
-#define per_cpu_ptr(ptr, cpu)  ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); })
-#endif
-
 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
 extern bool is_kernel_percpu_address(unsigned long addr);
 
@@ -166,640 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
 #define alloc_percpu(type)     \
        (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type))
 
-/*
- * Branching function to split up a function into a set of functions that
- * are called for different scalar sizes of the objects handled.
- */
-
-extern void __bad_size_call_parameter(void);
-
-#ifdef CONFIG_DEBUG_PREEMPT
-extern void __this_cpu_preempt_check(const char *op);
-#else
-static inline void __this_cpu_preempt_check(const char *op) { }
-#endif
-
-#define __pcpu_size_call_return(stem, variable)                                \
-({     typeof(variable) pscr_ret__;                                    \
-       __verify_pcpu_ptr(&(variable));                                 \
-       switch(sizeof(variable)) {                                      \
-       case 1: pscr_ret__ = stem##1(variable);break;                   \
-       case 2: pscr_ret__ = stem##2(variable);break;                   \
-       case 4: pscr_ret__ = stem##4(variable);break;                   \
-       case 8: pscr_ret__ = stem##8(variable);break;                   \
-       default:                                                        \
-               __bad_size_call_parameter();break;                      \
-       }                                                               \
-       pscr_ret__;                                                     \
-})
-
-#define __pcpu_size_call_return2(stem, variable, ...)                  \
-({                                                                     \
-       typeof(variable) pscr2_ret__;                                   \
-       __verify_pcpu_ptr(&(variable));                                 \
-       switch(sizeof(variable)) {                                      \
-       case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break;    \
-       case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break;    \
-       case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break;    \
-       case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break;    \
-       default:                                                        \
-               __bad_size_call_parameter(); break;                     \
-       }                                                               \
-       pscr2_ret__;                                                    \
-})
-
-/*
- * Special handling for cmpxchg_double.  cmpxchg_double is passed two
- * percpu variables.  The first has to be aligned to a double word
- * boundary and the second has to follow directly thereafter.
- * We enforce this on all architectures even if they don't support
- * a double cmpxchg instruction, since it's a cheap requirement, and it
- * avoids breaking the requirement for architectures with the instruction.
- */
-#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...)          \
-({                                                                     \
-       bool pdcrb_ret__;                                               \
-       __verify_pcpu_ptr(&pcp1);                                       \
-       BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2));                     \
-       VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1)));         \
-       VM_BUG_ON((unsigned long)(&pcp2) !=                             \
-                 (unsigned long)(&pcp1) + sizeof(pcp1));               \
-       switch(sizeof(pcp1)) {                                          \
-       case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break;  \
-       case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break;  \
-       case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break;  \
-       case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break;  \
-       default:                                                        \
-               __bad_size_call_parameter(); break;                     \
-       }                                                               \
-       pdcrb_ret__;                                                    \
-})
-
-#define __pcpu_size_call(stem, variable, ...)                          \
-do {                                                                   \
-       __verify_pcpu_ptr(&(variable));                                 \
-       switch(sizeof(variable)) {                                      \
-               case 1: stem##1(variable, __VA_ARGS__);break;           \
-               case 2: stem##2(variable, __VA_ARGS__);break;           \
-               case 4: stem##4(variable, __VA_ARGS__);break;           \
-               case 8: stem##8(variable, __VA_ARGS__);break;           \
-               default:                                                \
-                       __bad_size_call_parameter();break;              \
-       }                                                               \
-} while (0)
-
-/*
- * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com>
- *
- * Optimized manipulation for memory allocated through the per cpu
- * allocator or for addresses of per cpu variables.
- *
- * These operations guarantee exclusivity of access with respect to other operations
- * on the *same* processor. The assumption is that per cpu data is only
- * accessed by a single processor instance (the current one).
- *
- * The first group is used for accesses that must be done in a
- * preemption safe way since we know that the context is not preempt
- * safe. Interrupts may occur. If the interrupt modifies the variable
- * too then RMW actions will not be reliable.
- *
- * The arch code can provide optimized functions in two ways:
- *
- * 1. Override the function completely. F.e. define this_cpu_add().
- *    The arch must then ensure that the various scalar formats passed
- *    are handled correctly.
- *
- * 2. Provide functions for certain scalar sizes. F.e. provide
- *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
- *    sized RMW actions. If arch code does not provide operations for
- *    a scalar size then the fallback in the generic code will be
- *    used.
- */
-
-#define _this_cpu_generic_read(pcp)                                    \
-({     typeof(pcp) ret__;                                              \
-       preempt_disable();                                              \
-       ret__ = *this_cpu_ptr(&(pcp));                                  \
-       preempt_enable();                                               \
-       ret__;                                                          \
-})
-
-#ifndef this_cpu_read
-# ifndef this_cpu_read_1
-#  define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp)
-# endif
-# ifndef this_cpu_read_2
-#  define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp)
-# endif
-# ifndef this_cpu_read_4
-#  define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp)
-# endif
-# ifndef this_cpu_read_8
-#  define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp)
-# endif
-# define this_cpu_read(pcp)    __pcpu_size_call_return(this_cpu_read_, (pcp))
-#endif
-
-#define _this_cpu_generic_to_op(pcp, val, op)                          \
-do {                                                                   \
-       unsigned long flags;                                            \
-       raw_local_irq_save(flags);                                      \
-       *raw_cpu_ptr(&(pcp)) op val;                                    \
-       raw_local_irq_restore(flags);                                   \
-} while (0)
-
-#ifndef this_cpu_write
-# ifndef this_cpu_write_1
-#  define this_cpu_write_1(pcp, val)   _this_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef this_cpu_write_2
-#  define this_cpu_write_2(pcp, val)   _this_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef this_cpu_write_4
-#  define this_cpu_write_4(pcp, val)   _this_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef this_cpu_write_8
-#  define this_cpu_write_8(pcp, val)   _this_cpu_generic_to_op((pcp), (val), =)
-# endif
-# define this_cpu_write(pcp, val)      __pcpu_size_call(this_cpu_write_, (pcp), (val))
-#endif
-
-#ifndef this_cpu_add
-# ifndef this_cpu_add_1
-#  define this_cpu_add_1(pcp, val)     _this_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef this_cpu_add_2
-#  define this_cpu_add_2(pcp, val)     _this_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef this_cpu_add_4
-#  define this_cpu_add_4(pcp, val)     _this_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef this_cpu_add_8
-#  define this_cpu_add_8(pcp, val)     _this_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define this_cpu_add(pcp, val)                __pcpu_size_call(this_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef this_cpu_sub
-# define this_cpu_sub(pcp, val)                this_cpu_add((pcp), -(typeof(pcp))(val))
-#endif
-
-#ifndef this_cpu_inc
-# define this_cpu_inc(pcp)             this_cpu_add((pcp), 1)
-#endif
-
-#ifndef this_cpu_dec
-# define this_cpu_dec(pcp)             this_cpu_sub((pcp), 1)
-#endif
-
-#ifndef this_cpu_and
-# ifndef this_cpu_and_1
-#  define this_cpu_and_1(pcp, val)     _this_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef this_cpu_and_2
-#  define this_cpu_and_2(pcp, val)     _this_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef this_cpu_and_4
-#  define this_cpu_and_4(pcp, val)     _this_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef this_cpu_and_8
-#  define this_cpu_and_8(pcp, val)     _this_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define this_cpu_and(pcp, val)                __pcpu_size_call(this_cpu_and_, (pcp), (val))
-#endif
-
-#ifndef this_cpu_or
-# ifndef this_cpu_or_1
-#  define this_cpu_or_1(pcp, val)      _this_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef this_cpu_or_2
-#  define this_cpu_or_2(pcp, val)      _this_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef this_cpu_or_4
-#  define this_cpu_or_4(pcp, val)      _this_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef this_cpu_or_8
-#  define this_cpu_or_8(pcp, val)      _this_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define this_cpu_or(pcp, val)         __pcpu_size_call(this_cpu_or_, (pcp), (val))
-#endif
-
-#define _this_cpu_generic_add_return(pcp, val)                         \
-({                                                                     \
-       typeof(pcp) ret__;                                              \
-       unsigned long flags;                                            \
-       raw_local_irq_save(flags);                                      \
-       raw_cpu_add(pcp, val);                                  \
-       ret__ = raw_cpu_read(pcp);                                      \
-       raw_local_irq_restore(flags);                                   \
-       ret__;                                                          \
-})
-
-#ifndef this_cpu_add_return
-# ifndef this_cpu_add_return_1
-#  define this_cpu_add_return_1(pcp, val)      _this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_2
-#  define this_cpu_add_return_2(pcp, val)      _this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_4
-#  define this_cpu_add_return_4(pcp, val)      _this_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef this_cpu_add_return_8
-#  define this_cpu_add_return_8(pcp, val)      _this_cpu_generic_add_return(pcp, val)
-# endif
-# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
-#endif
-
-#define this_cpu_sub_return(pcp, val)  this_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define this_cpu_inc_return(pcp)       this_cpu_add_return(pcp, 1)
-#define this_cpu_dec_return(pcp)       this_cpu_add_return(pcp, -1)
-
-#define _this_cpu_generic_xchg(pcp, nval)                              \
-({     typeof(pcp) ret__;                                              \
-       unsigned long flags;                                            \
-       raw_local_irq_save(flags);                                      \
-       ret__ = raw_cpu_read(pcp);                                      \
-       raw_cpu_write(pcp, nval);                                       \
-       raw_local_irq_restore(flags);                                   \
-       ret__;                                                          \
-})
-
-#ifndef this_cpu_xchg
-# ifndef this_cpu_xchg_1
-#  define this_cpu_xchg_1(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef this_cpu_xchg_2
-#  define this_cpu_xchg_2(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef this_cpu_xchg_4
-#  define this_cpu_xchg_4(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef this_cpu_xchg_8
-#  define this_cpu_xchg_8(pcp, nval)   _this_cpu_generic_xchg(pcp, nval)
-# endif
-# define this_cpu_xchg(pcp, nval)      \
-       __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval)
-#endif
-
-#define _this_cpu_generic_cmpxchg(pcp, oval, nval)                     \
-({                                                                     \
-       typeof(pcp) ret__;                                              \
-       unsigned long flags;                                            \
-       raw_local_irq_save(flags);                                      \
-       ret__ = raw_cpu_read(pcp);                                      \
-       if (ret__ == (oval))                                            \
-               raw_cpu_write(pcp, nval);                               \
-       raw_local_irq_restore(flags);                                   \
-       ret__;                                                          \
-})
-
-#ifndef this_cpu_cmpxchg
-# ifndef this_cpu_cmpxchg_1
-#  define this_cpu_cmpxchg_1(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef this_cpu_cmpxchg_2
-#  define this_cpu_cmpxchg_2(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef this_cpu_cmpxchg_4
-#  define this_cpu_cmpxchg_4(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef this_cpu_cmpxchg_8
-#  define this_cpu_cmpxchg_8(pcp, oval, nval)  _this_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define this_cpu_cmpxchg(pcp, oval, nval)     \
-       __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval)
-#endif
-
-/*
- * cmpxchg_double replaces two adjacent scalars at once.  The first
- * two parameters are per cpu variables which have to be of the same
- * size.  A truth value is returned to indicate success or failure
- * (since a double register result is difficult to handle).  There is
- * very limited hardware support for these operations, so only certain
- * sizes may work.
- */
-#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)       \
-({                                                                     \
-       int ret__;                                                      \
-       unsigned long flags;                                            \
-       raw_local_irq_save(flags);                                      \
-       ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,              \
-                       oval1, oval2, nval1, nval2);                    \
-       raw_local_irq_restore(flags);                                   \
-       ret__;                                                          \
-})
-
-#ifndef this_cpu_cmpxchg_double
-# ifndef this_cpu_cmpxchg_double_1
-#  define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
-       _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef this_cpu_cmpxchg_double_2
-#  define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
-       _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef this_cpu_cmpxchg_double_4
-#  define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
-       _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef this_cpu_cmpxchg_double_8
-#  define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)    \
-       _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)       \
-       __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
-/*
- * Generic percpu operations for contexts where we do not want to do
- * any checks for preemption.
- *
- * If there is no other protection through preempt disable and/or
- * disabling interrupts then one of these RMW operations can show unexpected
- * behavior because the execution thread was rescheduled on another processor
- * or an interrupt occurred and the same percpu variable was modified from
- * the interrupt context.
- */
-#ifndef raw_cpu_read
-# ifndef raw_cpu_read_1
-#  define raw_cpu_read_1(pcp)  (*raw_cpu_ptr(&(pcp)))
-# endif
-# ifndef raw_cpu_read_2
-#  define raw_cpu_read_2(pcp)  (*raw_cpu_ptr(&(pcp)))
-# endif
-# ifndef raw_cpu_read_4
-#  define raw_cpu_read_4(pcp)  (*raw_cpu_ptr(&(pcp)))
-# endif
-# ifndef raw_cpu_read_8
-#  define raw_cpu_read_8(pcp)  (*raw_cpu_ptr(&(pcp)))
-# endif
-# define raw_cpu_read(pcp)     __pcpu_size_call_return(raw_cpu_read_, (pcp))
-#endif
-
-#define raw_cpu_generic_to_op(pcp, val, op)                            \
-do {                                                                   \
-       *raw_cpu_ptr(&(pcp)) op val;                                    \
-} while (0)
-
-
-#ifndef raw_cpu_write
-# ifndef raw_cpu_write_1
-#  define raw_cpu_write_1(pcp, val)    raw_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef raw_cpu_write_2
-#  define raw_cpu_write_2(pcp, val)    raw_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef raw_cpu_write_4
-#  define raw_cpu_write_4(pcp, val)    raw_cpu_generic_to_op((pcp), (val), =)
-# endif
-# ifndef raw_cpu_write_8
-#  define raw_cpu_write_8(pcp, val)    raw_cpu_generic_to_op((pcp), (val), =)
-# endif
-# define raw_cpu_write(pcp, val)       __pcpu_size_call(raw_cpu_write_, (pcp), (val))
-#endif
-
-#ifndef raw_cpu_add
-# ifndef raw_cpu_add_1
-#  define raw_cpu_add_1(pcp, val)      raw_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef raw_cpu_add_2
-#  define raw_cpu_add_2(pcp, val)      raw_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef raw_cpu_add_4
-#  define raw_cpu_add_4(pcp, val)      raw_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# ifndef raw_cpu_add_8
-#  define raw_cpu_add_8(pcp, val)      raw_cpu_generic_to_op((pcp), (val), +=)
-# endif
-# define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val))
-#endif
-
-#ifndef raw_cpu_sub
-# define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val))
-#endif
-
-#ifndef raw_cpu_inc
-# define raw_cpu_inc(pcp)              raw_cpu_add((pcp), 1)
-#endif
-
-#ifndef raw_cpu_dec
-# define raw_cpu_dec(pcp)              raw_cpu_sub((pcp), 1)
-#endif
-
-#ifndef raw_cpu_and
-# ifndef raw_cpu_and_1
-#  define raw_cpu_and_1(pcp, val)      raw_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef raw_cpu_and_2
-#  define raw_cpu_and_2(pcp, val)      raw_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef raw_cpu_and_4
-#  define raw_cpu_and_4(pcp, val)      raw_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# ifndef raw_cpu_and_8
-#  define raw_cpu_and_8(pcp, val)      raw_cpu_generic_to_op((pcp), (val), &=)
-# endif
-# define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val))
-#endif
-
-#ifndef raw_cpu_or
-# ifndef raw_cpu_or_1
-#  define raw_cpu_or_1(pcp, val)       raw_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef raw_cpu_or_2
-#  define raw_cpu_or_2(pcp, val)       raw_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef raw_cpu_or_4
-#  define raw_cpu_or_4(pcp, val)       raw_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# ifndef raw_cpu_or_8
-#  define raw_cpu_or_8(pcp, val)       raw_cpu_generic_to_op((pcp), (val), |=)
-# endif
-# define raw_cpu_or(pcp, val)  __pcpu_size_call(raw_cpu_or_, (pcp), (val))
-#endif
-
-#define raw_cpu_generic_add_return(pcp, val)                           \
-({                                                                     \
-       raw_cpu_add(pcp, val);                                          \
-       raw_cpu_read(pcp);                                              \
-})
-
-#ifndef raw_cpu_add_return
-# ifndef raw_cpu_add_return_1
-#  define raw_cpu_add_return_1(pcp, val)       raw_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef raw_cpu_add_return_2
-#  define raw_cpu_add_return_2(pcp, val)       raw_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef raw_cpu_add_return_4
-#  define raw_cpu_add_return_4(pcp, val)       raw_cpu_generic_add_return(pcp, val)
-# endif
-# ifndef raw_cpu_add_return_8
-#  define raw_cpu_add_return_8(pcp, val)       raw_cpu_generic_add_return(pcp, val)
-# endif
-# define raw_cpu_add_return(pcp, val)  \
-       __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)
-#endif
-
-#define raw_cpu_sub_return(pcp, val)   raw_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define raw_cpu_inc_return(pcp)        raw_cpu_add_return(pcp, 1)
-#define raw_cpu_dec_return(pcp)        raw_cpu_add_return(pcp, -1)
-
-#define raw_cpu_generic_xchg(pcp, nval)                                        \
-({     typeof(pcp) ret__;                                              \
-       ret__ = raw_cpu_read(pcp);                                      \
-       raw_cpu_write(pcp, nval);                                       \
-       ret__;                                                          \
-})
-
-#ifndef raw_cpu_xchg
-# ifndef raw_cpu_xchg_1
-#  define raw_cpu_xchg_1(pcp, nval)    raw_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef raw_cpu_xchg_2
-#  define raw_cpu_xchg_2(pcp, nval)    raw_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef raw_cpu_xchg_4
-#  define raw_cpu_xchg_4(pcp, nval)    raw_cpu_generic_xchg(pcp, nval)
-# endif
-# ifndef raw_cpu_xchg_8
-#  define raw_cpu_xchg_8(pcp, nval)    raw_cpu_generic_xchg(pcp, nval)
-# endif
-# define raw_cpu_xchg(pcp, nval)       \
-       __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)
-#endif
-
-#define raw_cpu_generic_cmpxchg(pcp, oval, nval)                       \
-({                                                                     \
-       typeof(pcp) ret__;                                              \
-       ret__ = raw_cpu_read(pcp);                                      \
-       if (ret__ == (oval))                                            \
-               raw_cpu_write(pcp, nval);                               \
-       ret__;                                                          \
-})
-
-#ifndef raw_cpu_cmpxchg
-# ifndef raw_cpu_cmpxchg_1
-#  define raw_cpu_cmpxchg_1(pcp, oval, nval)   raw_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef raw_cpu_cmpxchg_2
-#  define raw_cpu_cmpxchg_2(pcp, oval, nval)   raw_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef raw_cpu_cmpxchg_4
-#  define raw_cpu_cmpxchg_4(pcp, oval, nval)   raw_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# ifndef raw_cpu_cmpxchg_8
-#  define raw_cpu_cmpxchg_8(pcp, oval, nval)   raw_cpu_generic_cmpxchg(pcp, oval, nval)
-# endif
-# define raw_cpu_cmpxchg(pcp, oval, nval)      \
-       __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)
-#endif
-
-#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-({                                                                     \
-       int __ret = 0;                                                  \
-       if (raw_cpu_read(pcp1) == (oval1) &&                            \
-                        raw_cpu_read(pcp2)  == (oval2)) {              \
-               raw_cpu_write(pcp1, (nval1));                           \
-               raw_cpu_write(pcp2, (nval2));                           \
-               __ret = 1;                                              \
-       }                                                               \
-       (__ret);                                                        \
-})
-
-#ifndef raw_cpu_cmpxchg_double
-# ifndef raw_cpu_cmpxchg_double_1
-#  define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
-       raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef raw_cpu_cmpxchg_double_2
-#  define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
-       raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef raw_cpu_cmpxchg_double_4
-#  define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
-       raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# ifndef raw_cpu_cmpxchg_double_8
-#  define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
-       raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
-# endif
-# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)        \
-       __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
-#endif
-
-/*
- * Generic percpu operations for contexts that are safe from preemption/interrupts.
- */
-#ifndef __this_cpu_read
-# define __this_cpu_read(pcp) \
-       (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp)))
-#endif
-
-#ifndef __this_cpu_write
-# define __this_cpu_write(pcp, val)                                    \
-do { __this_cpu_preempt_check("write");                                        \
-     __pcpu_size_call(raw_cpu_write_, (pcp), (val));                   \
-} while (0)
-#endif
-
-#ifndef __this_cpu_add
-# define __this_cpu_add(pcp, val)                                       \
-do { __this_cpu_preempt_check("add");                                  \
-       __pcpu_size_call(raw_cpu_add_, (pcp), (val));                   \
-} while (0)
-#endif
-
-#ifndef __this_cpu_sub
-# define __this_cpu_sub(pcp, val)      __this_cpu_add((pcp), -(typeof(pcp))(val))
-#endif
-
-#ifndef __this_cpu_inc
-# define __this_cpu_inc(pcp)           __this_cpu_add((pcp), 1)
-#endif
-
-#ifndef __this_cpu_dec
-# define __this_cpu_dec(pcp)           __this_cpu_sub((pcp), 1)
-#endif
-
-#ifndef __this_cpu_and
-# define __this_cpu_and(pcp, val)                                      \
-do { __this_cpu_preempt_check("and");                                  \
-       __pcpu_size_call(raw_cpu_and_, (pcp), (val));                   \
-} while (0)
-
-#endif
-
-#ifndef __this_cpu_or
-# define __this_cpu_or(pcp, val)                                       \
-do { __this_cpu_preempt_check("or");                                   \
-       __pcpu_size_call(raw_cpu_or_, (pcp), (val));                    \
-} while (0)
-#endif
-
-#ifndef __this_cpu_add_return
-# define __this_cpu_add_return(pcp, val)       \
-       (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val))
-#endif
-
-#define __this_cpu_sub_return(pcp, val)        __this_cpu_add_return(pcp, -(typeof(pcp))(val))
-#define __this_cpu_inc_return(pcp)     __this_cpu_add_return(pcp, 1)
-#define __this_cpu_dec_return(pcp)     __this_cpu_add_return(pcp, -1)
-
-#ifndef __this_cpu_xchg
-# define __this_cpu_xchg(pcp, nval)    \
-       (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval))
-#endif
-
-#ifndef __this_cpu_cmpxchg
-# define __this_cpu_cmpxchg(pcp, oval, nval)   \
-       (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval))
-#endif
-
-#ifndef __this_cpu_cmpxchg_double
-# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)     \
-       (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)))
-#endif
-
 #endif /* __LINUX_PERCPU_H */
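For context, a small sketch of the allocation interface that remains in this header (struct and field names hypothetical):

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct my_stats {
	u64 rx;
	u64 tx;
};

static struct my_stats __percpu *stats;

static int my_stats_init(void)
{
	stats = alloc_percpu(struct my_stats);
	return stats ? 0 : -ENOMEM;
}

static u64 my_stats_total_rx(void)
{
	u64 sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(stats, cpu)->rx;	/* remote read */
	return sum;
}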
index 864ddaf..6804144 100644 (file)
@@ -536,6 +536,15 @@ struct phy_driver {
        /* See set_wol, but for checking whether Wake on LAN is enabled. */
        void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
 
+       /*
+        * Called to inform a PHY device driver when the core is about to
+        * change the link state. This callback is supposed to be used as
+        * fixup hook for drivers that need to take action when the link
+        * a fixup hook for drivers that need to take action when the link
+        * PHY device structure in their implementations.
+        */
+       void (*link_change_notify)(struct phy_device *dev);
+
        struct device_driver driver;
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
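A hypothetical driver-side sketch of the new hook (PHY ID and register write are made up; per the comment above, the callback must not modify the phy_device itself):

#include <linux/phy.h>

static void my_link_change_notify(struct phy_device *phydev)
{
	if (phydev->state == PHY_NOLINK)
		phy_write(phydev, 0x1f, 0x0000);	/* hypothetical fixup */
}

static struct phy_driver my_phy_driver = {
	.phy_id			= 0x00112233,		/* hypothetical */
	.phy_id_mask		= 0xffffffff,
	.name			= "my-phy",
	.link_change_notify	= my_link_change_notify,
};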
index c2049e3..748e716 100644 (file)
@@ -29,7 +29,6 @@ extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata);
 
 /* architecture-specific IDE configuration */
 extern void s3c64xx_ide_setup_gpio(void);
-extern void s5pc100_ide_setup_gpio(void);
 extern void s5pv210_ide_setup_gpio(void);
 
 #endif /*__ATA_SAMSUNG_CF_H */
index aaad386..b537a25 100644 (file)
@@ -44,6 +44,7 @@ extern int prof_on __read_mostly;
 int profile_init(void);
 int profile_setup(char *str);
 void profile_tick(int type);
+int setup_profiling_timer(unsigned int multiplier);
 
 /*
  * Add multiple profiler hits to a given address:
index 077904c..cc79eff 100644 (file)
@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
  * calling arch_ptrace_stop() when it would be superfluous.  For example,
  * if the thread has not been back to user mode since the last stop, the
  * thread state might indicate that nothing needs to be done.
+ *
+ * This is guaranteed to be invoked once before a task stops for ptrace and
+ * may include arch-specific operations necessary prior to a ptrace stop.
  */
 #define arch_ptrace_stop_needed(code, info)    (0)
 #endif
index 5a75d19..d231aa1 100644 (file)
@@ -44,7 +44,6 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -299,41 +298,6 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256       /* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count.  Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count.  We might even clobber some other CPU's attempt
- * to zero its counter.  This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
-       return raw_cpu_inc_return(rcu_cond_resched_count) >=
-              RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiescent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
-       if (unlikely(rcu_should_resched()))
-               rcu_resched();
-}
-
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
  * initialization.
  */
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
 static inline void init_rcu_head_on_stack(struct rcu_head *head)
 {
 }
@@ -852,15 +826,14 @@ static inline void rcu_preempt_sleep_check(void)
  * read-side critical section that would block in a !PREEMPT kernel.
  * But if you want the full story, read on!
  *
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it
- * is illegal to block while in an RCU read-side critical section.  In
- * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU)
- * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may
- * be preempted, but explicit blocking is illegal.  Finally, in preemptible
- * RCU implementations in real-time (with -rt patchset) kernel builds,
- * RCU read-side critical sections may be preempted and they may also
- * block, but only when acquiring spinlocks that are subject to priority
- * inheritance.
+ * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * it is illegal to block while in an RCU read-side critical section.
+ * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT
+ * kernel builds, RCU read-side critical sections may be preempted,
+ * but explicit blocking is illegal.  Finally, in preemptible RCU
+ * implementations in real-time (with -rt patchset) kernel builds, RCU
+ * read-side critical sections may be preempted and they may also block, but
+ * only when acquiring spinlocks that are subject to priority inheritance.
  */
 static inline void rcu_read_lock(void)
 {
@@ -884,6 +857,34 @@ static inline void rcu_read_lock(void)
 /**
  * rcu_read_unlock() - marks the end of an RCU read-side critical section.
  *
+ * In most situations, rcu_read_unlock() is immune from deadlock.
+ * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock()
+ * is responsible for deboosting, which it does via rt_mutex_unlock().
+ * Unfortunately, this function acquires the scheduler's runqueue and
+ * priority-inheritance spinlocks.  This means that deadlock could result
+ * if the caller of rcu_read_unlock() already holds one of these locks or
+ * any lock that is ever acquired while holding them.
+ *
+ * That said, RCU readers are never priority boosted unless they were
+ * preempted.  Therefore, one way to avoid deadlock is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with one of
+ * rt_mutex_unlock()'s locks held.  Such preemption can be avoided in
+ * a number of ways, for example, by invoking preempt_disable() before
+ * the critical section's outermost rcu_read_lock().
+ *
+ * Given that the set of locks acquired by rt_mutex_unlock() might change
+ * at any time, a somewhat more future-proofed approach is to make sure
+ * that preemption never happens within any RCU read-side critical
+ * section whose outermost rcu_read_unlock() is called with irqs disabled.
+ * This approach relies on the fact that rt_mutex_unlock() currently only
+ * acquires irq-disabled locks.
+ *
+ * The second of these two approaches is best in most situations,
+ * however, the first approach can also be useful, at least to those
+ * developers willing to keep abreast of the set of locks acquired by
+ * rt_mutex_unlock().
+ *
  * See rcu_read_lock() for more information.
  */
 static inline void rcu_read_unlock(void)
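A sketch of the first approach described above: disabling preemption around the whole read-side critical section guarantees the outermost rcu_read_unlock() never has to deboost via rt_mutex_unlock():

static void example_reader(void)
{
	preempt_disable();
	rcu_read_lock();
	/* ... dereference RCU-protected pointers ... */
	rcu_read_unlock();	/* reader was never preempted, so never boosted */
	preempt_enable();
}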
index a2d9d81..14ec18d 100644 (file)
@@ -395,6 +395,11 @@ static inline void regulator_bulk_free(int num_consumers,
 {
 }
 
+static inline int regulator_can_change_voltage(struct regulator *regulator)
+{
+       return 0;
+}
+
 static inline int regulator_set_voltage(struct regulator *regulator,
                                        int min_uV, int max_uV)
 {
index 3aed8d7..1abba5c 100644 (file)
@@ -90,11 +90,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name);
 extern void rt_mutex_destroy(struct rt_mutex *lock);
 
 extern void rt_mutex_lock(struct rt_mutex *lock);
-extern int rt_mutex_lock_interruptible(struct rt_mutex *lock,
-                                               int detect_deadlock);
+extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
-                                       struct hrtimer_sleeper *timeout,
-                                       int detect_deadlock);
+                              struct hrtimer_sleeper *timeout);
 
 extern int rt_mutex_trylock(struct rt_mutex *lock);
 
index d5b13bc..561e861 100644 (file)
 #ifdef __KERNEL__
 /*
  * the rw-semaphore definition
- * - if activity is 0 then there are no active readers or writers
- * - if activity is +ve then that is the number of active readers
- * - if activity is -1 then there is one active writer
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
  * - if wait_list is not empty, then there are processes waiting for the semaphore
  */
 struct rw_semaphore {
-       __s32                   activity;
+       __s32                   count;
        raw_spinlock_t          wait_lock;
        struct list_head        wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
index 8d79708..035d3c5 100644 (file)
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
 #include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
 
-struct optimistic_spin_queue;
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
        long count;
-       raw_spinlock_t wait_lock;
        struct list_head wait_list;
-#ifdef CONFIG_SMP
+       raw_spinlock_t wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+       struct optimistic_spin_queue osq; /* spinner MCS lock */
        /*
         * Write owner. Used as a speculative check to see
         * if the owner is running on the cpu.
         */
        struct task_struct *owner;
-       struct optimistic_spin_queue *osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_INITIALIZER(name)                      \
-       { RWSEM_UNLOCKED_VALUE,                         \
-         __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),     \
-         LIST_HEAD_INIT((name).wait_list),             \
-         NULL, /* owner */                             \
-         NULL /* mcs lock */                           \
-         __RWSEM_DEP_MAP_INIT(name) }
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
 #else
-#define __RWSEM_INITIALIZER(name)                      \
-       { RWSEM_UNLOCKED_VALUE,                         \
-         __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),     \
-         LIST_HEAD_INIT((name).wait_list)              \
-         __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_OPT_INIT(lockname)
 #endif
 
+#define __RWSEM_INITIALIZER(name)                              \
+       { .count = RWSEM_UNLOCKED_VALUE,                        \
+         .wait_list = LIST_HEAD_INIT((name).wait_list),        \
+         .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
+         __RWSEM_OPT_INIT(name)                                \
+         __RWSEM_DEP_MAP_INIT(name) }
+
 #define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
index 306f4f0..42cac4d 100644 (file)
@@ -872,21 +872,21 @@ enum cpu_idle_type {
 #define SD_NUMA                        0x4000  /* cross-node balancing */
 
 #ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
 {
        return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
 {
        return SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
 {
        return SD_NUMA;
 }
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
 
 #define SDTL_OVERLAP   0x01
 
@@ -1270,9 +1270,6 @@ struct task_struct {
 #ifdef CONFIG_TREE_PREEMPT_RCU
        struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-       struct rt_mutex *rcu_boost_mutex;
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
        struct sched_info sched_info;
@@ -1440,8 +1437,6 @@ struct task_struct {
        struct rb_node *pi_waiters_leftmost;
        /* Deadlock detection and priority inheritance handling */
        struct rt_mutex_waiter *pi_blocked_on;
-       /* Top pi_waiters task */
-       struct task_struct *pi_top_task;
 #endif
 
 #ifdef CONFIG_DEBUG_MUTEXES
@@ -2009,9 +2004,6 @@ static inline void rcu_copy_process(struct task_struct *p)
 #ifdef CONFIG_TREE_PREEMPT_RCU
        p->rcu_blocked_node = NULL;
 #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
-#ifdef CONFIG_RCU_BOOST
-       p->rcu_boost_mutex = NULL;
-#endif /* #ifdef CONFIG_RCU_BOOST */
        INIT_LIST_HEAD(&p->rcu_node_entry);
 }
 
@@ -2788,7 +2780,7 @@ static inline bool __must_check current_set_polling_and_test(void)
 
        /*
         * Polling state must be visible before we test NEED_RESCHED,
-        * paired by resched_task()
+        * paired by resched_curr()
         */
        smp_mb__after_atomic();
 
@@ -2806,7 +2798,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
 
        /*
         * Polling state must be visible before we test NEED_RESCHED,
-        * paired by resched_task()
+        * paired by resched_curr()
         */
        smp_mb__after_atomic();
 
@@ -2838,7 +2830,7 @@ static inline void current_clr_polling(void)
         * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
         * fold.
         */
-       smp_mb(); /* paired with resched_task() */
+       smp_mb(); /* paired with resched_curr() */
 
        preempt_fold_need_resched();
 }
index 535f158..8cf3503 100644 (file)
@@ -164,8 +164,6 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s)
 static inline unsigned raw_seqcount_begin(const seqcount_t *s)
 {
        unsigned ret = ACCESS_ONCE(s->sequence);
-
-       seqcount_lockdep_reader_access(s);
        smp_rmb();
        return ret & ~1;
 }
index 8e98297..ec538fc 100644 (file)
@@ -305,8 +305,6 @@ struct ucred {
 /* IPX options */
 #define IPX_TYPE       1
 
-extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                              int offset, int len);
 extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 
                                          struct iovec *iov, 
                                          int offset, 
@@ -315,8 +313,6 @@ extern unsigned long iov_pages(const struct iovec *iov, int offset,
                               unsigned long nr_segs);
 
 extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
-extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
-                            int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
index ad7dbe2..1a89599 100644 (file)
@@ -236,7 +236,7 @@ void *              rpc_malloc(struct rpc_task *, size_t);
 void           rpc_free(void *);
 int            rpciod_up(void);
 void           rpciod_down(void);
-int            __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *));
+int            __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
 #ifdef RPC_DEBUG
 struct net;
 void           rpc_show_tasks(struct net *);
index f76994b..519064e 100644 (file)
@@ -327,6 +327,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
 extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
+extern bool hibernation_available(void);
 asmlinkage int swsusp_save(void);
 extern struct pbe *restore_pblist;
 #else /* CONFIG_HIBERNATION */
@@ -339,6 +340,7 @@ static inline void swsusp_unset_page_free(struct page *p) {}
 static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
+static inline bool hibernation_available(void) { return false; }
 #endif /* CONFIG_HIBERNATION */
 
 /* Hibernation and suspend events */
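A sketch of how a caller might gate on the new predicate (function name hypothetical):

static int try_hibernate(void)
{
	if (!hibernation_available())
		return -EPERM;	/* hibernation disabled, e.g. by policy */
	return hibernate();
}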
index b84773c..0590523 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/hrtimer.h>
 #include <linux/context_tracking_state.h>
 #include <linux/cpumask.h>
+#include <linux/sched.h>
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 
@@ -162,6 +163,7 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 #ifdef CONFIG_NO_HZ_FULL
 extern bool tick_nohz_full_running;
 extern cpumask_var_t tick_nohz_full_mask;
+extern cpumask_var_t housekeeping_mask;
 
 static inline bool tick_nohz_full_enabled(void)
 {
@@ -181,7 +183,13 @@ static inline bool tick_nohz_full_cpu(int cpu)
 
 extern void tick_nohz_init(void);
 extern void __tick_nohz_full_check(void);
-extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_cpu(int cpu);
+
+static inline void tick_nohz_full_kick(void)
+{
+       tick_nohz_full_kick_cpu(smp_processor_id());
+}
+
 extern void tick_nohz_full_kick_all(void);
 extern void __tick_nohz_task_switch(struct task_struct *tsk);
 #else
@@ -189,11 +197,30 @@ static inline void tick_nohz_init(void) { }
 static inline bool tick_nohz_full_enabled(void) { return false; }
 static inline bool tick_nohz_full_cpu(int cpu) { return false; }
 static inline void __tick_nohz_full_check(void) { }
+static inline void tick_nohz_full_kick_cpu(int cpu) { }
 static inline void tick_nohz_full_kick(void) { }
 static inline void tick_nohz_full_kick_all(void) { }
 static inline void __tick_nohz_task_switch(struct task_struct *tsk) { }
 #endif
 
+static inline bool is_housekeeping_cpu(int cpu)
+{
+#ifdef CONFIG_NO_HZ_FULL
+       if (tick_nohz_full_enabled())
+               return cpumask_test_cpu(cpu, housekeeping_mask);
+#endif
+       return true;
+}
+
+static inline void housekeeping_affine(struct task_struct *t)
+{
+#ifdef CONFIG_NO_HZ_FULL
+       if (tick_nohz_full_enabled())
+               set_cpus_allowed_ptr(t, housekeeping_mask);
+
+#endif
+}
+
 static inline void tick_nohz_full_check(void)
 {
        if (tick_nohz_full_enabled())
index 1361169..ea6c9de 100644 (file)
@@ -25,6 +25,21 @@ trace_seq_init(struct trace_seq *s)
        s->full = 0;
 }
 
+/**
+ * trace_seq_buffer_ptr - return pointer to next location in buffer
+ * @s: trace sequence descriptor
+ *
+ * Returns the pointer to the buffer where the next write to
+ * the buffer will happen. This is useful to save the location
+ * that is about to be written to and then return the result
+ * of that write.
+ */
+static inline unsigned char *
+trace_seq_buffer_ptr(struct trace_seq *s)
+{
+       return s->buffer + s->len;
+}
+
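A sketch of the save-then-diff pattern the comment describes (helper name hypothetical):

static int print_my_field(struct trace_seq *s, unsigned long val)
{
	unsigned char *p = trace_seq_buffer_ptr(s);

	trace_seq_printf(s, "val=%lu", val);
	return trace_seq_buffer_ptr(s) - p;	/* bytes just written */
}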
 /*
  * Currently only defined when tracing is enabled.
  */
@@ -36,14 +51,13 @@ int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
 extern int
 trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
 extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
-extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-                                size_t cnt);
+extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+                            int cnt);
 extern int trace_seq_puts(struct trace_seq *s, const char *str);
 extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
-extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
 extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
-                               size_t len);
-extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+                               unsigned int len);
 extern int trace_seq_path(struct trace_seq *s, const struct path *path);
 
 extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
@@ -71,8 +85,8 @@ static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
 {
        return 0;
 }
-static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-                                size_t cnt)
+static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+                                   int cnt)
 {
        return 0;
 }
@@ -85,19 +99,15 @@ static inline int trace_seq_putc(struct trace_seq *s, unsigned char c)
        return 0;
 }
 static inline int
-trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
+trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
 {
        return 0;
 }
 static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
-                                      size_t len)
+                                      unsigned int len)
 {
        return 0;
 }
-static inline void *trace_seq_reserve(struct trace_seq *s, size_t len)
-{
-       return NULL;
-}
 static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
 {
        return 0;
index e2231e4..09a7cff 100644 (file)
@@ -94,8 +94,20 @@ static inline size_t iov_iter_count(struct iov_iter *i)
        return i->count;
 }
 
-static inline void iov_iter_truncate(struct iov_iter *i, size_t count)
+/*
+ * Cap the iov_iter at the given limit; note that the second argument is
+ * *not* the new size - it's an upper limit for it.  Passing it a value
+ * greater than the amount of data in iov_iter is fine - it'll just do
+ * nothing in that case.
+ */
+static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
 {
+       /*
+        * count doesn't have to fit in size_t - comparison extends both
+        * operands to u64 here and any value that would be truncated by
+        * conversion in assignment is by definition greater than all
+        * values of size_t, including old i->count.
+        */
        if (i->count > count)
                i->count = count;
 }
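A hedged caller sketch (the helper name is hypothetical) showing why the limit is u64: capping a read at EOF involves loff_t arithmetic that can exceed SIZE_MAX on 32-bit, which the comparison above now absorbs:

        static void cap_read_to_eof(struct iov_iter *iter, loff_t pos, loff_t i_size)
        {
                if (pos >= i_size)
                        iov_iter_truncate(iter, 0);
                else
                        iov_iter_truncate(iter, i_size - pos);  /* may exceed SIZE_MAX */
        }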
@@ -111,6 +123,9 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 
 int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
-
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+                       int offset, int len);
+int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
+                     int offset, int len);
 
 #endif
index 1a64b26..9b7de1b 100644
@@ -70,7 +70,9 @@
        US_FLAG(NEEDS_CAP16,    0x00400000)                     \
                /* cannot handle READ_CAPACITY_10 */            \
        US_FLAG(IGNORE_UAS,     0x00800000)                     \
-               /* Device advertises UAS but it is broken */
+               /* Device advertises UAS but it is broken */    \
+       US_FLAG(BROKEN_FUA,     0x01000000)                     \
+               /* Cannot handle FUA in WRITE or READ CDBs */   \
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
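US_FLAG() is an X-macro: the same list is re-expanded with different definitions, so the enum { US_DO_ALL_FLAGS } expansion above picks up US_FL_BROKEN_FUA = 0x01000000 automatically. Roughly how a driver would consume the new quirk (a sketch; 'us' and 'sdev' are assumed driver state, pairing with the broken_fua bit added to struct scsi_device further down):

        if (us->fflags & US_FL_BROKEN_FUA)      /* USB-level quirk flag */
                sdev->broken_fua = 1;           /* tell the SCSI disk driver */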
index bd68819..6fb1ba5 100644
@@ -25,6 +25,7 @@ struct wait_bit_key {
        void                    *flags;
        int                     bit_nr;
 #define WAIT_ATOMIC_T_BIT_NR   -1
+       unsigned long           private;
 };
 
 struct wait_bit_queue {
@@ -141,18 +142,19 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old)
        list_del(&old->task_list);
 }
 
+typedef int wait_bit_action_f(struct wait_bit_key *);
 void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
 void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
 void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr);
 void __wake_up_bit(wait_queue_head_t *, void *, int);
-int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
-int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned);
+int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
+int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned);
 void wake_up_bit(void *, int);
 void wake_up_atomic_t(atomic_t *);
-int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned);
-int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned);
+int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned);
+int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned);
 int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned);
 wait_queue_head_t *bit_waitqueue(void *, int);
 
@@ -854,11 +856,14 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
                (wait)->flags = 0;                                      \
        } while (0)
 
+
+extern int bit_wait(struct wait_bit_key *);
+extern int bit_wait_io(struct wait_bit_key *);
+
 /**
  * wait_on_bit - wait for a bit to be cleared
  * @word: the word being waited on, a kernel virtual address
  * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
  * @mode: the task state to sleep in
  *
  * There is a standard hashed waitqueue table for generic use. This
@@ -867,9 +872,62 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
  * call wait_on_bit() in threads waiting for the bit to clear.
  * One uses wait_on_bit() where one is waiting for the bit to clear,
  * but has no intention of setting it.
+ * The returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit(void *word, int bit, unsigned mode)
+{
+       if (!test_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit(word, bit,
+                                      bit_wait,
+                                      mode);
+}
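Usage sketch (the flag word is hypothetical): callers now pass just the word, bit and mode; the common schedule()-based wait is implied, with the _io and _action variants below covering the rest:

        static void wait_for_setup(unsigned long *flags)
        {
                might_sleep();
                wait_on_bit(flags, 0, TASK_UNINTERRUPTIBLE);    /* returns 0 here */
        }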
+
+/**
+ * wait_on_bit_io - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared.  This is similar to wait_on_bit(), but calls
+ * io_schedule() instead of schedule() for the actual waiting.
+ *
+ * The returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
+ */
+static inline int
+wait_on_bit_io(void *word, int bit, unsigned mode)
+{
+       if (!test_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit(word, bit,
+                                      bit_wait_io,
+                                      mode);
+}
+
+/**
+ * wait_on_bit_action - wait for a bit to be cleared
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared, and allow the waiting action to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * The returned value will be zero if the bit was cleared, or non-zero
+ * if the process received a signal and the mode permitted wakeup
+ * on that signal.
  */
 static inline int
-wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
+wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
 {
        if (!test_bit(bit, word))
                return 0;
@@ -880,7 +938,6 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
  * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
  * @word: the word being waited on, a kernel virtual address
  * @bit: the bit of the word being waited on
- * @action: the function used to sleep, which may take special actions
  * @mode: the task state to sleep in
  *
  * There is a standard hashed waitqueue table for generic use. This
@@ -891,9 +948,61 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode)
  * wait_on_bit() in threads waiting to be able to set the bit.
  * One uses wait_on_bit_lock() where one is waiting for the bit to
  * clear with the intention of setting it, and when done, clearing it.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock(void *word, int bit, unsigned mode)
+{
+       if (!test_and_set_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
+}
+
+/**
+ * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to atomically set it.  This is similar
+ * to wait_on_bit(), but calls io_schedule() instead of schedule()
+ * for the actual waiting.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
+ */
+static inline int
+wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+{
+       if (!test_and_set_bit(bit, word))
+               return 0;
+       return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
+}
+
+/**
+ * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
+ * @word: the word being waited on, a kernel virtual address
+ * @bit: the bit of the word being waited on
+ * @action: the function used to sleep, which may take special actions
+ * @mode: the task state to sleep in
+ *
+ * Use the standard hashed waitqueue table to wait for a bit
+ * to be cleared and then to set it, and allow the waiting action
+ * to be specified.
+ * This is like wait_on_bit() but allows fine control of how the waiting
+ * is done.
+ *
+ * Returns zero if the bit was (eventually) found to be clear and was
+ * set.  Returns non-zero if a signal was delivered to the process and
+ * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode)
+wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
 {
        if (!test_and_set_bit(bit, word))
                return 0;
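A sketch of the lock-style pairing (MY_LOCK_BIT and the word are hypothetical): the winner of test_and_set_bit() proceeds, and the unlock side must order its stores before waking waiters:

        #define MY_LOCK_BIT     0

        static void with_bit_lock(unsigned long *word)
        {
                wait_on_bit_lock(word, MY_LOCK_BIT, TASK_UNINTERRUPTIBLE);
                /* ... critical section ... */
                clear_bit_unlock(MY_LOCK_BIT, word);
                smp_mb__after_atomic();
                wake_up_bit(word, MY_LOCK_BIT);
        }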
index 5777c13..a219be9 100644
@@ -90,7 +90,6 @@ struct writeback_control {
  * fs/fs-writeback.c
  */    
 struct bdi_writeback;
-int inode_wait(void *);
 void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
 void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
                                                        enum wb_reason reason);
@@ -105,7 +104,7 @@ void inode_wait_for_writeback(struct inode *inode);
 static inline void wait_on_inode(struct inode *inode)
 {
        might_sleep();
-       wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
+       wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
 }
 
 /*
index 0e795df..7596eb2 100644
@@ -309,16 +309,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
        }
 }
 
-#define IP_IDENTS_SZ 2048u
-extern atomic_t *ip_idents;
-
-static inline u32 ip_idents_reserve(u32 hash, int segs)
-{
-       atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
-
-       return atomic_add_return(segs, id_ptr) - segs;
-}
-
+u32 ip_idents_reserve(u32 hash, int segs);
 void __ip_select_ident(struct iphdr *iph, int segs);
 
 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
index 7277caf..47f4254 100644
@@ -203,7 +203,6 @@ struct neigh_table {
        void                    (*proxy_redo)(struct sk_buff *skb);
        char                    *id;
        struct neigh_parms      parms;
-       /* HACK. gc_* should follow parms without a gap! */
        int                     gc_interval;
        int                     gc_thresh1;
        int                     gc_thresh2;
index 7ee6ce6..c4d8619 100644
@@ -6,6 +6,7 @@
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_tables.h>
+#include <linux/u64_stats_sync.h>
 #include <net/netlink.h>
 
 #define NFT_JUMP_STACK_SIZE    16
@@ -503,9 +504,9 @@ enum nft_chain_flags {
  *     @net: net namespace that this chain belongs to
  *     @table: table that this chain belongs to
  *     @handle: chain handle
- *     @flags: bitmask of enum nft_chain_flags
  *     @use: number of jump references to this chain
  *     @level: length of longest path to this chain
+ *     @flags: bitmask of enum nft_chain_flags
  *     @name: name of the chain
  */
 struct nft_chain {
@@ -514,9 +515,9 @@ struct nft_chain {
        struct net                      *net;
        struct nft_table                *table;
        u64                             handle;
-       u8                              flags;
-       u16                             use;
+       u32                             use;
        u16                             level;
+       u8                              flags;
        char                            name[NFT_CHAIN_MAXNAMELEN];
 };
 
@@ -528,8 +529,9 @@ enum nft_chain_type {
 };
 
 struct nft_stats {
-       u64 bytes;
-       u64 pkts;
+       u64                     bytes;
+       u64                     pkts;
+       struct u64_stats_sync   syncp;
 };
 
 #define NFT_HOOK_OPS_MAX               2
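The new syncp lets 32-bit readers observe the 64-bit counters consistently. A hedged sketch of the usual u64_stats pattern ('stats', 'len' and the per-cpu ownership are assumptions):

        unsigned int seq;
        u64 pkts, bytes;

        /* Writer - only the owning CPU updates this instance: */
        u64_stats_update_begin(&stats->syncp);
        stats->pkts++;
        stats->bytes += len;
        u64_stats_update_end(&stats->syncp);

        /* Reader - retry until a consistent snapshot is seen: */
        do {
                seq = u64_stats_fetch_begin_irq(&stats->syncp);
                pkts  = stats->pkts;
                bytes = stats->bytes;
        } while (u64_stats_fetch_retry_irq(&stats->syncp, seq));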
index 079030c..e207096 100644
@@ -16,7 +16,7 @@ struct netns_sysctl_lowpan {
 struct netns_ieee802154_lowpan {
        struct netns_sysctl_lowpan sysctl;
        struct netns_frags      frags;
-       u16                     max_dsize;
+       int                     max_dsize;
 };
 
 #endif
index 26a394c..eee608b 100644
@@ -13,8 +13,8 @@ struct netns_nftables {
        struct nft_af_info      *inet;
        struct nft_af_info      *arp;
        struct nft_af_info      *bridge;
+       unsigned int            base_seq;
        u8                      gencursor;
-       u8                      genctr;
 };
 
 #endif
index 07b7fcd..1563507 100644
@@ -1730,8 +1730,8 @@ sk_dst_get(struct sock *sk)
 
        rcu_read_lock();
        dst = rcu_dereference(sk->sk_dst_cache);
-       if (dst)
-               dst_hold(dst);
+       if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+               dst = NULL;
        rcu_read_unlock();
        return dst;
 }
@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-       spin_lock(&sk->sk_dst_lock);
-       __sk_dst_set(sk, dst);
-       spin_unlock(&sk->sk_dst_lock);
+       struct dst_entry *old_dst;
+
+       sk_tx_queue_clear(sk);
+       old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+       dst_release(old_dst);
 }
 
 static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-       spin_lock(&sk->sk_dst_lock);
-       __sk_dst_reset(sk);
-       spin_unlock(&sk->sk_dst_lock);
+       sk_dst_set(sk, NULL);
 }
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
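The two hunks above replace sk_dst_lock with a generic lockless idiom; a hedged restatement with hypothetical names (slot, struct obj, refcnt):

        struct obj *p;

        rcu_read_lock();
        p = rcu_dereference(slot);              /* writers publish via xchg() */
        if (p && !atomic_inc_not_zero(&p->refcnt))
                p = NULL;                       /* lost the race with the final put */
        rcu_read_unlock();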
index 42ed789..e0ae710 100644
@@ -318,7 +318,7 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
 
 static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
 {
-       unsigned int xfer_len = blk_rq_bytes(scmd->request);
+       unsigned int xfer_len = scsi_out(scmd)->length;
        unsigned int prot_op = scsi_get_prot_op(scmd);
        unsigned int sector_size = scmd->device->sector_size;
 
index 5853c91..27ab310 100644
@@ -173,6 +173,7 @@ struct scsi_device {
        unsigned is_visible:1;  /* is the device visible in sysfs */
        unsigned wce_default_on:1;      /* Cache is ON by default */
        unsigned no_dif:1;      /* T10 PI (DIF) should be disabled */
+       unsigned broken_fua:1;          /* Don't set FUA bit */
 
        atomic_t disk_events_disable_depth; /* disable depth for disk events */
 
index eedda2c..1df3f2f 100644
@@ -116,6 +116,8 @@ struct snd_card {
        int user_ctl_count;             /* count of all user controls */
        struct list_head controls;      /* all controls for this card */
        struct list_head ctl_files;     /* active control files */
+       struct mutex user_ctl_lock;     /* protects user controls against
+                                          concurrent access */
 
        struct snd_info_entry *proc_root;       /* root for soundcard specific files */
        struct snd_info_entry *proc_id; /* the card id */
index 0fd06fe..26b4f2e 100644
 #undef __field_ext
 #define __field_ext(type, item, filter_type)   type    item;
 
+#undef __field_struct
+#define __field_struct(type, item)     type    item;
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)    type    item;
+
 #undef __array
 #define __array(type, item, len)       type    item[len];
 
 #undef __field_ext
 #define __field_ext(type, item, filter_type)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -315,9 +327,21 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = {     \
        if (ret)                                                        \
                return ret;
 
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)                    \
+       ret = trace_define_field(event_call, #type, #item,              \
+                                offsetof(typeof(field), item),         \
+                                sizeof(field.item),                    \
+                                0, filter_type);                       \
+       if (ret)                                                        \
+               return ret;
+
 #undef __field
 #define __field(type, item)    __field_ext(type, item, FILTER_OTHER)
 
+#undef __field_struct
+#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
+
 #undef __array
 #define __array(type, item, len)                                       \
        do {                                                            \
@@ -379,6 +403,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call)  \
 #undef __field_ext
 #define __field_ext(type, item, filter_type)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -550,6 +580,9 @@ static inline notrace int ftrace_get_offsets_##call(                        \
 #undef __field
 #define __field(type, item)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
 #undef __array
 #define __array(type, item, len)
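These stages give TRACE_EVENT() a way to record a small aggregate by value rather than splitting it into scalar fields. A hypothetical event using the new helper (all names invented):

        TRACE_EVENT(foo_sample,
                TP_PROTO(struct foo_state *st),
                TP_ARGS(st),
                TP_STRUCT__entry(
                        __field_struct( struct foo_state, st )
                ),
                TP_fast_assign(
                        __entry->st = *st;
                ),
                TP_printk("a=%d b=%d", __entry->st.a, __entry->st.b)
        );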
 
index fed853f..9674145 100644
@@ -4,6 +4,7 @@
 #include <linux/tracepoint.h>
 #include <linux/unistd.h>
 #include <linux/ftrace_event.h>
+#include <linux/thread_info.h>
 
 #include <asm/ptrace.h>
 
@@ -32,4 +33,18 @@ struct syscall_metadata {
        struct ftrace_event_call *exit_event;
 };
 
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+       if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+               set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+       else
+               clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+}
+#else
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+}
+#endif
+
 #endif /* _TRACE_SYSCALL_H */
index cf67147..3b9ff33 100644
@@ -342,6 +342,7 @@ enum {
 #define __AUDIT_ARCH_64BIT 0x80000000
 #define __AUDIT_ARCH_LE           0x40000000
 
+#define AUDIT_ARCH_AARCH64     (EM_AARCH64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ALPHA       (EM_ALPHA|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARM         (EM_ARM|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_ARMEB       (EM_ARM)
index 6f9c38c..2f47824 100644
@@ -38,6 +38,7 @@ struct btrfs_ioctl_vol_args {
 #define BTRFS_SUBVOL_QGROUP_INHERIT    (1ULL << 2)
 #define BTRFS_FSID_SIZE 16
 #define BTRFS_UUID_SIZE 16
+#define BTRFS_UUID_UNPARSED_SIZE       37
 
 #define BTRFS_QGROUP_INHERIT_SET_LIMITS        (1ULL << 0)
 
index 40b5ca8..25084a0 100644
  *  - add FATTR_CTIME
  *  - add ctime and ctimensec to fuse_setattr_in
  *  - add FUSE_RENAME2 request
+ *  - add FUSE_NO_OPEN_SUPPORT flag
  */
 
 #ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
  * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
  * FUSE_ASYNC_DIO: asynchronous direct I/O submission
  * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
  */
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
 #define FUSE_READDIRPLUS_AUTO  (1 << 14)
 #define FUSE_ASYNC_DIO         (1 << 15)
 #define FUSE_WRITEBACK_CACHE   (1 << 16)
+#define FUSE_NO_OPEN_SUPPORT   (1 << 17)
 
 /**
  * CUSE INIT request/reply flags
index e11d8f1..9b744af 100644
@@ -399,13 +399,18 @@ struct kvm_vapic_addr {
        __u64 vapic_addr;
 };
 
-/* for KVM_SET_MPSTATE */
+/* for KVM_SET_MP_STATE */
 
+/* not all states are valid on all architectures */
 #define KVM_MP_STATE_RUNNABLE          0
 #define KVM_MP_STATE_UNINITIALIZED     1
 #define KVM_MP_STATE_INIT_RECEIVED     2
 #define KVM_MP_STATE_HALTED            3
 #define KVM_MP_STATE_SIPI_RECEIVED     4
+#define KVM_MP_STATE_STOPPED           5
+#define KVM_MP_STATE_CHECK_STOP        6
+#define KVM_MP_STATE_OPERATING         7
+#define KVM_MP_STATE_LOAD              8
 
 struct kvm_mp_state {
        __u32 mp_state;
index 5312fae..9269de2 100644
@@ -705,6 +705,7 @@ enum perf_event_type {
         *      u32                             min;
         *      u64                             ino;
         *      u64                             ino_generation;
+        *      u32                             prot, flags;
         *      char                            filename[];
         *      struct sample_id                sample_id;
         * };
index 2a4b4a7..24b68c5 100644
@@ -33,6 +33,13 @@ struct usb_endpoint_descriptor_no_audio {
        __u8  bInterval;
 } __attribute__((packed));
 
+/* Legacy format, deprecated as of 3.14. */
+struct usb_functionfs_descs_head {
+       __le32 magic;
+       __le32 length;
+       __le32 fs_count;
+       __le32 hs_count;
+} __attribute__((packed, deprecated));
 
 /*
  * Descriptors format:
index 21eed48..1964026 100644
@@ -39,7 +39,7 @@
 struct snd_compressed_buffer {
        __u32 fragment_size;
        __u32 fragments;
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_compr_params: compressed stream params
@@ -51,7 +51,7 @@ struct snd_compr_params {
        struct snd_compressed_buffer buffer;
        struct snd_codec codec;
        __u8 no_wake_mode;
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_compr_tstamp: timestamp descriptor
@@ -70,7 +70,7 @@ struct snd_compr_tstamp {
        __u32 pcm_frames;
        __u32 pcm_io_frames;
        __u32 sampling_rate;
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_compr_avail: avail descriptor
@@ -80,7 +80,7 @@ struct snd_compr_tstamp {
 struct snd_compr_avail {
        __u64 avail;
        struct snd_compr_tstamp tstamp;
-} __attribute__((packed));
+} __attribute__((packed, aligned(4)));
 
 enum snd_compr_direction {
        SND_COMPRESS_PLAYBACK = 0,
@@ -107,7 +107,7 @@ struct snd_compr_caps {
        __u32 max_fragments;
        __u32 codecs[MAX_NUM_CODECS];
        __u32 reserved[11];
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_compr_codec_caps: query capability of codec
@@ -119,7 +119,7 @@ struct snd_compr_codec_caps {
        __u32 codec;
        __u32 num_descriptors;
        struct snd_codec_desc descriptor[MAX_NUM_CODEC_DESCRIPTORS];
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
@@ -140,7 +140,7 @@ enum {
 struct snd_compr_metadata {
         __u32 key;
         __u32 value[8];
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * compress path ioctl definitions
index 165e705..d9bd9ca 100644
@@ -268,7 +268,7 @@ struct snd_enc_vorbis {
        __u32 max_bit_rate;
        __u32 min_bit_rate;
        __u32 downmix;
-};
+} __attribute__((packed, aligned(4)));
 
 
 /**
@@ -284,7 +284,7 @@ struct snd_enc_real {
        __u32 quant_bits;
        __u32 start_region;
        __u32 num_regions;
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_enc_flac
@@ -308,12 +308,12 @@ struct snd_enc_real {
 struct snd_enc_flac {
        __u32 num;
        __u32 gain;
-};
+} __attribute__((packed, aligned(4)));
 
 struct snd_enc_generic {
        __u32 bw;       /* encoder bandwidth */
        __s32 reserved[15];
-};
+} __attribute__((packed, aligned(4)));
 
 union snd_codec_options {
        struct snd_enc_wma wma;
@@ -321,7 +321,7 @@ union snd_codec_options {
        struct snd_enc_real real;
        struct snd_enc_flac flac;
        struct snd_enc_generic generic;
-};
+} __attribute__((packed, aligned(4)));
 
 /** struct snd_codec_desc - description of codec capabilities
  * @max_ch: Maximum number of audio channels
@@ -358,7 +358,7 @@ struct snd_codec_desc {
        __u32 formats;
        __u32 min_buffer;
        __u32 reserved[15];
-};
+} __attribute__((packed, aligned(4)));
 
 /** struct snd_codec
  * @id: Identifies the supported audio encoder/decoder.
@@ -399,6 +399,6 @@ struct snd_codec {
        __u32 align;
        union snd_codec_options options;
        __u32 reserved[3];
-};
+} __attribute__((packed, aligned(4)));
 
 #endif
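Without the attribute, the leading __u64 members get 8-byte alignment on 64-bit but only 4-byte on i386, so sizeof() - and with it the _IOWR()-encoded ioctl numbers - differs between a 32-bit process and a 64-bit kernel. packed, aligned(4) pins the layout on both. A hypothetical build-time check of that intent:

        static inline void snd_compr_abi_check(void)
        {
                BUILD_BUG_ON(sizeof(struct snd_compr_avail) !=
                             sizeof(__u64) + sizeof(struct snd_compr_tstamp));
        }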
index a5af2a2..5c1aba1 100644
@@ -170,6 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
        unmap->dev_bus_addr = 0;
 }
 
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
 int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           void **__shared);
index 9d76b99..41066e4 100644
@@ -505,7 +505,7 @@ config PREEMPT_RCU
        def_bool TREE_PREEMPT_RCU
        help
          This option enables preemptible-RCU code that is common between
-         the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations.
+         TREE_PREEMPT_RCU and, in the old days, TINY_PREEMPT_RCU.
 
 config RCU_STALL_COMMON
        def_bool ( TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE )
@@ -737,7 +737,7 @@ choice
 
 config RCU_NOCB_CPU_NONE
        bool "No build_forced no-CBs CPUs"
-       depends on RCU_NOCB_CPU && !NO_HZ_FULL
+       depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
        help
          This option does not force any of the CPUs to be no-CBs CPUs.
          Only CPUs designated by the rcu_nocbs= boot parameter will be
@@ -751,7 +751,7 @@ config RCU_NOCB_CPU_NONE
 
 config RCU_NOCB_CPU_ZERO
        bool "CPU 0 is a build_forced no-CBs CPU"
-       depends on RCU_NOCB_CPU && !NO_HZ_FULL
+       depends on RCU_NOCB_CPU && !NO_HZ_FULL_ALL
        help
          This option forces CPU 0 to be a no-CBs CPU, so that its RCU
          callbacks are invoked by a per-CPU kthread whose name begins
index 35536d9..76768ee 100644
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
 
 endif
 
+config ARCH_SUPPORTS_ATOMIC_RMW
+       bool
+
 config MUTEX_SPIN_ON_OWNER
        def_bool y
-       depends on SMP && !DEBUG_MUTEXES
+       depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+
+config RWSEM_SPIN_ON_OWNER
+       def_bool y
+       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_USE_QUEUE_RWLOCK
        bool
index 7868fc3..7dc8788 100644
@@ -149,12 +149,14 @@ struct cgroup_root cgrp_dfl_root;
  */
 static bool cgrp_dfl_root_visible;
 
+/*
+ * Set by the boot param of the same name; it makes subsystems with a NULL
+ * ->dfl_files use ->legacy_files on the default hierarchy.
+ */
+static bool cgroup_legacy_files_on_dfl;
+
 /* some controllers are not supported in the default hierarchy */
-static const unsigned int cgrp_dfl_root_inhibit_ss_mask = 0
-#ifdef CONFIG_CGROUP_DEBUG
-       | (1 << debug_cgrp_id)
-#endif
-       ;
+static unsigned int cgrp_dfl_root_inhibit_ss_mask;
 
 /* The list of hierarchy roots */
 
@@ -180,13 +182,15 @@ static u64 css_serial_nr_next = 1;
  */
 static int need_forkexit_callback __read_mostly;
 
-static struct cftype cgroup_base_files[];
+static struct cftype cgroup_dfl_base_files[];
+static struct cftype cgroup_legacy_base_files[];
 
 static void cgroup_put(struct cgroup *cgrp);
 static int rebind_subsystems(struct cgroup_root *dst_root,
                             unsigned int ss_mask);
 static int cgroup_destroy_locked(struct cgroup *cgrp);
-static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss);
+static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
+                     bool visible);
 static void css_release(struct percpu_ref *ref);
 static void kill_css(struct cgroup_subsys_state *css);
 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
@@ -1036,6 +1040,58 @@ static void cgroup_put(struct cgroup *cgrp)
        css_put(&cgrp->self);
 }
 
+/**
+ * cgroup_refresh_child_subsys_mask - update child_subsys_mask
+ * @cgrp: the target cgroup
+ *
+ * On the default hierarchy, a subsystem may request other subsystems to be
+ * enabled together through its ->depends_on mask.  In such cases, more
+ * subsystems than specified in "cgroup.subtree_control" may be enabled.
+ *
+ * This function determines which subsystems need to be enabled given the
+ * current @cgrp->subtree_control and records it in
+ * @cgrp->child_subsys_mask.  The resulting mask is always a superset of
+ * @cgrp->subtree_control and follows the usual hierarchy rules.
+ */
+static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+{
+       struct cgroup *parent = cgroup_parent(cgrp);
+       unsigned int cur_ss_mask = cgrp->subtree_control;
+       struct cgroup_subsys *ss;
+       int ssid;
+
+       lockdep_assert_held(&cgroup_mutex);
+
+       if (!cgroup_on_dfl(cgrp)) {
+               cgrp->child_subsys_mask = cur_ss_mask;
+               return;
+       }
+
+       while (true) {
+               unsigned int new_ss_mask = cur_ss_mask;
+
+               for_each_subsys(ss, ssid)
+                       if (cur_ss_mask & (1 << ssid))
+                               new_ss_mask |= ss->depends_on;
+
+               /*
+                * Mask out subsystems which aren't available.  This can
+                * happen only if some depended-upon subsystems were bound
+                * to non-default hierarchies.
+                */
+               if (parent)
+                       new_ss_mask &= parent->child_subsys_mask;
+               else
+                       new_ss_mask &= cgrp->root->subsys_mask;
+
+               if (new_ss_mask == cur_ss_mask)
+                       break;
+               cur_ss_mask = new_ss_mask;
+       }
+
+       cgrp->child_subsys_mask = cur_ss_mask;
+}
+
 /**
  * cgroup_kn_unlock - unlocking helper for cgroup kernfs methods
  * @kn: the kernfs_node being serviced
@@ -1208,12 +1264,15 @@ static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
                up_write(&css_set_rwsem);
 
                src_root->subsys_mask &= ~(1 << ssid);
-               src_root->cgrp.child_subsys_mask &= ~(1 << ssid);
+               src_root->cgrp.subtree_control &= ~(1 << ssid);
+               cgroup_refresh_child_subsys_mask(&src_root->cgrp);
 
                /* default hierarchy doesn't enable controllers by default */
                dst_root->subsys_mask |= 1 << ssid;
-               if (dst_root != &cgrp_dfl_root)
-                       dst_root->cgrp.child_subsys_mask |= 1 << ssid;
+               if (dst_root != &cgrp_dfl_root) {
+                       dst_root->cgrp.subtree_control |= 1 << ssid;
+                       cgroup_refresh_child_subsys_mask(&dst_root->cgrp);
+               }
 
                if (ss->bind)
                        ss->bind(css);
@@ -1233,8 +1292,6 @@ static int cgroup_show_options(struct seq_file *seq,
        for_each_subsys(ss, ssid)
                if (root->subsys_mask & (1 << ssid))
                        seq_printf(seq, ",%s", ss->name);
-       if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
-               seq_puts(seq, ",sane_behavior");
        if (root->flags & CGRP_ROOT_NOPREFIX)
                seq_puts(seq, ",noprefix");
        if (root->flags & CGRP_ROOT_XATTR)
@@ -1268,6 +1325,7 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        bool all_ss = false, one_ss = false;
        unsigned int mask = -1U;
        struct cgroup_subsys *ss;
+       int nr_opts = 0;
        int i;
 
 #ifdef CONFIG_CPUSETS
@@ -1277,6 +1335,8 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        memset(opts, 0, sizeof(*opts));
 
        while ((token = strsep(&o, ",")) != NULL) {
+               nr_opts++;
+
                if (!*token)
                        return -EINVAL;
                if (!strcmp(token, "none")) {
@@ -1361,36 +1421,32 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
                        return -ENOENT;
        }
 
-       /* Consistency checks */
-
        if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
                pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
-
-               if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
-                   opts->cpuset_clone_children || opts->release_agent ||
-                   opts->name) {
-                       pr_err("sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n");
+               if (nr_opts != 1) {
+                       pr_err("sane_behavior: no other mount options allowed\n");
                        return -EINVAL;
                }
-       } else {
-               /*
-                * If the 'all' option was specified select all the
-                * subsystems, otherwise if 'none', 'name=' and a subsystem
-                * name options were not specified, let's default to 'all'
-                */
-               if (all_ss || (!one_ss && !opts->none && !opts->name))
-                       for_each_subsys(ss, i)
-                               if (!ss->disabled)
-                                       opts->subsys_mask |= (1 << i);
-
-               /*
-                * We either have to specify by name or by subsystems. (So
-                * all empty hierarchies must have a name).
-                */
-               if (!opts->subsys_mask && !opts->name)
-                       return -EINVAL;
+               return 0;
        }
 
+       /*
+        * If the 'all' option was specified, select all the subsystems;
+        * otherwise, if none of 'none', 'name=' or a subsystem name was
+        * specified, default to 'all'.
+        */
+       if (all_ss || (!one_ss && !opts->none && !opts->name))
+               for_each_subsys(ss, i)
+                       if (!ss->disabled)
+                               opts->subsys_mask |= (1 << i);
+
+       /*
+        * We either have to specify by name or by subsystems. (So all
+        * empty hierarchies must have a name).
+        */
+       if (!opts->subsys_mask && !opts->name)
+               return -EINVAL;
+
        /*
         * Option noprefix was introduced just for backward compatibility
         * with the old cpuset, so we allow noprefix only if mounting just
@@ -1399,7 +1455,6 @@ static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
        if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
                return -EINVAL;
 
-
        /* Can't specify "none" and some subsystems */
        if (opts->subsys_mask && opts->none)
                return -EINVAL;
@@ -1414,8 +1469,8 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
        struct cgroup_sb_opts opts;
        unsigned int added_mask, removed_mask;
 
-       if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
-               pr_err("sane_behavior: remount is not allowed\n");
+       if (root == &cgrp_dfl_root) {
+               pr_err("remount is not allowed\n");
                return -EINVAL;
        }
 
@@ -1434,11 +1489,10 @@ static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
        removed_mask = root->subsys_mask & ~opts.subsys_mask;
 
        /* Don't allow flags or name to change at remount */
-       if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
+       if ((opts.flags ^ root->flags) ||
            (opts.name && strcmp(opts.name, root->name))) {
                pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
-                      opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
-                      root->flags & CGRP_ROOT_OPTION_MASK, root->name);
+                      opts.flags, opts.name ?: "", root->flags, root->name);
                ret = -EINVAL;
                goto out_unlock;
        }
@@ -1563,6 +1617,7 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
 {
        LIST_HEAD(tmp_links);
        struct cgroup *root_cgrp = &root->cgrp;
+       struct cftype *base_files;
        struct css_set *cset;
        int i, ret;
 
@@ -1600,7 +1655,12 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
        }
        root_cgrp->kn = root->kf_root->kn;
 
-       ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
+       if (root == &cgrp_dfl_root)
+               base_files = cgroup_dfl_base_files;
+       else
+               base_files = cgroup_legacy_base_files;
+
+       ret = cgroup_addrm_files(root_cgrp, base_files, true);
        if (ret)
                goto destroy_root;
 
@@ -1638,7 +1698,7 @@ destroy_root:
 exit_root_id:
        cgroup_exit_root_id(root);
 cancel_ref:
-       percpu_ref_cancel_init(&root_cgrp->self.refcnt);
+       percpu_ref_exit(&root_cgrp->self.refcnt);
 out:
        free_cgrp_cset_links(&tmp_links);
        return ret;
@@ -1648,10 +1708,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                         int flags, const char *unused_dev_name,
                         void *data)
 {
+       struct super_block *pinned_sb = NULL;
+       struct cgroup_subsys *ss;
        struct cgroup_root *root;
        struct cgroup_sb_opts opts;
        struct dentry *dentry;
        int ret;
+       int i;
        bool new_sb;
 
        /*
@@ -1669,7 +1732,7 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                goto out_unlock;
 
        /* look for a matching existing root */
-       if (!opts.subsys_mask && !opts.none && !opts.name) {
+       if (opts.flags & CGRP_ROOT_SANE_BEHAVIOR) {
                cgrp_dfl_root_visible = true;
                root = &cgrp_dfl_root;
                cgroup_get(&root->cgrp);
@@ -1677,6 +1740,27 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                goto out_unlock;
        }
 
+       /*
+        * Destruction of cgroup root is asynchronous, so subsystems may
+        * still be dying after the previous unmount.  Let's drain the
+        * dying subsystems.  We just need to ensure that the ones
+        * unmounted previously finish dying and don't care about new ones
+        * starting.  Testing ref liveness is good enough.
+        */
+       for_each_subsys(ss, i) {
+               if (!(opts.subsys_mask & (1 << i)) ||
+                   ss->root == &cgrp_dfl_root)
+                       continue;
+
+               if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
+                       mutex_unlock(&cgroup_mutex);
+                       msleep(10);
+                       ret = restart_syscall();
+                       goto out_free;
+               }
+               cgroup_put(&ss->root->cgrp);
+       }
+
        for_each_root(root) {
                bool name_match = false;
 
@@ -1706,26 +1790,27 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                        goto out_unlock;
                }
 
-               if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) {
-                       if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
-                               pr_err("sane_behavior: new mount options should match the existing superblock\n");
-                               ret = -EINVAL;
-                               goto out_unlock;
-                       } else {
-                               pr_warn("new mount options do not match the existing superblock, will be ignored\n");
-                       }
-               }
+               if (root->flags ^ opts.flags)
+                       pr_warn("new mount options do not match the existing superblock, will be ignored\n");
 
                /*
-                * A root's lifetime is governed by its root cgroup.
-                * tryget_live failure indicate that the root is being
-                * destroyed.  Wait for destruction to complete so that the
-                * subsystems are free.  We can use wait_queue for the wait
-                * but this path is super cold.  Let's just sleep for a bit
-                * and retry.
+                * We want to reuse @root whose lifetime is governed by its
+                * ->cgrp.  Let's check whether @root is alive and keep it
+                * that way.  As cgroup_kill_sb() can happen anytime, we
+                * want to block it by pinning the sb so that @root doesn't
+                * get killed before mount is complete.
+                *
+                * With the sb pinned, tryget_live can reliably indicate
+                * whether @root can be reused.  If it's being killed,
+                * drain it.  We could use a wait_queue for the wait, but
+                * this path is super cold.  Let's just sleep a bit and retry.
                 */
-               if (!percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
+               pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
+               if (IS_ERR(pinned_sb) ||
+                   !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
+                       if (!IS_ERR_OR_NULL(pinned_sb))
+                               deactivate_super(pinned_sb);
                        msleep(10);
                        ret = restart_syscall();
                        goto out_free;
@@ -1770,6 +1855,16 @@ out_free:
                                CGROUP_SUPER_MAGIC, &new_sb);
        if (IS_ERR(dentry) || !new_sb)
                cgroup_put(&root->cgrp);
+
+       /*
+        * If @pinned_sb, we're reusing an existing root and holding an
+        * extra ref on its sb.  Mount is complete.  Put the extra ref.
+        */
+       if (pinned_sb) {
+               WARN_ON(new_sb);
+               deactivate_super(pinned_sb);
+       }
+
        return dentry;
 }
 
@@ -2415,9 +2510,7 @@ static int cgroup_release_agent_show(struct seq_file *seq, void *v)
 
 static int cgroup_sane_behavior_show(struct seq_file *seq, void *v)
 {
-       struct cgroup *cgrp = seq_css(seq)->cgroup;
-
-       seq_printf(seq, "%d\n", cgroup_sane_behavior(cgrp));
+       seq_puts(seq, "0\n");
        return 0;
 }
 
@@ -2454,7 +2547,7 @@ static int cgroup_controllers_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
 
-       cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->child_subsys_mask);
+       cgroup_print_ss_mask(seq, cgroup_parent(cgrp)->subtree_control);
        return 0;
 }
 
@@ -2463,7 +2556,7 @@ static int cgroup_subtree_control_show(struct seq_file *seq, void *v)
 {
        struct cgroup *cgrp = seq_css(seq)->cgroup;
 
-       cgroup_print_ss_mask(seq, cgrp->child_subsys_mask);
+       cgroup_print_ss_mask(seq, cgrp->subtree_control);
        return 0;
 }
 
@@ -2569,6 +2662,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
                                            loff_t off)
 {
        unsigned int enable = 0, disable = 0;
+       unsigned int css_enable, css_disable, old_ctrl, new_ctrl;
        struct cgroup *cgrp, *child;
        struct cgroup_subsys *ss;
        char *tok;
@@ -2608,11 +2702,26 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 
        for_each_subsys(ss, ssid) {
                if (enable & (1 << ssid)) {
-                       if (cgrp->child_subsys_mask & (1 << ssid)) {
+                       if (cgrp->subtree_control & (1 << ssid)) {
                                enable &= ~(1 << ssid);
                                continue;
                        }
 
+                       /* unavailable or not enabled on the parent? */
+                       if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
+                           (cgroup_parent(cgrp) &&
+                            !(cgroup_parent(cgrp)->subtree_control & (1 << ssid)))) {
+                               ret = -ENOENT;
+                               goto out_unlock;
+                       }
+
+                       /*
+                        * @ss is already enabled through dependency and
+                        * we'll just make it visible.  Skip draining.
+                        */
+                       if (cgrp->child_subsys_mask & (1 << ssid))
+                               continue;
+
                        /*
                         * Because css offlining is asynchronous, userland
                         * might try to re-enable the same controller while
@@ -2635,23 +2744,15 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 
                                return restart_syscall();
                        }
-
-                       /* unavailable or not enabled on the parent? */
-                       if (!(cgrp_dfl_root.subsys_mask & (1 << ssid)) ||
-                           (cgroup_parent(cgrp) &&
-                            !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ssid)))) {
-                               ret = -ENOENT;
-                               goto out_unlock;
-                       }
                } else if (disable & (1 << ssid)) {
-                       if (!(cgrp->child_subsys_mask & (1 << ssid))) {
+                       if (!(cgrp->subtree_control & (1 << ssid))) {
                                disable &= ~(1 << ssid);
                                continue;
                        }
 
                        /* a child has it enabled? */
                        cgroup_for_each_live_child(child, cgrp) {
-                               if (child->child_subsys_mask & (1 << ssid)) {
+                               if (child->subtree_control & (1 << ssid)) {
                                        ret = -EBUSY;
                                        goto out_unlock;
                                }
@@ -2665,7 +2766,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
        }
 
        /*
-        * Except for the root, child_subsys_mask must be zero for a cgroup
+        * Except for the root, subtree_control must be zero for a cgroup
         * with tasks so that child cgroups don't compete against tasks.
         */
        if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) {
@@ -2674,36 +2775,75 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
        }
 
        /*
-        * Create csses for enables and update child_subsys_mask.  This
-        * changes cgroup_e_css() results which in turn makes the
-        * subsequent cgroup_update_dfl_csses() associate all tasks in the
-        * subtree to the updated csses.
+        * Update subsys masks and calculate what needs to be done.  More
+        * subsystems than specified may need to be enabled or disabled
+        * depending on subsystem dependencies.
+        */
+       cgrp->subtree_control |= enable;
+       cgrp->subtree_control &= ~disable;
+
+       old_ctrl = cgrp->child_subsys_mask;
+       cgroup_refresh_child_subsys_mask(cgrp);
+       new_ctrl = cgrp->child_subsys_mask;
+
+       css_enable = ~old_ctrl & new_ctrl;
+       css_disable = old_ctrl & ~new_ctrl;
+       enable |= css_enable;
+       disable |= css_disable;
+
+       /*
+        * Create new csses or make the existing ones visible.  A css is
+        * created invisible if it's being implicitly enabled through
+        * dependency.  An invisible css is made visible when the userland
+        * explicitly enables it.
         */
        for_each_subsys(ss, ssid) {
                if (!(enable & (1 << ssid)))
                        continue;
 
                cgroup_for_each_live_child(child, cgrp) {
-                       ret = create_css(child, ss);
+                       if (css_enable & (1 << ssid))
+                               ret = create_css(child, ss,
+                                       cgrp->subtree_control & (1 << ssid));
+                       else
+                               ret = cgroup_populate_dir(child, 1 << ssid);
                        if (ret)
                                goto err_undo_css;
                }
        }
 
-       cgrp->child_subsys_mask |= enable;
-       cgrp->child_subsys_mask &= ~disable;
-
+       /*
+        * At this point, cgroup_e_css() results reflect the new csses
+        * making the following cgroup_update_dfl_csses() properly update
+        * css associations of all tasks in the subtree.
+        */
        ret = cgroup_update_dfl_csses(cgrp);
        if (ret)
                goto err_undo_css;
 
-       /* all tasks are now migrated away from the old csses, kill them */
+       /*
+        * All tasks are migrated out of disabled csses.  Kill or hide
+        * them.  A css is hidden when the userland requests it to be
+        * disabled while other subsystems are still depending on it.  The
+        * css must not actively control resources and must be in the vanilla
+        * state if it's made visible again later.  Controllers which may
+        * be depended upon should provide ->css_reset() for this purpose.
+        */
        for_each_subsys(ss, ssid) {
                if (!(disable & (1 << ssid)))
                        continue;
 
-               cgroup_for_each_live_child(child, cgrp)
-                       kill_css(cgroup_css(child, ss));
+               cgroup_for_each_live_child(child, cgrp) {
+                       struct cgroup_subsys_state *css = cgroup_css(child, ss);
+
+                       if (css_disable & (1 << ssid)) {
+                               kill_css(css);
+                       } else {
+                               cgroup_clear_dir(child, 1 << ssid);
+                               if (ss->css_reset)
+                                       ss->css_reset(css);
+                       }
+               }
        }
 
        kernfs_activate(cgrp->kn);
@@ -2713,8 +2853,9 @@ out_unlock:
        return ret ?: nbytes;
 
 err_undo_css:
-       cgrp->child_subsys_mask &= ~enable;
-       cgrp->child_subsys_mask |= disable;
+       cgrp->subtree_control &= ~enable;
+       cgrp->subtree_control |= disable;
+       cgroup_refresh_child_subsys_mask(cgrp);
 
        for_each_subsys(ss, ssid) {
                if (!(enable & (1 << ssid)))
@@ -2722,8 +2863,14 @@ err_undo_css:
 
                cgroup_for_each_live_child(child, cgrp) {
                        struct cgroup_subsys_state *css = cgroup_css(child, ss);
-                       if (css)
+
+                       if (!css)
+                               continue;
+
+                       if (css_enable & (1 << ssid))
                                kill_css(css);
+                       else
+                               cgroup_clear_dir(child, 1 << ssid);
                }
        }
        goto out_unlock;
@@ -2836,9 +2983,9 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
 
        /*
         * This isn't a proper migration and its usefulness is very
-        * limited.  Disallow if sane_behavior.
+        * limited.  Disallow on the default hierarchy.
         */
-       if (cgroup_sane_behavior(cgrp))
+       if (cgroup_on_dfl(cgrp))
                return -EPERM;
 
        /*
@@ -2922,9 +3069,9 @@ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
 
        for (cft = cfts; cft->name[0] != '\0'; cft++) {
                /* does cft->flags tell us to skip this file on @cgrp? */
-               if ((cft->flags & CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
+               if ((cft->flags & __CFTYPE_ONLY_ON_DFL) && !cgroup_on_dfl(cgrp))
                        continue;
-               if ((cft->flags & CFTYPE_INSANE) && cgroup_sane_behavior(cgrp))
+               if ((cft->flags & __CFTYPE_NOT_ON_DFL) && cgroup_on_dfl(cgrp))
                        continue;
                if ((cft->flags & CFTYPE_NOT_ON_ROOT) && !cgroup_parent(cgrp))
                        continue;
@@ -2982,6 +3129,9 @@ static void cgroup_exit_cftypes(struct cftype *cfts)
                        kfree(cft->kf_ops);
                cft->kf_ops = NULL;
                cft->ss = NULL;
+
+               /* revert flags set by cgroup core while adding @cfts */
+               cft->flags &= ~(__CFTYPE_ONLY_ON_DFL | __CFTYPE_NOT_ON_DFL);
        }
 }
 
@@ -3067,7 +3217,7 @@ int cgroup_rm_cftypes(struct cftype *cfts)
  * function currently returns 0 as long as @cfts registration is successful
  * even if some file creation attempts on existing cgroups fail.
  */
-int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+static int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
 {
        int ret;
 
@@ -3092,6 +3242,40 @@ int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
        return ret;
 }
 
+/**
+ * cgroup_add_dfl_cftypes - add an array of cftypes for default hierarchy
+ * @ss: target cgroup subsystem
+ * @cfts: zero-length name terminated array of cftypes
+ *
+ * Similar to cgroup_add_cftypes() but the added files are only used for
+ * the default hierarchy.
+ */
+int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+{
+       struct cftype *cft;
+
+       for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+               cft->flags |= __CFTYPE_ONLY_ON_DFL;
+       return cgroup_add_cftypes(ss, cfts);
+}
+
+/**
+ * cgroup_add_legacy_cftypes - add an array of cftypes for legacy hierarchies
+ * @ss: target cgroup subsystem
+ * @cfts: zero-length name terminated array of cftypes
+ *
+ * Similar to cgroup_add_cftypes() but the added files are only used for
+ * the legacy hierarchies.
+ */
+int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts)
+{
+       struct cftype *cft;
+
+       for (cft = cfts; cft && cft->name[0] != '\0'; cft++)
+               cft->flags |= __CFTYPE_NOT_ON_DFL;
+       return cgroup_add_cftypes(ss, cfts);
+}
+
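A controller would typically register one array of files through each helper (the foo_* names are hypothetical):

        cgroup_add_dfl_cftypes(&foo_cgrp_subsys, foo_dfl_files);
        cgroup_add_legacy_cftypes(&foo_cgrp_subsys, foo_legacy_files);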
 /**
  * cgroup_task_count - count the number of tasks in a cgroup.
  * @cgrp: the cgroup in question
@@ -3328,7 +3512,7 @@ bool css_has_online_children(struct cgroup_subsys_state *css)
 
        rcu_read_lock();
        css_for_each_child(child, css) {
-               if (css->flags & CSS_ONLINE) {
+               if (child->flags & CSS_ONLINE) {
                        ret = true;
                        break;
                }
@@ -3657,8 +3841,9 @@ after:
  *
  * All this extra complexity was caused by the original implementation
  * committing to an entirely unnecessary property.  In the long term, we
- * want to do away with it.  Explicitly scramble sort order if
- * sane_behavior so that no such expectation exists in the new interface.
+ * want to do away with it.  Explicitly scramble sort order if on the
+ * default hierarchy so that no such expectation exists in the new
+ * interface.
  *
 * Scrambling is done by swapping every two consecutive bits, which is a
 * non-identity one-to-one mapping that disturbs sort order sufficiently.
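A sketch matching that description (swap adjacent bit pairs; the mapping is its own inverse):

        static pid_t pid_fry(pid_t pid)
        {
                unsigned a = pid & 0x55555555;
                unsigned b = pid & 0xaaaaaaaa;

                return (a << 1) | (b >> 1);
        }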
@@ -3673,7 +3858,7 @@ static pid_t pid_fry(pid_t pid)
 
 static pid_t cgroup_pid_fry(struct cgroup *cgrp, pid_t pid)
 {
-       if (cgroup_sane_behavior(cgrp))
+       if (cgroup_on_dfl(cgrp))
                return pid_fry(pid);
        else
                return pid;
@@ -3776,7 +3961,7 @@ static int pidlist_array_load(struct cgroup *cgrp, enum cgroup_filetype type,
        css_task_iter_end(&it);
        length = n;
        /* now sort & (if procs) strip out duplicates */
-       if (cgroup_sane_behavior(cgrp))
+       if (cgroup_on_dfl(cgrp))
                sort(array, length, sizeof(pid_t), fried_cmppid, NULL);
        else
                sort(array, length, sizeof(pid_t), cmppid, NULL);
@@ -3998,7 +4183,8 @@ static int cgroup_clone_children_write(struct cgroup_subsys_state *css,
        return 0;
 }
 
-static struct cftype cgroup_base_files[] = {
+/* cgroup core interface files for the default hierarchy */
+static struct cftype cgroup_dfl_base_files[] = {
        {
                .name = "cgroup.procs",
                .seq_start = cgroup_pidlist_start,
@@ -4009,47 +4195,53 @@ static struct cftype cgroup_base_files[] = {
                .write = cgroup_procs_write,
                .mode = S_IRUGO | S_IWUSR,
        },
-       {
-               .name = "cgroup.clone_children",
-               .flags = CFTYPE_INSANE,
-               .read_u64 = cgroup_clone_children_read,
-               .write_u64 = cgroup_clone_children_write,
-       },
-       {
-               .name = "cgroup.sane_behavior",
-               .flags = CFTYPE_ONLY_ON_ROOT,
-               .seq_show = cgroup_sane_behavior_show,
-       },
        {
                .name = "cgroup.controllers",
-               .flags = CFTYPE_ONLY_ON_DFL | CFTYPE_ONLY_ON_ROOT,
+               .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_root_controllers_show,
        },
        {
                .name = "cgroup.controllers",
-               .flags = CFTYPE_ONLY_ON_DFL | CFTYPE_NOT_ON_ROOT,
+               .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = cgroup_controllers_show,
        },
        {
                .name = "cgroup.subtree_control",
-               .flags = CFTYPE_ONLY_ON_DFL,
                .seq_show = cgroup_subtree_control_show,
                .write = cgroup_subtree_control_write,
        },
        {
                .name = "cgroup.populated",
-               .flags = CFTYPE_ONLY_ON_DFL | CFTYPE_NOT_ON_ROOT,
+               .flags = CFTYPE_NOT_ON_ROOT,
                .seq_show = cgroup_populated_show,
        },
+       { }     /* terminate */
+};
 
-       /*
-        * Historical crazy stuff.  These don't have "cgroup."  prefix and
-        * don't exist if sane_behavior.  If you're depending on these, be
-        * prepared to be burned.
-        */
+/* cgroup core interface files for the legacy hierarchies */
+static struct cftype cgroup_legacy_base_files[] = {
+       {
+               .name = "cgroup.procs",
+               .seq_start = cgroup_pidlist_start,
+               .seq_next = cgroup_pidlist_next,
+               .seq_stop = cgroup_pidlist_stop,
+               .seq_show = cgroup_pidlist_show,
+               .private = CGROUP_FILE_PROCS,
+               .write = cgroup_procs_write,
+               .mode = S_IRUGO | S_IWUSR,
+       },
+       {
+               .name = "cgroup.clone_children",
+               .read_u64 = cgroup_clone_children_read,
+               .write_u64 = cgroup_clone_children_write,
+       },
+       {
+               .name = "cgroup.sane_behavior",
+               .flags = CFTYPE_ONLY_ON_ROOT,
+               .seq_show = cgroup_sane_behavior_show,
+       },
        {
                .name = "tasks",
-               .flags = CFTYPE_INSANE,         /* use "procs" instead */
                .seq_start = cgroup_pidlist_start,
                .seq_next = cgroup_pidlist_next,
                .seq_stop = cgroup_pidlist_stop,
@@ -4060,13 +4252,12 @@ static struct cftype cgroup_base_files[] = {
        },
        {
                .name = "notify_on_release",
-               .flags = CFTYPE_INSANE,
                .read_u64 = cgroup_read_notify_on_release,
                .write_u64 = cgroup_write_notify_on_release,
        },
        {
                .name = "release_agent",
-               .flags = CFTYPE_INSANE | CFTYPE_ONLY_ON_ROOT,
+               .flags = CFTYPE_ONLY_ON_ROOT,
                .seq_show = cgroup_release_agent_show,
                .write = cgroup_release_agent_write,
                .max_write_len = PATH_MAX - 1,
@@ -4133,6 +4324,8 @@ static void css_free_work_fn(struct work_struct *work)
                container_of(work, struct cgroup_subsys_state, destroy_work);
        struct cgroup *cgrp = css->cgroup;
 
+       percpu_ref_exit(&css->refcnt);
+
        if (css->ss) {
                /* css free path */
                if (css->parent)
@@ -4272,12 +4465,14 @@ static void offline_css(struct cgroup_subsys_state *css)
  * create_css - create a cgroup_subsys_state
  * @cgrp: the cgroup new css will be associated with
  * @ss: the subsys of new css
+ * @visible: whether to create control knobs for the new css or not
  *
  * Create a new css associated with @cgrp - @ss pair.  On success, the new
- * css is online and installed in @cgrp with all interface files created.
- * Returns 0 on success, -errno on failure.
+ * css is online and installed in @cgrp with all interface files created if
+ * @visible.  Returns 0 on success, -errno on failure.
  */
-static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
+static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss,
+                     bool visible)
 {
        struct cgroup *parent = cgroup_parent(cgrp);
        struct cgroup_subsys_state *parent_css = cgroup_css(parent, ss);
@@ -4301,9 +4496,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
                goto err_free_percpu_ref;
        css->id = err;
 
-       err = cgroup_populate_dir(cgrp, 1 << ss->id);
-       if (err)
-               goto err_free_id;
+       if (visible) {
+               err = cgroup_populate_dir(cgrp, 1 << ss->id);
+               if (err)
+                       goto err_free_id;
+       }
 
        /* @css is ready to be brought online now, make it visible */
        list_add_tail_rcu(&css->sibling, &parent_css->children);
@@ -4330,7 +4527,7 @@ err_list_del:
 err_free_id:
        cgroup_idr_remove(&ss->css_idr, css->id);
 err_free_percpu_ref:
-       percpu_ref_cancel_init(&css->refcnt);
+       percpu_ref_exit(&css->refcnt);
 err_free_css:
        call_rcu(&css->rcu_head, css_free_rcu_fn);
        return err;
@@ -4343,6 +4540,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
        struct cgroup_root *root;
        struct cgroup_subsys *ss;
        struct kernfs_node *kn;
+       struct cftype *base_files;
        int ssid, ret;
 
        parent = cgroup_kn_lock_live(parent_kn);
@@ -4413,14 +4611,20 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
        if (ret)
                goto out_destroy;
 
-       ret = cgroup_addrm_files(cgrp, cgroup_base_files, true);
+       if (cgroup_on_dfl(cgrp))
+               base_files = cgroup_dfl_base_files;
+       else
+               base_files = cgroup_legacy_base_files;
+
+       ret = cgroup_addrm_files(cgrp, base_files, true);
        if (ret)
                goto out_destroy;
 
        /* let's create and online css's */
        for_each_subsys(ss, ssid) {
                if (parent->child_subsys_mask & (1 << ssid)) {
-                       ret = create_css(cgrp, ss);
+                       ret = create_css(cgrp, ss,
+                                        parent->subtree_control & (1 << ssid));
                        if (ret)
                                goto out_destroy;
                }
@@ -4428,10 +4632,12 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 
        /*
         * On the default hierarchy, a child doesn't automatically inherit
-        * child_subsys_mask from the parent.  Each is configured manually.
+        * subtree_control from the parent.  Each is configured manually.
         */
-       if (!cgroup_on_dfl(cgrp))
-               cgrp->child_subsys_mask = parent->child_subsys_mask;
+       if (!cgroup_on_dfl(cgrp)) {
+               cgrp->subtree_control = parent->subtree_control;
+               cgroup_refresh_child_subsys_mask(cgrp);
+       }
 
        kernfs_activate(kn);
 
@@ -4441,7 +4647,7 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
 out_free_id:
        cgroup_idr_remove(&root->cgroup_idr, cgrp->id);
 out_cancel_ref:
-       percpu_ref_cancel_init(&cgrp->self.refcnt);
+       percpu_ref_exit(&cgrp->self.refcnt);
 out_free_cgrp:
        kfree(cgrp);
 out_unlock:
@@ -4694,8 +4900,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
  */
 int __init cgroup_init_early(void)
 {
-       static struct cgroup_sb_opts __initdata opts =
-               { .flags = CGRP_ROOT_SANE_BEHAVIOR };
+       static struct cgroup_sb_opts __initdata opts;
        struct cgroup_subsys *ss;
        int i;
 
@@ -4733,7 +4938,8 @@ int __init cgroup_init(void)
        unsigned long key;
        int ssid, err;
 
-       BUG_ON(cgroup_init_cftypes(NULL, cgroup_base_files));
+       BUG_ON(cgroup_init_cftypes(NULL, cgroup_dfl_base_files));
+       BUG_ON(cgroup_init_cftypes(NULL, cgroup_legacy_base_files));
 
        mutex_lock(&cgroup_mutex);
 
@@ -4765,9 +4971,22 @@ int __init cgroup_init(void)
                 * disabled flag and cftype registration needs kmalloc,
                 * both of which aren't available during early_init.
                 */
-               if (!ss->disabled) {
-                       cgrp_dfl_root.subsys_mask |= 1 << ss->id;
-                       WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes));
+               if (ss->disabled)
+                       continue;
+
+               cgrp_dfl_root.subsys_mask |= 1 << ss->id;
+
+               if (cgroup_legacy_files_on_dfl && !ss->dfl_cftypes)
+                       ss->dfl_cftypes = ss->legacy_cftypes;
+
+               if (!ss->dfl_cftypes)
+                       cgrp_dfl_root_inhibit_ss_mask |= 1 << ss->id;
+
+               if (ss->dfl_cftypes == ss->legacy_cftypes) {
+                       WARN_ON(cgroup_add_cftypes(ss, ss->dfl_cftypes));
+               } else {
+                       WARN_ON(cgroup_add_dfl_cftypes(ss, ss->dfl_cftypes));
+                       WARN_ON(cgroup_add_legacy_cftypes(ss, ss->legacy_cftypes));
                }
        }
 
@@ -5163,6 +5382,14 @@ static int __init cgroup_disable(char *str)
 }
 __setup("cgroup_disable=", cgroup_disable);
 
+static int __init cgroup_set_legacy_files_on_dfl(char *str)
+{
+       printk("cgroup: using legacy files on the default hierarchy\n");
+       cgroup_legacy_files_on_dfl = true;
+       return 0;
+}
+__setup("cgroup__DEVEL__legacy_files_on_dfl", cgroup_set_legacy_files_on_dfl);
+
 /**
  * css_tryget_online_from_dir - get corresponding css from a cgroup dentry
  * @dentry: directory dentry of interest
@@ -5357,6 +5584,6 @@ static struct cftype debug_files[] =  {
 struct cgroup_subsys debug_cgrp_subsys = {
        .css_alloc = debug_css_alloc,
        .css_free = debug_css_free,
-       .base_cftypes = debug_files,
+       .legacy_cftypes = debug_files,
 };
 #endif /* CONFIG_CGROUP_DEBUG */
index a79e40f..92b98cc 100644
@@ -480,5 +480,5 @@ struct cgroup_subsys freezer_cgrp_subsys = {
        .css_free       = freezer_css_free,
        .attach         = freezer_attach,
        .fork           = freezer_fork,
-       .base_cftypes   = files,
+       .legacy_cftypes = files,
 };
index 019d450..5664985 100644
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
+#include <linux/kprobes.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/context_tracking.h>
@@ -104,6 +105,7 @@ void context_tracking_user_enter(void)
        }
        local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_user_enter);
 
 #ifdef CONFIG_PREEMPT
 /**
@@ -181,6 +183,7 @@ void context_tracking_user_exit(void)
        }
        local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_user_exit);
 
 /**
  * __context_tracking_task_switch - context switch the syscall callbacks
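NOKPROBE_SYMBOL() is the table-based replacement for the old __kprobes annotation; a minimal sketch of the pattern used above (my_entry_func is hypothetical):

#include <linux/kprobes.h>

static void my_entry_func(void)
{
        /* code on the user/kernel transition path; a kprobe breakpoint
         * here could recurse before context tracking has run */
}
NOKPROBE_SYMBOL(my_entry_func);         /* exclude from kprobe placement */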
index a343bde..81e2a38 100644
@@ -274,21 +274,28 @@ void clear_tasks_mm_cpumask(int cpu)
        rcu_read_unlock();
 }
 
-static inline void check_for_tasks(int cpu)
+static inline void check_for_tasks(int dead_cpu)
 {
-       struct task_struct *p;
-       cputime_t utime, stime;
+       struct task_struct *g, *p;
 
-       write_lock_irq(&tasklist_lock);
-       for_each_process(p) {
-               task_cputime(p, &utime, &stime);
-               if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-                   (utime || stime))
-                       pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
-                               p->comm, task_pid_nr(p), cpu,
-                               p->state, p->flags);
-       }
-       write_unlock_irq(&tasklist_lock);
+       read_lock_irq(&tasklist_lock);
+       do_each_thread(g, p) {
+               if (!p->on_rq)
+                       continue;
+               /*
+                * We do the check with task_rq(p)->lock unlocked.
+                * Order the reads so that we do not warn about a
+                * task which was running on this cpu in the past
+                * and has just been woken on another cpu.
+                */
+               rmb();
+               if (task_cpu(p) != dead_cpu)
+                       continue;
+
+               pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
+                       p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
+       } while_each_thread(g, p);
+       read_unlock_irq(&tasklist_lock);
 }
 
 struct take_cpu_down_param {
index f6b33c6..22874d7 100644
@@ -76,8 +76,34 @@ struct cpuset {
        struct cgroup_subsys_state css;
 
        unsigned long flags;            /* "unsigned long" so bitops work */
-       cpumask_var_t cpus_allowed;     /* CPUs allowed to tasks in cpuset */
-       nodemask_t mems_allowed;        /* Memory Nodes allowed to tasks */
+
+       /*
+        * On the default hierarchy:
+        *
+        * The user-configured masks can only be changed by writing to
+        * cpuset.cpus and cpuset.mems, and won't be limited by the
+        * parent masks.
+        *
+        * The effective masks are the real masks that apply to the tasks
+        * in the cpuset. They may be changed if the configured masks are
+        * changed or hotplug happens.
+        *
+        * effective_mask == configured_mask & parent's effective_mask,
+        * and if it ends up empty, it will inherit the parent's mask.
+        *
+        * On legacy hierarchies:
+        *
+        * The user-configured masks are always the same as the effective masks.
+        */
+
+       /* user-configured CPUs and Memory Nodes allowed to tasks */
+       cpumask_var_t cpus_allowed;
+       nodemask_t mems_allowed;
+
+       /* effective CPUs and Memory Nodes allowed to tasks */
+       cpumask_var_t effective_cpus;
+       nodemask_t effective_mems;
 
        /*
         * This is old Memory Nodes tasks took on.
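The rule in the comment above amounts to the following computation (a sketch with a hypothetical helper name; the real propagation is done by update_cpumasks_hier() later in this patch):

/* effective = configured & parent's effective; an empty result inherits
 * the parent's effective mask so tasks always have CPUs to run on */
static void demo_effective_cpus(struct cpumask *effective,
                                const struct cpumask *configured,
                                const struct cpumask *parent_effective)
{
        cpumask_and(effective, configured, parent_effective);
        if (cpumask_empty(effective))
                cpumask_copy(effective, parent_effective);
}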
@@ -307,9 +333,9 @@ static struct file_system_type cpuset_fs_type = {
  */
 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
 {
-       while (!cpumask_intersects(cs->cpus_allowed, cpu_online_mask))
+       while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask))
                cs = parent_cs(cs);
-       cpumask_and(pmask, cs->cpus_allowed, cpu_online_mask);
+       cpumask_and(pmask, cs->effective_cpus, cpu_online_mask);
 }
 
 /*
@@ -325,9 +351,9 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
  */
 static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
 {
-       while (!nodes_intersects(cs->mems_allowed, node_states[N_MEMORY]))
+       while (!nodes_intersects(cs->effective_mems, node_states[N_MEMORY]))
                cs = parent_cs(cs);
-       nodes_and(*pmask, cs->mems_allowed, node_states[N_MEMORY]);
+       nodes_and(*pmask, cs->effective_mems, node_states[N_MEMORY]);
 }
 
 /*
@@ -376,13 +402,20 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
        if (!trial)
                return NULL;
 
-       if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL)) {
-               kfree(trial);
-               return NULL;
-       }
-       cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+       if (!alloc_cpumask_var(&trial->cpus_allowed, GFP_KERNEL))
+               goto free_cs;
+       if (!alloc_cpumask_var(&trial->effective_cpus, GFP_KERNEL))
+               goto free_cpus;
 
+       cpumask_copy(trial->cpus_allowed, cs->cpus_allowed);
+       cpumask_copy(trial->effective_cpus, cs->effective_cpus);
        return trial;
+
+free_cpus:
+       free_cpumask_var(trial->cpus_allowed);
+free_cs:
+       kfree(trial);
+       return NULL;
 }
 
 /**
@@ -391,6 +424,7 @@ static struct cpuset *alloc_trial_cpuset(struct cpuset *cs)
  */
 static void free_trial_cpuset(struct cpuset *trial)
 {
+       free_cpumask_var(trial->effective_cpus);
        free_cpumask_var(trial->cpus_allowed);
        kfree(trial);
 }
@@ -436,9 +470,9 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial)
 
        par = parent_cs(cur);
 
-       /* We must be a subset of our parent cpuset */
+       /* On legacy hierarchies, we must be a subset of our parent cpuset. */
        ret = -EACCES;
-       if (!is_cpuset_subset(trial, par))
+       if (!cgroup_on_dfl(cur->css.cgroup) && !is_cpuset_subset(trial, par))
                goto out;
 
        /*
@@ -480,11 +514,11 @@ out:
 #ifdef CONFIG_SMP
 /*
  * Helper routine for generate_sched_domains().
- * Do cpusets a, b have overlapping cpus_allowed masks?
+ * Do cpusets a, b have overlapping effective_cpus masks?
  */
 static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
 {
-       return cpumask_intersects(a->cpus_allowed, b->cpus_allowed);
+       return cpumask_intersects(a->effective_cpus, b->effective_cpus);
 }
 
 static void
@@ -601,7 +635,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
                        *dattr = SD_ATTR_INIT;
                        update_domain_attr_tree(dattr, &top_cpuset);
                }
-               cpumask_copy(doms[0], top_cpuset.cpus_allowed);
+               cpumask_copy(doms[0], top_cpuset.effective_cpus);
 
                goto done;
        }
@@ -705,7 +739,7 @@ restart:
                        struct cpuset *b = csa[j];
 
                        if (apn == b->pn) {
-                               cpumask_or(dp, dp, b->cpus_allowed);
+                               cpumask_or(dp, dp, b->effective_cpus);
                                if (dattr)
                                        update_domain_attr_tree(dattr + nslot, b);
 
@@ -757,7 +791,7 @@ static void rebuild_sched_domains_locked(void)
         * passing doms with offlined cpu to partition_sched_domains().
         * Anyways, hotplug work item will rebuild sched domains.
         */
-       if (!cpumask_equal(top_cpuset.cpus_allowed, cpu_active_mask))
+       if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
                goto out;
 
        /* Generate domain masks and attrs */
@@ -781,45 +815,6 @@ void rebuild_sched_domains(void)
        mutex_unlock(&cpuset_mutex);
 }
 
-/*
- * effective_cpumask_cpuset - return nearest ancestor with non-empty cpus
- * @cs: the cpuset in interest
- *
- * A cpuset's effective cpumask is the cpumask of the nearest ancestor
- * with non-empty cpus. We use effective cpumask whenever:
- * - we update tasks' cpus_allowed. (they take on the ancestor's cpumask
- *   if the cpuset they reside in has no cpus)
- * - we want to retrieve task_cs(tsk)'s cpus_allowed.
- *
- * Called with cpuset_mutex held. cpuset_cpus_allowed_fallback() is an
- * exception. See comments there.
- */
-static struct cpuset *effective_cpumask_cpuset(struct cpuset *cs)
-{
-       while (cpumask_empty(cs->cpus_allowed))
-               cs = parent_cs(cs);
-       return cs;
-}
-
-/*
- * effective_nodemask_cpuset - return nearest ancestor with non-empty mems
- * @cs: the cpuset in interest
- *
- * A cpuset's effective nodemask is the nodemask of the nearest ancestor
- * with non-empty memss. We use effective nodemask whenever:
- * - we update tasks' mems_allowed. (they take on the ancestor's nodemask
- *   if the cpuset they reside in has no mems)
- * - we want to retrieve task_cs(tsk)'s mems_allowed.
- *
- * Called with cpuset_mutex held.
- */
-static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
-{
-       while (nodes_empty(cs->mems_allowed))
-               cs = parent_cs(cs);
-       return cs;
-}
-
 /**
  * update_tasks_cpumask - Update the cpumasks of tasks in the cpuset.
  * @cs: the cpuset in which each task's cpus_allowed mask needs to be changed
@@ -830,53 +825,80 @@ static struct cpuset *effective_nodemask_cpuset(struct cpuset *cs)
  */
 static void update_tasks_cpumask(struct cpuset *cs)
 {
-       struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
        struct css_task_iter it;
        struct task_struct *task;
 
        css_task_iter_start(&cs->css, &it);
        while ((task = css_task_iter_next(&it)))
-               set_cpus_allowed_ptr(task, cpus_cs->cpus_allowed);
+               set_cpus_allowed_ptr(task, cs->effective_cpus);
        css_task_iter_end(&it);
 }
 
 /*
- * update_tasks_cpumask_hier - Update the cpumasks of tasks in the hierarchy.
- * @root_cs: the root cpuset of the hierarchy
- * @update_root: update root cpuset or not?
+ * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree
+ * @cs: the cpuset to consider
+ * @new_cpus: temp variable for calculating new effective_cpus
+ *
+ * When the configured cpumask is changed, the effective cpumasks of this
+ * cpuset and all its descendants need to be updated.
  *
- * This will update cpumasks of tasks in @root_cs and all other empty cpusets
- * which take on cpumask of @root_cs.
+ * On legacy hierarchies, effective_cpus is the same as cpus_allowed.
  *
  * Called with cpuset_mutex held
  */
-static void update_tasks_cpumask_hier(struct cpuset *root_cs, bool update_root)
+static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
 {
        struct cpuset *cp;
        struct cgroup_subsys_state *pos_css;
+       bool need_rebuild_sched_domains = false;
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-               if (cp == root_cs) {
-                       if (!update_root)
-                               continue;
-               } else {
-                       /* skip the whole subtree if @cp have some CPU */
-                       if (!cpumask_empty(cp->cpus_allowed)) {
-                               pos_css = css_rightmost_descendant(pos_css);
-                               continue;
-                       }
+       cpuset_for_each_descendant_pre(cp, pos_css, cs) {
+               struct cpuset *parent = parent_cs(cp);
+
+               cpumask_and(new_cpus, cp->cpus_allowed, parent->effective_cpus);
+
+               /*
+                * If it becomes empty, inherit the effective mask of the
+                * parent, which is guaranteed to have some CPUs.
+                */
+               if (cpumask_empty(new_cpus))
+                       cpumask_copy(new_cpus, parent->effective_cpus);
+
+               /* Skip the whole subtree if the cpumask remains the same. */
+               if (cpumask_equal(new_cpus, cp->effective_cpus)) {
+                       pos_css = css_rightmost_descendant(pos_css);
+                       continue;
                }
+
                if (!css_tryget_online(&cp->css))
                        continue;
                rcu_read_unlock();
 
+               mutex_lock(&callback_mutex);
+               cpumask_copy(cp->effective_cpus, new_cpus);
+               mutex_unlock(&callback_mutex);
+
+               WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+                       !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
+
                update_tasks_cpumask(cp);
 
+               /*
+                * If the effective cpumask of any non-empty cpuset is changed,
+                * we need to rebuild sched domains.
+                */
+               if (!cpumask_empty(cp->cpus_allowed) &&
+                   is_sched_load_balance(cp))
+                       need_rebuild_sched_domains = true;
+
                rcu_read_lock();
                css_put(&cp->css);
        }
        rcu_read_unlock();
+
+       if (need_rebuild_sched_domains)
+               rebuild_sched_domains_locked();
 }
 
 /**
@@ -889,7 +911,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
                          const char *buf)
 {
        int retval;
-       int is_load_balanced;
 
        /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */
        if (cs == &top_cpuset)
@@ -908,7 +929,8 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
                if (retval < 0)
                        return retval;
 
-               if (!cpumask_subset(trialcs->cpus_allowed, cpu_active_mask))
+               if (!cpumask_subset(trialcs->cpus_allowed,
+                                   top_cpuset.cpus_allowed))
                        return -EINVAL;
        }
 
@@ -920,16 +942,12 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
        if (retval < 0)
                return retval;
 
-       is_load_balanced = is_sched_load_balance(trialcs);
-
        mutex_lock(&callback_mutex);
        cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
        mutex_unlock(&callback_mutex);
 
-       update_tasks_cpumask_hier(cs, true);
-
-       if (is_load_balanced)
-               rebuild_sched_domains_locked();
+       /* use trialcs->cpus_allowed as a temp variable */
+       update_cpumasks_hier(cs, trialcs->cpus_allowed);
        return 0;
 }
 
@@ -951,15 +969,13 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
                                                        const nodemask_t *to)
 {
        struct task_struct *tsk = current;
-       struct cpuset *mems_cs;
 
        tsk->mems_allowed = *to;
 
        do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
        rcu_read_lock();
-       mems_cs = effective_nodemask_cpuset(task_cs(tsk));
-       guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+       guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
        rcu_read_unlock();
 }
 
@@ -1028,13 +1044,12 @@ static void *cpuset_being_rebound;
 static void update_tasks_nodemask(struct cpuset *cs)
 {
        static nodemask_t newmems;      /* protected by cpuset_mutex */
-       struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
        struct css_task_iter it;
        struct task_struct *task;
 
        cpuset_being_rebound = cs;              /* causes mpol_dup() rebind */
 
-       guarantee_online_mems(mems_cs, &newmems);
+       guarantee_online_mems(cs, &newmems);
 
        /*
         * The mpol_rebind_mm() call takes mmap_sem, which we couldn't
@@ -1077,36 +1092,52 @@ static void update_tasks_nodemask(struct cpuset *cs)
 }
 
 /*
- * update_tasks_nodemask_hier - Update the nodemasks of tasks in the hierarchy.
- * @cs: the root cpuset of the hierarchy
- * @update_root: update the root cpuset or not?
+ * update_nodemasks_hier - Update effective nodemasks and tasks in the subtree
+ * @cs: the cpuset to consider
+ * @new_mems: a temp variable for calculating new effective_mems
  *
- * This will update nodemasks of tasks in @root_cs and all other empty cpusets
- * which take on nodemask of @root_cs.
+ * When the configured nodemask is changed, the effective nodemasks of this
+ * cpuset and all its descendants need to be updated.
+ *
+ * On legacy hierarchies, effective_mems is the same as mems_allowed.
  *
  * Called with cpuset_mutex held
  */
-static void update_tasks_nodemask_hier(struct cpuset *root_cs, bool update_root)
+static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
 {
        struct cpuset *cp;
        struct cgroup_subsys_state *pos_css;
 
        rcu_read_lock();
-       cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-               if (cp == root_cs) {
-                       if (!update_root)
-                               continue;
-               } else {
-                       /* skip the whole subtree if @cp have some CPU */
-                       if (!nodes_empty(cp->mems_allowed)) {
-                               pos_css = css_rightmost_descendant(pos_css);
-                               continue;
-                       }
+       cpuset_for_each_descendant_pre(cp, pos_css, cs) {
+               struct cpuset *parent = parent_cs(cp);
+
+               nodes_and(*new_mems, cp->mems_allowed, parent->effective_mems);
+
+               /*
+                * If it becomes empty, inherit the effective mask of the
+                * parent, which is guaranteed to have some MEMs.
+                */
+               if (nodes_empty(*new_mems))
+                       *new_mems = parent->effective_mems;
+
+               /* Skip the whole subtree if the nodemask remains the same. */
+               if (nodes_equal(*new_mems, cp->effective_mems)) {
+                       pos_css = css_rightmost_descendant(pos_css);
+                       continue;
                }
+
                if (!css_tryget_online(&cp->css))
                        continue;
                rcu_read_unlock();
 
+               mutex_lock(&callback_mutex);
+               cp->effective_mems = *new_mems;
+               mutex_unlock(&callback_mutex);
+
+               WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
+                       !nodes_equal(cp->mems_allowed, cp->effective_mems));
+
                update_tasks_nodemask(cp);
 
                rcu_read_lock();
@@ -1156,8 +1187,8 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
                        goto done;
 
                if (!nodes_subset(trialcs->mems_allowed,
-                               node_states[N_MEMORY])) {
-                       retval =  -EINVAL;
+                                 top_cpuset.mems_allowed)) {
+                       retval = -EINVAL;
                        goto done;
                }
        }
@@ -1174,14 +1205,21 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
        cs->mems_allowed = trialcs->mems_allowed;
        mutex_unlock(&callback_mutex);
 
-       update_tasks_nodemask_hier(cs, true);
+       /* use trialcs->mems_allowed as a temp variable */
+       update_nodemasks_hier(cs, &trialcs->mems_allowed);
 done:
        return retval;
 }
 
 int current_cpuset_is_being_rebound(void)
 {
-       return task_cs(current) == cpuset_being_rebound;
+       int ret;
+
+       rcu_read_lock();
+       ret = task_cs(current) == cpuset_being_rebound;
+       rcu_read_unlock();
+
+       return ret;
 }
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
@@ -1383,12 +1421,9 @@ static int cpuset_can_attach(struct cgroup_subsys_state *css,
 
        mutex_lock(&cpuset_mutex);
 
-       /*
-        * We allow to move tasks into an empty cpuset if sane_behavior
-        * flag is set.
-        */
+       /* allow moving tasks into an empty cpuset if on default hierarchy */
        ret = -ENOSPC;
-       if (!cgroup_sane_behavior(css->cgroup) &&
+       if (!cgroup_on_dfl(css->cgroup) &&
            (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)))
                goto out_unlock;
 
@@ -1446,8 +1481,6 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
        struct task_struct *leader = cgroup_taskset_first(tset);
        struct cpuset *cs = css_cs(css);
        struct cpuset *oldcs = cpuset_attach_old_cs;
-       struct cpuset *cpus_cs = effective_cpumask_cpuset(cs);
-       struct cpuset *mems_cs = effective_nodemask_cpuset(cs);
 
        mutex_lock(&cpuset_mutex);
 
@@ -1455,9 +1488,9 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
        if (cs == &top_cpuset)
                cpumask_copy(cpus_attach, cpu_possible_mask);
        else
-               guarantee_online_cpus(cpus_cs, cpus_attach);
+               guarantee_online_cpus(cs, cpus_attach);
 
-       guarantee_online_mems(mems_cs, &cpuset_attach_nodemask_to);
+       guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
 
        cgroup_taskset_for_each(task, tset) {
                /*
@@ -1474,11 +1507,9 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
         * Change mm, possibly for multiple threads in a threadgroup. This is
         * expensive and may sleep.
         */
-       cpuset_attach_nodemask_to = cs->mems_allowed;
+       cpuset_attach_nodemask_to = cs->effective_mems;
        mm = get_task_mm(leader);
        if (mm) {
-               struct cpuset *mems_oldcs = effective_nodemask_cpuset(oldcs);
-
                mpol_rebind_mm(mm, &cpuset_attach_nodemask_to);
 
                /*
@@ -1489,7 +1520,7 @@ static void cpuset_attach(struct cgroup_subsys_state *css,
                 * mm from.
                 */
                if (is_memory_migrate(cs)) {
-                       cpuset_migrate_mm(mm, &mems_oldcs->old_mems_allowed,
+                       cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
                                          &cpuset_attach_nodemask_to);
                }
                mmput(mm);
@@ -1510,6 +1541,8 @@ typedef enum {
        FILE_MEMORY_MIGRATE,
        FILE_CPULIST,
        FILE_MEMLIST,
+       FILE_EFFECTIVE_CPULIST,
+       FILE_EFFECTIVE_MEMLIST,
        FILE_CPU_EXCLUSIVE,
        FILE_MEM_EXCLUSIVE,
        FILE_MEM_HARDWALL,
@@ -1617,7 +1650,17 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
         * resources, wait for the previously scheduled operations before
         * proceeding, so that we don't end up keep removing tasks added
         * after execution capability is restored.
+        *
+        * cpuset_hotplug_work calls back into cgroup core via
+        * cgroup_transfer_tasks(), and waiting for it from a cgroupfs
+        * operation like this one can lead to a deadlock through kernfs
+        * active_ref protection.  Let's break the protection.  Losing the
+        * protection is okay as we check whether @cs is online after
+        * grabbing cpuset_mutex anyway.  This only happens on the legacy
+        * hierarchies.
         */
+       css_get(&cs->css);
+       kernfs_break_active_protection(of->kn);
        flush_work(&cpuset_hotplug_work);
 
        mutex_lock(&cpuset_mutex);
@@ -1645,6 +1688,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
        free_trial_cpuset(trialcs);
 out_unlock:
        mutex_unlock(&cpuset_mutex);
+       kernfs_unbreak_active_protection(of->kn);
+       css_put(&cs->css);
        return retval ?: nbytes;
 }
 
@@ -1676,6 +1721,12 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
        case FILE_MEMLIST:
                s += nodelist_scnprintf(s, count, cs->mems_allowed);
                break;
+       case FILE_EFFECTIVE_CPULIST:
+               s += cpulist_scnprintf(s, count, cs->effective_cpus);
+               break;
+       case FILE_EFFECTIVE_MEMLIST:
+               s += nodelist_scnprintf(s, count, cs->effective_mems);
+               break;
        default:
                ret = -EINVAL;
                goto out_unlock;
@@ -1760,6 +1811,18 @@ static struct cftype files[] = {
                .private = FILE_MEMLIST,
        },
 
+       {
+               .name = "effective_cpus",
+               .seq_show = cpuset_common_seq_show,
+               .private = FILE_EFFECTIVE_CPULIST,
+       },
+
+       {
+               .name = "effective_mems",
+               .seq_show = cpuset_common_seq_show,
+               .private = FILE_EFFECTIVE_MEMLIST,
+       },
+
        {
                .name = "cpu_exclusive",
                .read_u64 = cpuset_read_u64,
@@ -1851,18 +1914,26 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
        cs = kzalloc(sizeof(*cs), GFP_KERNEL);
        if (!cs)
                return ERR_PTR(-ENOMEM);
-       if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL)) {
-               kfree(cs);
-               return ERR_PTR(-ENOMEM);
-       }
+       if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
+               goto free_cs;
+       if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
+               goto free_cpus;
 
        set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
        cpumask_clear(cs->cpus_allowed);
        nodes_clear(cs->mems_allowed);
+       cpumask_clear(cs->effective_cpus);
+       nodes_clear(cs->effective_mems);
        fmeter_init(&cs->fmeter);
        cs->relax_domain_level = -1;
 
        return &cs->css;
+
+free_cpus:
+       free_cpumask_var(cs->cpus_allowed);
+free_cs:
+       kfree(cs);
+       return ERR_PTR(-ENOMEM);
 }
 
 static int cpuset_css_online(struct cgroup_subsys_state *css)
@@ -1885,6 +1956,13 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 
        cpuset_inc();
 
+       mutex_lock(&callback_mutex);
+       if (cgroup_on_dfl(cs->css.cgroup)) {
+               cpumask_copy(cs->effective_cpus, parent->effective_cpus);
+               cs->effective_mems = parent->effective_mems;
+       }
+       mutex_unlock(&callback_mutex);
+
        if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
                goto out_unlock;
 
@@ -1944,20 +2022,40 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
 {
        struct cpuset *cs = css_cs(css);
 
+       free_cpumask_var(cs->effective_cpus);
        free_cpumask_var(cs->cpus_allowed);
        kfree(cs);
 }
 
+static void cpuset_bind(struct cgroup_subsys_state *root_css)
+{
+       mutex_lock(&cpuset_mutex);
+       mutex_lock(&callback_mutex);
+
+       if (cgroup_on_dfl(root_css->cgroup)) {
+               cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
+               top_cpuset.mems_allowed = node_possible_map;
+       } else {
+               cpumask_copy(top_cpuset.cpus_allowed,
+                            top_cpuset.effective_cpus);
+               top_cpuset.mems_allowed = top_cpuset.effective_mems;
+       }
+
+       mutex_unlock(&callback_mutex);
+       mutex_unlock(&cpuset_mutex);
+}
+
 struct cgroup_subsys cpuset_cgrp_subsys = {
-       .css_alloc = cpuset_css_alloc,
-       .css_online = cpuset_css_online,
-       .css_offline = cpuset_css_offline,
-       .css_free = cpuset_css_free,
-       .can_attach = cpuset_can_attach,
-       .cancel_attach = cpuset_cancel_attach,
-       .attach = cpuset_attach,
-       .base_cftypes = files,
-       .early_init = 1,
+       .css_alloc      = cpuset_css_alloc,
+       .css_online     = cpuset_css_online,
+       .css_offline    = cpuset_css_offline,
+       .css_free       = cpuset_css_free,
+       .can_attach     = cpuset_can_attach,
+       .cancel_attach  = cpuset_cancel_attach,
+       .attach         = cpuset_attach,
+       .bind           = cpuset_bind,
+       .legacy_cftypes = files,
+       .early_init     = 1,
 };
 
 /**
@@ -1972,9 +2070,13 @@ int __init cpuset_init(void)
 
        if (!alloc_cpumask_var(&top_cpuset.cpus_allowed, GFP_KERNEL))
                BUG();
+       if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
+               BUG();
 
        cpumask_setall(top_cpuset.cpus_allowed);
        nodes_setall(top_cpuset.mems_allowed);
+       cpumask_setall(top_cpuset.effective_cpus);
+       nodes_setall(top_cpuset.effective_mems);
 
        fmeter_init(&top_cpuset.fmeter);
        set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags);
@@ -2017,6 +2119,66 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
        }
 }
 
+static void
+hotplug_update_tasks_legacy(struct cpuset *cs,
+                           struct cpumask *new_cpus, nodemask_t *new_mems,
+                           bool cpus_updated, bool mems_updated)
+{
+       bool is_empty;
+
+       mutex_lock(&callback_mutex);
+       cpumask_copy(cs->cpus_allowed, new_cpus);
+       cpumask_copy(cs->effective_cpus, new_cpus);
+       cs->mems_allowed = *new_mems;
+       cs->effective_mems = *new_mems;
+       mutex_unlock(&callback_mutex);
+
+       /*
+        * Don't call update_tasks_cpumask() if the cpuset becomes empty,
+        * as the tasks will be migrated to an ancestor.
+        */
+       if (cpus_updated && !cpumask_empty(cs->cpus_allowed))
+               update_tasks_cpumask(cs);
+       if (mems_updated && !nodes_empty(cs->mems_allowed))
+               update_tasks_nodemask(cs);
+
+       is_empty = cpumask_empty(cs->cpus_allowed) ||
+                  nodes_empty(cs->mems_allowed);
+
+       mutex_unlock(&cpuset_mutex);
+
+       /*
+        * Move tasks to the nearest ancestor with execution resources.
+        * This is a full cgroup operation which will also call back
+        * into cpuset, so it should be done outside any lock.
+        */
+       if (is_empty)
+               remove_tasks_in_empty_cpuset(cs);
+
+       mutex_lock(&cpuset_mutex);
+}
+
+static void
+hotplug_update_tasks(struct cpuset *cs,
+                    struct cpumask *new_cpus, nodemask_t *new_mems,
+                    bool cpus_updated, bool mems_updated)
+{
+       if (cpumask_empty(new_cpus))
+               cpumask_copy(new_cpus, parent_cs(cs)->effective_cpus);
+       if (nodes_empty(*new_mems))
+               *new_mems = parent_cs(cs)->effective_mems;
+
+       mutex_lock(&callback_mutex);
+       cpumask_copy(cs->effective_cpus, new_cpus);
+       cs->effective_mems = *new_mems;
+       mutex_unlock(&callback_mutex);
+
+       if (cpus_updated)
+               update_tasks_cpumask(cs);
+       if (mems_updated)
+               update_tasks_nodemask(cs);
+}
+
 /**
  * cpuset_hotplug_update_tasks - update tasks in a cpuset for hotunplug
  * @cs: cpuset in interest
@@ -2027,11 +2189,10 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
  */
 static void cpuset_hotplug_update_tasks(struct cpuset *cs)
 {
-       static cpumask_t off_cpus;
-       static nodemask_t off_mems;
-       bool is_empty;
-       bool sane = cgroup_sane_behavior(cs->css.cgroup);
-
+       static cpumask_t new_cpus;
+       static nodemask_t new_mems;
+       bool cpus_updated;
+       bool mems_updated;
 retry:
        wait_event(cpuset_attach_wq, cs->attach_in_progress == 0);
 
@@ -2046,51 +2207,20 @@ retry:
                goto retry;
        }
 
-       cpumask_andnot(&off_cpus, cs->cpus_allowed, top_cpuset.cpus_allowed);
-       nodes_andnot(off_mems, cs->mems_allowed, top_cpuset.mems_allowed);
+       cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+       nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
 
-       mutex_lock(&callback_mutex);
-       cpumask_andnot(cs->cpus_allowed, cs->cpus_allowed, &off_cpus);
-       mutex_unlock(&callback_mutex);
-
-       /*
-        * If sane_behavior flag is set, we need to update tasks' cpumask
-        * for empty cpuset to take on ancestor's cpumask. Otherwise, don't
-        * call update_tasks_cpumask() if the cpuset becomes empty, as
-        * the tasks in it will be migrated to an ancestor.
-        */
-       if ((sane && cpumask_empty(cs->cpus_allowed)) ||
-           (!cpumask_empty(&off_cpus) && !cpumask_empty(cs->cpus_allowed)))
-               update_tasks_cpumask(cs);
-
-       mutex_lock(&callback_mutex);
-       nodes_andnot(cs->mems_allowed, cs->mems_allowed, off_mems);
-       mutex_unlock(&callback_mutex);
-
-       /*
-        * If sane_behavior flag is set, we need to update tasks' nodemask
-        * for empty cpuset to take on ancestor's nodemask. Otherwise, don't
-        * call update_tasks_nodemask() if the cpuset becomes empty, as
-        * the tasks in it will be migratd to an ancestor.
-        */
-       if ((sane && nodes_empty(cs->mems_allowed)) ||
-           (!nodes_empty(off_mems) && !nodes_empty(cs->mems_allowed)))
-               update_tasks_nodemask(cs);
+       cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
+       mems_updated = !nodes_equal(new_mems, cs->effective_mems);
 
-       is_empty = cpumask_empty(cs->cpus_allowed) ||
-               nodes_empty(cs->mems_allowed);
+       if (cgroup_on_dfl(cs->css.cgroup))
+               hotplug_update_tasks(cs, &new_cpus, &new_mems,
+                                    cpus_updated, mems_updated);
+       else
+               hotplug_update_tasks_legacy(cs, &new_cpus, &new_mems,
+                                           cpus_updated, mems_updated);
 
        mutex_unlock(&cpuset_mutex);
-
-       /*
-        * If sane_behavior flag is set, we'll keep tasks in empty cpusets.
-        *
-        * Otherwise move tasks to the nearest ancestor with execution
-        * resources.  This is full cgroup operation which will
-        * also call back into cpuset.  Should be done outside any lock.
-        */
-       if (!sane && is_empty)
-               remove_tasks_in_empty_cpuset(cs);
 }
 
 /**
@@ -2114,6 +2244,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        static cpumask_t new_cpus;
        static nodemask_t new_mems;
        bool cpus_updated, mems_updated;
+       bool on_dfl = cgroup_on_dfl(top_cpuset.css.cgroup);
 
        mutex_lock(&cpuset_mutex);
 
@@ -2121,13 +2252,15 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        cpumask_copy(&new_cpus, cpu_active_mask);
        new_mems = node_states[N_MEMORY];
 
-       cpus_updated = !cpumask_equal(top_cpuset.cpus_allowed, &new_cpus);
-       mems_updated = !nodes_equal(top_cpuset.mems_allowed, new_mems);
+       cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
+       mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
 
        /* synchronize cpus_allowed to cpu_active_mask */
        if (cpus_updated) {
                mutex_lock(&callback_mutex);
-               cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+               if (!on_dfl)
+                       cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+               cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
                mutex_unlock(&callback_mutex);
                /* we don't mess with cpumasks of tasks in top_cpuset */
        }
@@ -2135,7 +2268,9 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
        /* synchronize mems_allowed to N_MEMORY */
        if (mems_updated) {
                mutex_lock(&callback_mutex);
-               top_cpuset.mems_allowed = new_mems;
+               if (!on_dfl)
+                       top_cpuset.mems_allowed = new_mems;
+               top_cpuset.effective_mems = new_mems;
                mutex_unlock(&callback_mutex);
                update_tasks_nodemask(&top_cpuset);
        }
@@ -2210,6 +2345,9 @@ void __init cpuset_init_smp(void)
        top_cpuset.mems_allowed = node_states[N_MEMORY];
        top_cpuset.old_mems_allowed = top_cpuset.mems_allowed;
 
+       cpumask_copy(top_cpuset.effective_cpus, cpu_active_mask);
+       top_cpuset.effective_mems = node_states[N_MEMORY];
+
        register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
 }
 
@@ -2226,23 +2364,17 @@ void __init cpuset_init_smp(void)
 
 void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
 {
-       struct cpuset *cpus_cs;
-
        mutex_lock(&callback_mutex);
        rcu_read_lock();
-       cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
-       guarantee_online_cpus(cpus_cs, pmask);
+       guarantee_online_cpus(task_cs(tsk), pmask);
        rcu_read_unlock();
        mutex_unlock(&callback_mutex);
 }
 
 void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 {
-       struct cpuset *cpus_cs;
-
        rcu_read_lock();
-       cpus_cs = effective_cpumask_cpuset(task_cs(tsk));
-       do_set_cpus_allowed(tsk, cpus_cs->cpus_allowed);
+       do_set_cpus_allowed(tsk, task_cs(tsk)->effective_cpus);
        rcu_read_unlock();
 
        /*
@@ -2281,13 +2413,11 @@ void cpuset_init_current_mems_allowed(void)
 
 nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
 {
-       struct cpuset *mems_cs;
        nodemask_t mask;
 
        mutex_lock(&callback_mutex);
        rcu_read_lock();
-       mems_cs = effective_nodemask_cpuset(task_cs(tsk));
-       guarantee_online_mems(mems_cs, &mask);
+       guarantee_online_mems(task_cs(tsk), &mask);
        rcu_read_unlock();
        mutex_unlock(&callback_mutex);
 
index 5fa58e4..1cf24b3 100644
@@ -40,6 +40,7 @@
 #include <linux/mm_types.h>
 #include <linux/cgroup.h>
 #include <linux/module.h>
+#include <linux/mman.h>
 
 #include "internal.h"
 
@@ -2319,7 +2320,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
        next_parent = rcu_dereference(next_ctx->parent_ctx);
 
        /* If either context lacks a parent context, they cannot be clones. */
-       if (!parent && !next_parent)
+       if (!parent || !next_parent)
                goto unlock;
 
        if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
@@ -5128,6 +5129,7 @@ struct perf_mmap_event {
        int                     maj, min;
        u64                     ino;
        u64                     ino_generation;
+       u32                     prot, flags;
 
        struct {
                struct perf_event_header        header;
@@ -5169,6 +5171,8 @@ static void perf_event_mmap_output(struct perf_event *event,
                mmap_event->event_id.header.size += sizeof(mmap_event->min);
                mmap_event->event_id.header.size += sizeof(mmap_event->ino);
                mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
+               mmap_event->event_id.header.size += sizeof(mmap_event->prot);
+               mmap_event->event_id.header.size += sizeof(mmap_event->flags);
        }
 
        perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
@@ -5187,6 +5191,8 @@ static void perf_event_mmap_output(struct perf_event *event,
                perf_output_put(&handle, mmap_event->min);
                perf_output_put(&handle, mmap_event->ino);
                perf_output_put(&handle, mmap_event->ino_generation);
+               perf_output_put(&handle, mmap_event->prot);
+               perf_output_put(&handle, mmap_event->flags);
        }
 
        __output_copy(&handle, mmap_event->file_name,
@@ -5205,6 +5211,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        struct file *file = vma->vm_file;
        int maj = 0, min = 0;
        u64 ino = 0, gen = 0;
+       u32 prot = 0, flags = 0;
        unsigned int size;
        char tmp[16];
        char *buf = NULL;
@@ -5235,8 +5242,36 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                gen = inode->i_generation;
                maj = MAJOR(dev);
                min = MINOR(dev);
+
+               if (vma->vm_flags & VM_READ)
+                       prot |= PROT_READ;
+               if (vma->vm_flags & VM_WRITE)
+                       prot |= PROT_WRITE;
+               if (vma->vm_flags & VM_EXEC)
+                       prot |= PROT_EXEC;
+
+               if (vma->vm_flags & VM_MAYSHARE)
+                       flags = MAP_SHARED;
+               else
+                       flags = MAP_PRIVATE;
+
+               if (vma->vm_flags & VM_DENYWRITE)
+                       flags |= MAP_DENYWRITE;
+               if (vma->vm_flags & VM_MAYEXEC)
+                       flags |= MAP_EXECUTABLE;
+               if (vma->vm_flags & VM_LOCKED)
+                       flags |= MAP_LOCKED;
+               if (vma->vm_flags & VM_HUGETLB)
+                       flags |= MAP_HUGETLB;
+
                goto got_name;
        } else {
+               if (vma->vm_ops && vma->vm_ops->name) {
+                       name = (char *) vma->vm_ops->name(vma);
+                       if (name)
+                               goto cpy_name;
+               }
+
                name = (char *)arch_vma_name(vma);
                if (name)
                        goto cpy_name;
@@ -5275,6 +5310,8 @@ got_name:
        mmap_event->min = min;
        mmap_event->ino = ino;
        mmap_event->ino_generation = gen;
+       mmap_event->prot = prot;
+       mmap_event->flags = flags;
 
        if (!(vma->vm_flags & VM_EXEC))
                mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
@@ -5315,6 +5352,8 @@ void perf_event_mmap(struct vm_area_struct *vma)
                /* .min (attr_mmap2 only) */
                /* .ino (attr_mmap2 only) */
                /* .ino_generation (attr_mmap2 only) */
+               /* .prot (attr_mmap2 only) */
+               /* .flags (attr_mmap2 only) */
        };
 
        perf_event_mmap_event(&mmap_event);
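The new prot/flags fields ride in PERF_RECORD_MMAP2 samples; a hedged sketch of how a userspace profiler might request them (minimal attribute setup, error handling omitted):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* open a dummy software event on @pid that emits PERF_RECORD_MMAP2,
 * which now carries ino_generation, prot and flags per mapping */
static int open_mmap2_events(pid_t pid)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_SOFTWARE,
                .config         = PERF_COUNT_SW_DUMMY,
                .size           = sizeof(attr),
                .mmap2          = 1,    /* extended mmap records */
                .exclude_kernel = 1,
        };

        return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}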
@@ -6897,10 +6936,6 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        if (ret)
                return -EFAULT;
 
-       /* disabled for now */
-       if (attr->mmap2)
-               return -EINVAL;
-
        if (attr->__reserved_1)
                return -EINVAL;
 
@@ -7429,7 +7464,19 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       perf_remove_from_context(child_event, true);
+       /*
+        * Do not destroy the 'original' grouping; because of the context
+        * switch optimization the original events could've ended up in a
+        * random child task.
+        *
+        * If we were to destroy the original group, all group related
+        * operations would cease to function properly after this random
+        * child dies.
+        *
+        * Do destroy all inherited groups, we don't care about those
+        * and being thorough is better.
+        */
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7445,7 +7492,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
        struct perf_event *child_event, *next;
-       struct perf_event_context *child_ctx;
+       struct perf_event_context *child_ctx, *parent_ctx;
        unsigned long flags;
 
        if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7470,6 +7517,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        raw_spin_lock(&child_ctx->lock);
        task_ctx_sched_out(child_ctx);
        child->perf_event_ctxp[ctxn] = NULL;
+
+       /*
+        * In order to avoid freeing: child_ctx->parent_ctx->task
+        * under perf_event_context::lock, grab another reference.
+        */
+       parent_ctx = child_ctx->parent_ctx;
+       if (parent_ctx)
+               get_ctx(parent_ctx);
+
        /*
         * If this context is a clone; unclone it so it can't get
         * swapped to another process while we're removing all
@@ -7479,6 +7535,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        update_context_time(child_ctx);
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
+       /*
+        * Now that we no longer hold perf_event_context::lock, drop
+        * our extra child_ctx->parent_ctx reference.
+        */
+       if (parent_ctx)
+               put_ctx(parent_ctx);
+
        /*
         * Report the task dead after unscheduling the events so that we
         * won't get any samples after PERF_RECORD_EXIT. We can however still
@@ -7747,7 +7810,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 /*
  * Initialize the perf_event context in task_struct
  */
-int perf_event_init_context(struct task_struct *child, int ctxn)
+static int perf_event_init_context(struct task_struct *child, int ctxn)
 {
        struct perf_event_context *child_ctx, *parent_ctx;
        struct perf_event_context *cloned_ctx;
index c445e39..6f3254e 100644
@@ -846,7 +846,7 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u
 {
        int err;
 
-       if (!consumer_del(uprobe, uc))  /* WARN? */
+       if (WARN_ON(!consumer_del(uprobe, uc)))
                return;
 
        err = register_for_each_vma(uprobe, NULL);
@@ -927,7 +927,7 @@ int uprobe_apply(struct inode *inode, loff_t offset,
        int ret = -ENOENT;
 
        uprobe = find_uprobe(inode, offset);
-       if (!uprobe)
+       if (WARN_ON(!uprobe))
                return ret;
 
        down_write(&uprobe->register_rwsem);
@@ -952,7 +952,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
        struct uprobe *uprobe;
 
        uprobe = find_uprobe(inode, offset);
-       if (!uprobe)
+       if (WARN_ON(!uprobe))
                return;
 
        down_write(&uprobe->register_rwsem);
index d2799d1..962885e 100644
@@ -1095,7 +1095,6 @@ static void rt_mutex_init_task(struct task_struct *p)
        p->pi_waiters = RB_ROOT;
        p->pi_waiters_leftmost = NULL;
        p->pi_blocked_on = NULL;
-       p->pi_top_task = NULL;
 #endif
 }
 
@@ -1487,7 +1486,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        total_forks++;
        spin_unlock(&current->sighand->siglock);
+       syscall_tracepoint_update(p);
        write_unlock_irq(&tasklist_lock);
+
        proc_fork_connector(p);
        cgroup_post_fork(p);
        if (clone_flags & CLONE_THREAD)
index b632b5f..d3a9d94 100644
@@ -792,93 +792,90 @@ void exit_pi_state_list(struct task_struct *curr)
  * [10] There is no transient state which leaves owner and user space
  *     TID out of sync.
  */
-static int
-lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
-               union futex_key *key, struct futex_pi_state **ps)
+
+/*
+ * Validate that the existing waiter has a pi_state and sanity check
+ * the pi_state against the user space value. If correct, attach to
+ * it.
+ */
+static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state,
+                             struct futex_pi_state **ps)
 {
-       struct futex_pi_state *pi_state = NULL;
-       struct futex_q *this, *next;
-       struct task_struct *p;
        pid_t pid = uval & FUTEX_TID_MASK;
 
-       plist_for_each_entry_safe(this, next, &hb->chain, list) {
-               if (match_futex(&this->key, key)) {
-                       /*
-                        * Sanity check the waiter before increasing
-                        * the refcount and attaching to it.
-                        */
-                       pi_state = this->pi_state;
-                       /*
-                        * Userspace might have messed up non-PI and
-                        * PI futexes [3]
-                        */
-                       if (unlikely(!pi_state))
-                               return -EINVAL;
+       /*
+        * Userspace might have messed up non-PI and PI futexes [3]
+        */
+       if (unlikely(!pi_state))
+               return -EINVAL;
 
-                       WARN_ON(!atomic_read(&pi_state->refcount));
+       WARN_ON(!atomic_read(&pi_state->refcount));
 
+       /*
+        * Handle the owner died case:
+        */
+       if (uval & FUTEX_OWNER_DIED) {
+               /*
+                * exit_pi_state_list sets owner to NULL and wakes the
+                * topmost waiter. The task which acquires the
+                * pi_state->rt_mutex will fixup owner.
+                */
+               if (!pi_state->owner) {
                        /*
-                        * Handle the owner died case:
+                        * No pi state owner, but the user space TID
+                        * is not 0. Inconsistent state. [5]
                         */
-                       if (uval & FUTEX_OWNER_DIED) {
-                               /*
-                                * exit_pi_state_list sets owner to NULL and
-                                * wakes the topmost waiter. The task which
-                                * acquires the pi_state->rt_mutex will fixup
-                                * owner.
-                                */
-                               if (!pi_state->owner) {
-                                       /*
-                                        * No pi state owner, but the user
-                                        * space TID is not 0. Inconsistent
-                                        * state. [5]
-                                        */
-                                       if (pid)
-                                               return -EINVAL;
-                                       /*
-                                        * Take a ref on the state and
-                                        * return. [4]
-                                        */
-                                       goto out_state;
-                               }
-
-                               /*
-                                * If TID is 0, then either the dying owner
-                                * has not yet executed exit_pi_state_list()
-                                * or some waiter acquired the rtmutex in the
-                                * pi state, but did not yet fixup the TID in
-                                * user space.
-                                *
-                                * Take a ref on the state and return. [6]
-                                */
-                               if (!pid)
-                                       goto out_state;
-                       } else {
-                               /*
-                                * If the owner died bit is not set,
-                                * then the pi_state must have an
-                                * owner. [7]
-                                */
-                               if (!pi_state->owner)
-                                       return -EINVAL;
-                       }
-
+                       if (pid)
+                               return -EINVAL;
                        /*
-                        * Bail out if user space manipulated the
-                        * futex value. If pi state exists then the
-                        * owner TID must be the same as the user
-                        * space TID. [9/10]
+                        * Take a ref on the state and return success. [4]
                         */
-                       if (pid != task_pid_vnr(pi_state->owner))
-                               return -EINVAL;
-
-               out_state:
-                       atomic_inc(&pi_state->refcount);
-                       *ps = pi_state;
-                       return 0;
+                       goto out_state;
                }
+
+               /*
+                * If TID is 0, then either the dying owner has not
+                * yet executed exit_pi_state_list() or some waiter
+                * acquired the rtmutex in the pi state, but did not
+                * yet fixup the TID in user space.
+                *
+                * Take a ref on the state and return success. [6]
+                */
+               if (!pid)
+                       goto out_state;
+       } else {
+               /*
+                * If the owner died bit is not set, then the pi_state
+                * must have an owner. [7]
+                */
+               if (!pi_state->owner)
+                       return -EINVAL;
        }
 
+       /*
+        * Bail out if user space manipulated the futex value. If pi
+        * state exists then the owner TID must be the same as the
+        * user space TID. [9/10]
+        */
+       if (pid != task_pid_vnr(pi_state->owner))
+               return -EINVAL;
+out_state:
+       atomic_inc(&pi_state->refcount);
+       *ps = pi_state;
+       return 0;
+}
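The validation above reduces to a small pure predicate over the user space word and the kernel's view of the owner. A runnable userspace model of the rules [4]-[10] (a sketch only, not kernel code; constants as defined in <linux/futex.h>, and owner_tid == 0 stands in for pi_state->owner == NULL):

#include <assert.h>
#include <stdint.h>

#define FUTEX_OWNER_DIED	0x40000000
#define FUTEX_TID_MASK		0x3fffffff

static int pi_state_check(uint32_t uval, uint32_t owner_tid)
{
	uint32_t pid = uval & FUTEX_TID_MASK;

	if (uval & FUTEX_OWNER_DIED) {
		if (!owner_tid)
			return pid ? -1 : 0;	/* [5] vs [4] */
		if (!pid)
			return 0;		/* [6] */
	} else if (!owner_tid) {
		return -1;			/* [7] */
	}
	return pid == owner_tid ? 0 : -1;	/* [9/10] */
}

int main(void)
{
	assert(pi_state_check(FUTEX_OWNER_DIED, 0) == 0);	 /* [4] */
	assert(pi_state_check(FUTEX_OWNER_DIED | 42, 0) == -1); /* [5] */
	assert(pi_state_check(42, 0) == -1);			 /* [7] */
	assert(pi_state_check(42, 42) == 0);			 /* [9/10] */
	return 0;
}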
+
+/*
+ * Lookup the task for the TID provided from user space and attach to
+ * it after doing proper sanity checks.
+ */
+static int attach_to_pi_owner(u32 uval, union futex_key *key,
+                             struct futex_pi_state **ps)
+{
+       pid_t pid = uval & FUTEX_TID_MASK;
+       struct futex_pi_state *pi_state;
+       struct task_struct *p;
+
        /*
         * We are the first waiter - try to look up the real owner and attach
         * the new pi_state to it, but bail out when TID = 0 [1]
@@ -920,7 +917,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        pi_state = alloc_pi_state();
 
        /*
-        * Initialize the pi_mutex in locked state and make 'p'
+        * Initialize the pi_mutex in locked state and make @p
         * the owner of it:
         */
        rt_mutex_init_proxy_locked(&pi_state->pi_mutex, p);
@@ -940,6 +937,36 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
        return 0;
 }
 
+static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
+                          union futex_key *key, struct futex_pi_state **ps)
+{
+       struct futex_q *match = futex_top_waiter(hb, key);
+
+       /*
+        * If there is a waiter on that futex, validate it and
+        * attach to the pi_state when the validation succeeds.
+        */
+       if (match)
+               return attach_to_pi_state(uval, match->pi_state, ps);
+
+       /*
+        * We are the first waiter - try to look up the owner based on
+        * @uval and attach to it.
+        */
+       return attach_to_pi_owner(uval, key, ps);
+}
+
+static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval)
+{
+       u32 uninitialized_var(curval);
+
+       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
+               return -EFAULT;
+
+       /* If user space value changed, let the caller retry */
+       return curval != uval ? -EAGAIN : 0;
+}
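cmpxchg_futex_value_locked() atomically replaces the futex word only if it still holds the expected value, and reports faults separately. A userspace analogue of the helper above, built on C11 atomics (a sketch; the in-kernel version additionally handles the -EFAULT path, which this model omits):

#include <assert.h>
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

static int lock_pi_update_atomic(_Atomic uint32_t *uaddr,
				 uint32_t uval, uint32_t newval)
{
	uint32_t expected = uval;

	if (atomic_compare_exchange_strong(uaddr, &expected, newval))
		return 0;
	/* If the user space value changed, let the caller retry */
	return -EAGAIN;
}

int main(void)
{
	_Atomic uint32_t futex = 0;

	assert(lock_pi_update_atomic(&futex, 0, 42) == 0);
	assert(lock_pi_update_atomic(&futex, 0, 7) == -EAGAIN);
	assert(futex == 42);
	return 0;
}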
+
 /**
  * futex_lock_pi_atomic() - Atomic work required to acquire a pi aware futex
  * @uaddr:             the pi futex user address
@@ -963,113 +990,69 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
                                struct futex_pi_state **ps,
                                struct task_struct *task, int set_waiters)
 {
-       int lock_taken, ret, force_take = 0;
-       u32 uval, newval, curval, vpid = task_pid_vnr(task);
-
-retry:
-       ret = lock_taken = 0;
+       u32 uval, newval, vpid = task_pid_vnr(task);
+       struct futex_q *match;
+       int ret;
 
        /*
-        * To avoid races, we attempt to take the lock here again
-        * (by doing a 0 -> TID atomic cmpxchg), while holding all
-        * the locks. It will most likely not succeed.
+        * Read the user space value first so we can validate a few
+        * things before proceeding further.
         */
-       newval = vpid;
-       if (set_waiters)
-               newval |= FUTEX_WAITERS;
-
-       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval)))
+       if (get_futex_value_locked(&uval, uaddr))
                return -EFAULT;
 
        /*
         * Detect deadlocks.
         */
-       if ((unlikely((curval & FUTEX_TID_MASK) == vpid)))
+       if ((unlikely((uval & FUTEX_TID_MASK) == vpid)))
                return -EDEADLK;
 
        /*
-        * Surprise - we got the lock, but we do not trust user space at all.
-        */
-       if (unlikely(!curval)) {
-               /*
-                * We verify whether there is kernel state for this
-                * futex. If not, we can safely assume, that the 0 ->
-                * TID transition is correct. If state exists, we do
-                * not bother to fixup the user space state as it was
-                * corrupted already.
-                */
-               return futex_top_waiter(hb, key) ? -EINVAL : 1;
-       }
-
-       uval = curval;
-
-       /*
-        * Set the FUTEX_WAITERS flag, so the owner will know it has someone
-        * to wake at the next unlock.
+        * Lookup existing state first. If it exists, try to attach to
+        * its pi_state.
         */
-       newval = curval | FUTEX_WAITERS;
+       match = futex_top_waiter(hb, key);
+       if (match)
+               return attach_to_pi_state(uval, match->pi_state, ps);
 
        /*
-        * Should we force take the futex? See below.
+        * No waiter and the user space TID is 0. We are here because
+        * the waiters bit or the owner died bit is set, we were called
+        * from requeue_cmp_pi, or something else forced us into the
+        * syscall slow path.
         */
-       if (unlikely(force_take)) {
+       if (!(uval & FUTEX_TID_MASK)) {
                /*
-                * Keep the OWNER_DIED and the WAITERS bit and set the
-                * new TID value.
+                * We take over the futex. No other waiters and the user space
+                * TID is 0. We preserve the owner died bit.
                 */
-               newval = (curval & ~FUTEX_TID_MASK) | vpid;
-               force_take = 0;
-               lock_taken = 1;
-       }
+               newval = uval & FUTEX_OWNER_DIED;
+               newval |= vpid;
 
-       if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
-               return -EFAULT;
-       if (unlikely(curval != uval))
-               goto retry;
+               /* The futex requeue_pi code can enforce the waiters bit */
+               if (set_waiters)
+                       newval |= FUTEX_WAITERS;
+
+               ret = lock_pi_update_atomic(uaddr, uval, newval);
+               /* If the takeover worked, return 1 */
+               return ret < 0 ? ret : 1;
+       }
 
        /*
-        * We took the lock due to forced take over.
+        * First waiter. Set the waiters bit before attaching ourselves
+        * to the owner. If the owner tries to unlock, it will be forced
+        * into the kernel and blocked on hb->lock.
         */
-       if (unlikely(lock_taken))
-               return 1;
-
+       newval = uval | FUTEX_WAITERS;
+       ret = lock_pi_update_atomic(uaddr, uval, newval);
+       if (ret)
+               return ret;
        /*
-        * We dont have the lock. Look up the PI state (or create it if
-        * we are the first waiter):
+        * If the update of the user space value succeeded, we try to
+        * attach to the owner. If that fails, no harm done, we only
+        * set the FUTEX_WAITERS bit in the user space variable.
         */
-       ret = lookup_pi_state(uval, hb, key, ps);
-
-       if (unlikely(ret)) {
-               switch (ret) {
-               case -ESRCH:
-                       /*
-                        * We failed to find an owner for this
-                        * futex. So we have no pi_state to block
-                        * on. This can happen in two cases:
-                        *
-                        * 1) The owner died
-                        * 2) A stale FUTEX_WAITERS bit
-                        *
-                        * Re-read the futex value.
-                        */
-                       if (get_futex_value_locked(&curval, uaddr))
-                               return -EFAULT;
-
-                       /*
-                        * If the owner died or we have a stale
-                        * WAITERS bit the owner TID in the user space
-                        * futex is 0.
-                        */
-                       if (!(curval & FUTEX_TID_MASK)) {
-                               force_take = 1;
-                               goto retry;
-                       }
-               default:
-                       break;
-               }
-       }
-
-       return ret;
+       return attach_to_pi_owner(uval, key, ps);
 }
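After the rewrite, the atomic acquisition path boils down to one read of the futex word followed by a three-way dispatch. A compact, runnable userspace model of that dispatch (a sketch only; has_top_waiter stands in for futex_top_waiter() returning non-NULL):

#include <assert.h>
#include <stdint.h>

#define FUTEX_TID_MASK	0x3fffffff

enum action { DEADLOCK, ATTACH_TO_WAITER, TAKE_OVER, ATTACH_TO_OWNER };

static enum action classify(uint32_t uval, uint32_t vpid, int has_top_waiter)
{
	if ((uval & FUTEX_TID_MASK) == vpid)
		return DEADLOCK;		/* we appear to own it already */
	if (has_top_waiter)
		return ATTACH_TO_WAITER;	/* kernel state exists, validate it */
	if (!(uval & FUTEX_TID_MASK))
		return TAKE_OVER;		/* TID 0: take the futex over */
	return ATTACH_TO_OWNER;			/* set FUTEX_WAITERS, then attach */
}

int main(void)
{
	assert(classify(0, 1000, 0) == TAKE_OVER);
	assert(classify(42, 1000, 1) == ATTACH_TO_WAITER);
	assert(classify(42, 1000, 0) == ATTACH_TO_OWNER);
	assert(classify(1000, 1000, 0) == DEADLOCK);
	return 0;
}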
 
 /**
@@ -1186,22 +1169,6 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
        return 0;
 }
 
-static int unlock_futex_pi(u32 __user *uaddr, u32 uval)
-{
-       u32 uninitialized_var(oldval);
-
-       /*
-        * There is no waiter, so we unlock the futex. The owner died
-        * bit has not to be preserved here. We are the owner:
-        */
-       if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0))
-               return -EFAULT;
-       if (oldval != uval)
-               return -EAGAIN;
-
-       return 0;
-}
-
 /*
  * Express the locking dependencies for lockdep:
  */
@@ -1659,7 +1626,12 @@ retry_private:
                                goto retry;
                        goto out;
                case -EAGAIN:
-                       /* The owner was exiting, try again. */
+                       /*
+                        * Two reasons for this:
+                        * - Owner is exiting and we just wait for the
+                        *   exit to complete.
+                        * - The user space value changed.
+                        */
                        double_unlock_hb(hb1, hb2);
                        hb_waiters_dec(hb2);
                        put_futex_key(&key2);
@@ -1718,7 +1690,7 @@ retry_private:
                        this->pi_state = pi_state;
                        ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex,
                                                        this->rt_waiter,
-                                                       this->task, 1);
+                                                       this->task);
                        if (ret == 1) {
                                /* We got the lock. */
                                requeue_pi_wake_futex(this, &key2, hb2);
@@ -2316,8 +2288,10 @@ retry_private:
                        goto uaddr_faulted;
                case -EAGAIN:
                        /*
-                        * Task is exiting and we just wait for the
-                        * exit to complete.
+                        * Two reasons for this:
+                        * - Task is exiting and we just wait for the
+                        *   exit to complete.
+                        * - The user space value changed.
                         */
                        queue_unlock(hb);
                        put_futex_key(&q.key);
@@ -2337,9 +2311,9 @@ retry_private:
        /*
         * Block on the PI mutex:
         */
-       if (!trylock)
-               ret = rt_mutex_timed_lock(&q.pi_state->pi_mutex, to, 1);
-       else {
+       if (!trylock) {
+               ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
+       } else {
                ret = rt_mutex_trylock(&q.pi_state->pi_mutex);
                /* Fixup the trylock return value: */
                ret = ret ? 0 : -EWOULDBLOCK;
@@ -2401,10 +2375,10 @@ uaddr_faulted:
  */
 static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 {
-       struct futex_hash_bucket *hb;
-       struct futex_q *this, *next;
+       u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
        union futex_key key = FUTEX_KEY_INIT;
-       u32 uval, vpid = task_pid_vnr(current);
+       struct futex_hash_bucket *hb;
+       struct futex_q *match;
        int ret;
 
 retry:
@@ -2417,57 +2391,47 @@ retry:
                return -EPERM;
 
        ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key, VERIFY_WRITE);
-       if (unlikely(ret != 0))
-               goto out;
+       if (ret)
+               return ret;
 
        hb = hash_futex(&key);
        spin_lock(&hb->lock);
 
        /*
-        * To avoid races, try to do the TID -> 0 atomic transition
-        * again. If it succeeds then we can return without waking
-        * anyone else up. We only try this if neither the waiters nor
-        * the owner died bit are set.
-        */
-       if (!(uval & ~FUTEX_TID_MASK) &&
-           cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0))
-               goto pi_faulted;
-       /*
-        * Rare case: we managed to release the lock atomically,
-        * no need to wake anyone else up:
-        */
-       if (unlikely(uval == vpid))
-               goto out_unlock;
-
-       /*
-        * Ok, other tasks may need to be woken up - check waiters
-        * and do the wakeup if necessary:
+        * Check waiters first. We do not trust user space values at
+        * all and we at least want to know if user space fiddled
+        * with the futex value instead of blindly unlocking.
         */
-       plist_for_each_entry_safe(this, next, &hb->chain, list) {
-               if (!match_futex (&this->key, &key))
-                       continue;
-               ret = wake_futex_pi(uaddr, uval, this);
+       match = futex_top_waiter(hb, &key);
+       if (match) {
+               ret = wake_futex_pi(uaddr, uval, match);
                /*
-                * The atomic access to the futex value
-                * generated a pagefault, so retry the
-                * user-access and the wakeup:
+                * The atomic access to the futex value generated a
+                * pagefault, so retry the user-access and the wakeup:
                 */
                if (ret == -EFAULT)
                        goto pi_faulted;
                goto out_unlock;
        }
+
        /*
-        * No waiters - kernel unlocks the futex:
+        * We have no kernel internal state, i.e. no waiters in the
+        * kernel. Waiters which are about to queue themselves are stuck
+        * on hb->lock. So we can safely ignore them. We do neither
+        * preserve the WAITERS bit nor the OWNER_DIED one. We are the
+        * owner.
         */
-       ret = unlock_futex_pi(uaddr, uval);
-       if (ret == -EFAULT)
+       if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0))
                goto pi_faulted;
 
+       /*
+        * If uval has changed, let user space handle it.
+        */
+       ret = (curval == uval) ? 0 : -EAGAIN;
+
 out_unlock:
        spin_unlock(&hb->lock);
        put_futex_key(&key);
-
-out:
        return ret;
 
 pi_faulted:
@@ -2669,7 +2633,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
                 */
                WARN_ON(!q.pi_state);
                pi_mutex = &q.pi_state->pi_mutex;
-               ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+               ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
                debug_rt_mutex_free_waiter(&rt_waiter);
 
                spin_lock(q.lock_ptr);
index 7339e42..1487a12 100644 (file)
@@ -455,9 +455,9 @@ EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
  */
 void irq_free_hwirqs(unsigned int from, int cnt)
 {
-       int i;
+       int i, j;
 
-       for (i = from; cnt > 0; i++, cnt--) {
+       for (i = from, j = cnt; j > 0; i++, j--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
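The original loop consumed its cnt argument, so any use of cnt after the loop in the full function (not visible in this hunk) saw 0; iterating on a copy preserves the caller's value. A toy, runnable illustration of the bug class and the fix (names are illustrative):

#include <assert.h>

static int count_preserved(int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--)
		;	/* per-hwirq teardown would go here */

	return cnt;	/* still the caller's value, thanks to j */
}

int main(void)
{
	assert(count_preserved(10, 4) == 4);
	return 0;
}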
index a82170e..e6bcbe7 100644 (file)
 #include <linux/tick.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <linux/smp.h>
 #include <asm/processor.h>
 
 
-static DEFINE_PER_CPU(struct llist_head, irq_work_list);
-static DEFINE_PER_CPU(int, irq_work_raised);
+static DEFINE_PER_CPU(struct llist_head, raised_list);
+static DEFINE_PER_CPU(struct llist_head, lazy_list);
 
 /*
  * Claim the entry so that no one else will poke at it.
@@ -55,12 +56,34 @@ void __weak arch_irq_work_raise(void)
         */
 }
 
+#ifdef CONFIG_SMP
 /*
- * Enqueue the irq_work @entry unless it's already pending
+ * Enqueue the irq_work @work on @cpu unless it's already pending
  * somewhere.
  *
  * Can be re-enqueued while the callback is still in progress.
  */
+bool irq_work_queue_on(struct irq_work *work, int cpu)
+{
+       /* All work should have been flushed before going offline */
+       WARN_ON_ONCE(cpu_is_offline(cpu));
+
+       /* Arch remote IPI send/receive backends aren't NMI safe */
+       WARN_ON_ONCE(in_nmi());
+
+       /* Only queue if not already pending */
+       if (!irq_work_claim(work))
+               return false;
+
+       if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+               arch_send_call_function_single_ipi(cpu);
+
+       return true;
+}
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
+#endif
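A sketch of how init or module code might use the new remote queueing API (a hypothetical example, not taken from this series; it assumes CPU 1 exists and is online):

#include <linux/init.h>
#include <linux/irq_work.h>
#include <linux/printk.h>
#include <linux/smp.h>

static void my_func(struct irq_work *work)
{
	/* Runs from the IPI path on the target CPU, with IRQs disabled. */
	pr_info("irq_work ran on CPU %d\n", smp_processor_id());
}

static struct irq_work my_work;

static int __init my_setup(void)
{
	init_irq_work(&my_work, my_func);
	/* Returns false if my_work is already pending somewhere. */
	if (!irq_work_queue_on(&my_work, 1))
		pr_debug("my_work already pending\n");
	return 0;
}
late_initcall(my_setup);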
+
+/* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
        /* Only queue if not already pending */
@@ -70,15 +93,13 @@ bool irq_work_queue(struct irq_work *work)
        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
 
-       llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-
-       /*
-        * If the work is not "lazy" or the tick is stopped, raise the irq
-        * work interrupt (if supported by the arch), otherwise, just wait
-        * for the next tick.
-        */
-       if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
-               if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+       /* If the work is "lazy", handle it from next tick if any */
+       if (work->flags & IRQ_WORK_LAZY) {
+               if (llist_add(&work->llnode, &__get_cpu_var(lazy_list)) &&
+                   tick_nohz_tick_stopped())
+                       arch_irq_work_raise();
+       } else {
+               if (llist_add(&work->llnode, &__get_cpu_var(raised_list)))
                        arch_irq_work_raise();
        }
 
@@ -90,10 +111,11 @@ EXPORT_SYMBOL_GPL(irq_work_queue);
 
 bool irq_work_needs_cpu(void)
 {
-       struct llist_head *this_list;
+       struct llist_head *raised, *lazy;
 
-       this_list = &__get_cpu_var(irq_work_list);
-       if (llist_empty(this_list))
+       raised = &__get_cpu_var(raised_list);
+       lazy = &__get_cpu_var(lazy_list);
+       if (llist_empty(raised) && llist_empty(lazy))
                return false;
 
        /* All work should have been flushed before going offline */
@@ -102,28 +124,18 @@ bool irq_work_needs_cpu(void)
        return true;
 }
 
-static void __irq_work_run(void)
+static void irq_work_run_list(struct llist_head *list)
 {
        unsigned long flags;
        struct irq_work *work;
-       struct llist_head *this_list;
        struct llist_node *llnode;
 
+       BUG_ON(!irqs_disabled());
 
-       /*
-        * Reset the "raised" state right before we check the list because
-        * an NMI may enqueue after we find the list empty from the runner.
-        */
-       __this_cpu_write(irq_work_raised, 0);
-       barrier();
-
-       this_list = &__get_cpu_var(irq_work_list);
-       if (llist_empty(this_list))
+       if (llist_empty(list))
                return;
 
-       BUG_ON(!irqs_disabled());
-
-       llnode = llist_del_all(this_list);
+       llnode = llist_del_all(list);
        while (llnode != NULL) {
                work = llist_entry(llnode, struct irq_work, llnode);
 
@@ -149,13 +161,13 @@ static void __irq_work_run(void)
 }
 
 /*
- * Run the irq_work entries on this cpu. Requires to be ran from hardirq
- * context with local IRQs disabled.
+ * hotplug calls this through:
+ *  hotplug_cfd() -> flush_smp_call_function_queue()
  */
 void irq_work_run(void)
 {
-       BUG_ON(!in_irq());
-       __irq_work_run();
+       irq_work_run_list(&__get_cpu_var(raised_list));
+       irq_work_run_list(&__get_cpu_var(lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
@@ -171,35 +183,3 @@ void irq_work_sync(struct irq_work *work)
                cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
-
-#ifdef CONFIG_HOTPLUG_CPU
-static int irq_work_cpu_notify(struct notifier_block *self,
-                              unsigned long action, void *hcpu)
-{
-       long cpu = (long)hcpu;
-
-       switch (action) {
-       case CPU_DYING:
-               /* Called from stop_machine */
-               if (WARN_ON_ONCE(cpu != smp_processor_id()))
-                       break;
-               __irq_work_run();
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_notify;
-
-static __init int irq_work_init_cpu_notifier(void)
-{
-       cpu_notify.notifier_call = irq_work_cpu_notify;
-       cpu_notify.priority = 0;
-       register_cpu_notifier(&cpu_notify);
-       return 0;
-}
-device_initcall(irq_work_init_cpu_notifier);
-
-#endif /* CONFIG_HOTPLUG_CPU */
index 6748688..4b8f0c9 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/swap.h>
 #include <linux/syscore_ops.h>
 #include <linux/compiler.h>
+#include <linux/hugetlb.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -1617,7 +1618,11 @@ static int __init crash_save_vmcoreinfo_init(void)
 #ifdef CONFIG_MEMORY_FAILURE
        VMCOREINFO_NUMBER(PG_hwpoison);
 #endif
+       VMCOREINFO_NUMBER(PG_head_mask);
        VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
+#ifdef CONFIG_HUGETLBFS
+       VMCOREINFO_SYMBOL(free_huge_page);
+#endif
 
        arch_crash_save_vmcoreinfo();
        update_vmcoreinfo_note();
index 3214289..734e9a7 100644 (file)
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 {
        unsigned long *iter;
        struct kprobe_blacklist_entry *ent;
-       unsigned long offset = 0, size = 0;
+       unsigned long entry, offset = 0, size = 0;
 
        for (iter = start; iter < end; iter++) {
-               if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
-                       pr_err("Failed to find blacklist %p\n", (void *)*iter);
+               entry = arch_deref_entry_point((void *)*iter);
+
+               if (!kernel_text_address(entry) ||
+                   !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+                       pr_err("Failed to find blacklist at %p\n",
+                               (void *)entry);
                        continue;
                }
 
                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        return -ENOMEM;
-               ent->start_addr = *iter;
-               ent->end_addr = *iter + size;
+               ent->start_addr = entry;
+               ent->end_addr = entry + size;
                INIT_LIST_HEAD(&ent->list);
                list_add_tail(&ent->list, &kprobe_blacklist);
        }
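arch_deref_entry_point() matters on function-descriptor ABIs such as ia64 and ppc64, where a function symbol points at a descriptor rather than at the text address kallsyms knows about. On other architectures the generic weak default is effectively an identity cast; it looks like the sketch below (quoted from memory, so treat it as an assumption rather than part of this diff):

/* Generic fallback; descriptor-based architectures override this. */
unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}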
index c2390f4..ef48322 100644 (file)
@@ -591,7 +591,7 @@ static void insert_kthread_work(struct kthread_worker *worker,
 
        list_add_tail(&work->node, pos);
        work->worker = worker;
-       if (likely(worker->task))
+       if (!worker->current_work && likely(worker->task))
                wake_up_process(worker->task);
 }
 
index d24e433..88d0d44 100644 (file)
@@ -384,7 +384,9 @@ static void print_lockdep_off(const char *bug_msg)
 {
        printk(KERN_DEBUG "%s\n", bug_msg);
        printk(KERN_DEBUG "turning off the locking correctness validator.\n");
+#ifdef CONFIG_LOCK_STAT
        printk(KERN_DEBUG "Please attach the output of /proc/lock_stat to the bug report\n");
+#endif
 }
 
 static int save_trace(struct stack_trace *trace)
index 838dc9e..9887a90 100644 (file)
@@ -1,6 +1,4 @@
-
 #include <linux/percpu.h>
-#include <linux/mutex.h>
 #include <linux/sched.h>
 #include "mcs_spinlock.h"
 
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+       return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+       int cpu_nr = encoded_cpu_val - 1;
+
+       return per_cpu_ptr(&osq_node, cpu_nr);
+}
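Since OSQ_UNLOCKED_VAL is 0 and means "queue empty", a statically zeroed optimistic_spin_queue is already in the unlocked state. The matching initializer, which the __mutex_init() change further down calls as osq_lock_init(), plausibly looks like this (the header defining it is not quoted in this diff, so this is a sketch):

static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}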
 
 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
-             struct optimistic_spin_queue *node,
-             struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_queue *lock,
+             struct optimistic_spin_node *node,
+             struct optimistic_spin_node *prev)
 {
-       struct optimistic_spin_queue *next = NULL;
+       struct optimistic_spin_node *next = NULL;
+       int curr = encode_cpu(smp_processor_id());
+       int old;
+
+       /*
+        * If there is a prev node in the queue, then the 'old' value will
+        * be the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL,
+        * since if we're currently last in the queue, the queue then
+        * becomes empty.
+        */
+       old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
        for (;;) {
-               if (*lock == node && cmpxchg(lock, node, prev) == node) {
+               if (atomic_read(&lock->tail) == curr &&
+                   atomic_cmpxchg(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
@@ -53,24 +77,29 @@ osq_wait_next(struct optimistic_spin_queue **lock,
                                break;
                }
 
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
        }
 
        return next;
 }
 
-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
 {
-       struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-       struct optimistic_spin_queue *prev, *next;
+       struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+       struct optimistic_spin_node *prev, *next;
+       int curr = encode_cpu(smp_processor_id());
+       int old;
 
        node->locked = 0;
        node->next = NULL;
+       node->cpu = curr;
 
-       node->prev = prev = xchg(lock, node);
-       if (likely(prev == NULL))
+       old = atomic_xchg(&lock->tail, curr);
+       if (old == OSQ_UNLOCKED_VAL)
                return true;
 
+       prev = decode_cpu(old);
+       node->prev = prev;
        ACCESS_ONCE(prev->next) = node;
 
        /*
@@ -89,7 +118,7 @@ bool osq_lock(struct optimistic_spin_queue **lock)
                if (need_resched())
                        goto unqueue;
 
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
        }
        return true;
 
@@ -115,7 +144,7 @@ unqueue:
                if (smp_load_acquire(&node->locked))
                        return true;
 
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
 
                /*
                 * Or we race against a concurrent unqueue()'s step-B, in which
@@ -149,20 +178,21 @@ unqueue:
        return false;
 }
 
-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
 {
-       struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-       struct optimistic_spin_queue *next;
+       struct optimistic_spin_node *node, *next;
+       int curr = encode_cpu(smp_processor_id());
 
        /*
         * Fast path for the uncontended case.
         */
-       if (likely(cmpxchg(lock, node, NULL) == node))
+       if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
                return;
 
        /*
         * Second most likely case.
         */
+       node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
                ACCESS_ONCE(next->locked) = 1;
index a2dbac4..23e89c5 100644 (file)
@@ -27,7 +27,7 @@ struct mcs_spinlock {
 #define arch_mcs_spin_lock_contended(l)                                        \
 do {                                                                   \
        while (!(smp_load_acquire(l)))                                  \
-               arch_mutex_cpu_relax();                                 \
+               cpu_relax_lowlatency();                                 \
 } while (0)
 #endif
 
@@ -104,7 +104,7 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
                        return;
                /* Wait until the next pointer is set */
                while (!(next = ACCESS_ONCE(node->next)))
-                       arch_mutex_cpu_relax();
+                       cpu_relax_lowlatency();
        }
 
        /* Pass lock to next waiter. */
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
  * mutex_lock()/rwsem_down_{read,write}() etc.
  */
 
-struct optimistic_spin_queue {
-       struct optimistic_spin_queue *next, *prev;
+struct optimistic_spin_node {
+       struct optimistic_spin_node *next, *prev;
        int locked; /* 1 if lock acquired */
+       int cpu; /* encoded CPU # value */
 };
 
-extern bool osq_lock(struct optimistic_spin_queue **lock);
-extern void osq_unlock(struct optimistic_spin_queue **lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
index bc73d33..ae712b2 100644 (file)
 # include <asm/mutex.h>
 #endif
 
-/*
- * A negative mutex count indicates that waiters are sleeping waiting for the
- * mutex.
- */
-#define        MUTEX_SHOW_NO_WAITER(mutex)     (atomic_read(&(mutex)->count) >= 0)
-
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -60,7 +54,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-       lock->osq = NULL;
+       osq_lock_init(&lock->osq);
 #endif
 
        debug_mutex_init(lock, name, key);
@@ -152,7 +146,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
                if (need_resched())
                        break;
 
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
        }
        rcu_read_unlock();
 
@@ -388,12 +382,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
        /*
         * Optimistic spinning.
         *
-        * We try to spin for acquisition when we find that there are no
-        * pending waiters and the lock owner is currently running on a
-        * (different) CPU.
-        *
-        * The rationale is that if the lock owner is running, it is likely to
-        * release the lock soon.
+        * We try to spin for acquisition when we find that the lock owner
+        * is currently running on a (different) CPU and while we don't
+        * need to reschedule. The rationale is that if the lock owner is
+        * running, it is likely to release the lock soon.
         *
         * Since this needs the lock owner, and this mutex implementation
         * doesn't track the owner atomically in the lock field, we need to
@@ -440,7 +432,8 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;
 
-               if ((atomic_read(&lock->count) == 1) &&
+               /* Try to acquire the mutex if it is unlocked. */
+               if (!mutex_is_locked(lock) &&
                    (atomic_cmpxchg(&lock->count, 1, 0) == 1)) {
                        lock_acquired(&lock->dep_map, ip);
                        if (use_ww_ctx) {
@@ -471,7 +464,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
        }
        osq_unlock(&lock->osq);
 slowpath:
@@ -485,8 +478,11 @@ slowpath:
 #endif
        spin_lock_mutex(&lock->wait_lock, flags);
 
-       /* once more, can we acquire the lock? */
-       if (MUTEX_SHOW_NO_WAITER(lock) && (atomic_xchg(&lock->count, 0) == 1))
+       /*
+        * Once more, try to acquire the lock. Only try-lock the mutex if
+        * it is unlocked to reduce unnecessary xchg() operations.
+        */
+       if (!mutex_is_locked(lock) && (atomic_xchg(&lock->count, 0) == 1))
                goto skip_wait;
 
        debug_mutex_lock_common(lock, &waiter);
@@ -506,9 +502,10 @@ slowpath:
                 * it's unlocked. Later on, if we sleep, this is the
                 * operation that gives us the lock. We xchg it to -1, so
                 * that when we release the lock, we properly wake up the
-                * other waiters:
+                * other waiters. We only attempt the xchg if the count is
+                * non-negative in order to avoid unnecessary xchg operations:
                 */
-               if (MUTEX_SHOW_NO_WAITER(lock) &&
+               if (atomic_read(&lock->count) >= 0 &&
                    (atomic_xchg(&lock->count, -1) == 1))
                        break;
 
@@ -823,6 +820,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
        unsigned long flags;
        int prev;
 
+       /* No need to trylock if the mutex is locked. */
+       if (mutex_is_locked(lock))
+               return 0;
+
        spin_lock_mutex(&lock->wait_lock, flags);
 
        prev = atomic_xchg(&lock->count, -1);
index fb5b8ac..f956ede 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/cpumask.h>
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
-#include <linux/mutex.h>
 #include <asm/qrwlock.h>
 
 /**
@@ -35,7 +34,7 @@ static __always_inline void
 rspin_until_writer_unlock(struct qrwlock *lock, u32 cnts)
 {
        while ((cnts & _QW_WMASK) == _QW_LOCKED) {
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
                cnts = smp_load_acquire((u32 *)&lock->cnts);
        }
 }
@@ -75,7 +74,7 @@ void queue_read_lock_slowpath(struct qrwlock *lock)
         * to make sure that the write lock isn't taken.
         */
        while (atomic_read(&lock->cnts) & _QW_WMASK)
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
 
        cnts = atomic_add_return(_QR_BIAS, &lock->cnts) - _QR_BIAS;
        rspin_until_writer_unlock(lock, cnts);
@@ -114,7 +113,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
                                    cnts | _QW_WAITING) == cnts))
                        break;
 
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
        }
 
        /* When no more readers, set the locked flag */
@@ -125,7 +124,7 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
                                    _QW_LOCKED) == _QW_WAITING))
                        break;
 
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
        }
 unlock:
        arch_spin_unlock(&lock->lock);
index 49b2ed3..62b6cee 100644 (file)
@@ -66,12 +66,13 @@ void rt_mutex_debug_task_free(struct task_struct *task)
  * the deadlock. We print when we return. act_waiter can be NULL in
  * case of a remove waiter operation.
  */
-void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
+void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+                            struct rt_mutex_waiter *act_waiter,
                             struct rt_mutex *lock)
 {
        struct task_struct *task;
 
-       if (!debug_locks || detect || !act_waiter)
+       if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
                return;
 
        task = rt_mutex_owner(act_waiter->lock);
index 14193d5..d0519c3 100644 (file)
@@ -20,14 +20,20 @@ extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
                                      struct task_struct *powner);
 extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
-extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
+extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+                                   struct rt_mutex_waiter *waiter,
                                    struct rt_mutex *lock);
 extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
 # define debug_rt_mutex_reset_waiter(w)                        \
        do { (w)->deadlock_lock = NULL; } while (0)
 
-static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
-                                                int detect)
+static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+                                                 enum rtmutex_chainwalk walk)
 {
        return (waiter != NULL);
 }
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+       debug_rt_mutex_print_deadlock(w);
+}
index a620d4d..a0ea2a1 100644 (file)
@@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
 }
+
+/*
+ * Safe fastpath aware unlock:
+ * 1) Clear the waiters bit
+ * 2) Drop lock->wait_lock
+ * 3) Try to unlock the lock with cmpxchg
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+       __releases(lock->wait_lock)
+{
+       struct task_struct *owner = rt_mutex_owner(lock);
+
+       clear_rt_mutex_waiters(lock);
+       raw_spin_unlock(&lock->wait_lock);
+       /*
+        * If a new waiter comes in between the unlock and the cmpxchg
+        * we have two situations:
+        *
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        * cmpxchg(p, owner, 0) == owner
+        *                                      mark_rt_mutex_waiters(lock);
+        *                                      acquire(lock);
+        * or:
+        *
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        *                                      mark_rt_mutex_waiters(lock);
+        *
+        * cmpxchg(p, owner, 0) != owner
+        *                                      enqueue_waiter();
+        *                                      unlock(wait_lock);
+        * lock(wait_lock);
+        * wake waiter();
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        *                                      acquire(lock);
+        */
+       return rt_mutex_cmpxchg(lock, owner, NULL);
+}
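Because a new waiter can slip in between dropping wait_lock and the cmpxchg, a false return from unlock_rt_mutex_safe() is not an error; the caller relocks and re-evaluates. An illustrative caller loop in the shape of the slow unlock path this helper serves (a sketch, not a verbatim quote of the series):

	raw_spin_lock(&lock->wait_lock);
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock before the cmpxchg! */
		if (unlock_rt_mutex_safe(lock) == true)
			return;		/* fully unlocked, nobody to wake */
		/* A waiter raced in; relock and re-evaluate. */
		raw_spin_lock(&lock->wait_lock);
	}
	/* ... wake the top waiter, then drop wait_lock ... */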
+
 #else
 # define rt_mutex_cmpxchg(l,c,n)       (0)
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
@@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
 }
+
+/*
+ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+       __releases(lock->wait_lock)
+{
+       lock->owner = NULL;
+       raw_spin_unlock(&lock->wait_lock);
+       return true;
+}
 #endif
 
 static inline int
@@ -255,42 +307,121 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 }
 
+/*
+ * Deadlock detection is conditional:
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
+ * if the detect argument is RT_MUTEX_FULL_CHAINWALK.
+ *
+ * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
+ * conducted independent of the detect argument.
+ *
+ * If the waiter argument is NULL this indicates the deboost path and
+ * deadlock detection is disabled independent of the detect argument
+ * and the config settings.
+ */
+static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
+                                         enum rtmutex_chainwalk chwalk)
+{
+       /*
+        * This is just a wrapper function for the following call,
+        * because debug_rt_mutex_detect_deadlock() smells like a magic
+        * debug feature and I wanted to keep the cond function in the
+        * main source file along with the comments instead of having
+        * two of the same in the headers.
+        */
+       return debug_rt_mutex_detect_deadlock(waiter, chwalk);
+}
+
 /*
  * Max number of times we'll walk the boosting chain:
  */
 int max_lock_depth = 1024;
 
+static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+{
+       return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+}
+
 /*
  * Adjust the priority chain. Also used for deadlock detection.
  * Decreases task's usage by one - may thus free the task.
  *
- * @task: the task owning the mutex (owner) for which a chain walk is probably
- *       needed
+ * @task:      the task owning the mutex (owner) for which a chain walk is
+ *             probably needed
  * @deadlock_detect: do we have to carry out deadlock detection?
- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
- *            things for a task that has just got its priority adjusted, and
- *            is waiting on a mutex)
+ * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+ *             things for a task that has just got its priority adjusted, and
+ *             is waiting on a mutex)
+ * @next_lock: the mutex on which the owner of @orig_lock was blocked before
+ *             we dropped its pi_lock. Is never dereferenced, only used for
+ *             comparison to detect lock chain changes.
  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
- *              its priority to the mutex owner (can be NULL in the case
- *              depicted above or if the top waiter is gone away and we are
- *              actually deboosting the owner)
- * @top_task: the current top waiter
+ *             its priority to the mutex owner (can be NULL in the case
+ *             depicted above or if the top waiter is gone away and we are
+ *             actually deboosting the owner)
+ * @top_task:  the current top waiter
  *
  * Returns 0 or -EDEADLK.
+ *
+ * Chain walk basics and protection scope
+ *
+ * [R] refcount on task
+ * [P] task->pi_lock held
+ * [L] rtmutex->wait_lock held
+ *
+ * Step        Description                             Protected by
+ *     function arguments:
+ *     @task                                   [R]
+ *     @orig_lock if != NULL                   @top_task is blocked on it
+ *     @next_lock                              Unprotected. Cannot be
+ *                                             dereferenced. Only used for
+ *                                             comparison.
+ *     @orig_waiter if != NULL                 @top_task is blocked on it
+ *     @top_task                               current, or in case of proxy
+ *                                             locking protected by calling
+ *                                             code
+ *     again:
+ *       loop_sanity_check();
+ *     retry:
+ * [1]   lock(task->pi_lock);                  [R] acquire [P]
+ * [2]   waiter = task->pi_blocked_on;         [P]
+ * [3]   check_exit_conditions_1();            [P]
+ * [4]   lock = waiter->lock;                  [P]
+ * [5]   if (!try_lock(lock->wait_lock)) {     [P] try to acquire [L]
+ *         unlock(task->pi_lock);              release [P]
+ *         goto retry;
+ *       }
+ * [6]   check_exit_conditions_2();            [P] + [L]
+ * [7]   requeue_lock_waiter(lock, waiter);    [P] + [L]
+ * [8]   unlock(task->pi_lock);                release [P]
+ *       put_task_struct(task);                release [R]
+ * [9]   check_exit_conditions_3();            [L]
+ * [10]          task = owner(lock);                   [L]
+ *       get_task_struct(task);                [L] acquire [R]
+ *       lock(task->pi_lock);                  [L] acquire [P]
+ * [11]          requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
+ * [12]          check_exit_conditions_4();            [P] + [L]
+ * [13]          unlock(task->pi_lock);                release [P]
+ *       unlock(lock->wait_lock);              release [L]
+ *       goto again;
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
-                                     int deadlock_detect,
+                                     enum rtmutex_chainwalk chwalk,
                                      struct rt_mutex *orig_lock,
+                                     struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
 {
-       struct rt_mutex *lock;
        struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
-       int detect_deadlock, ret = 0, depth = 0;
+       struct rt_mutex_waiter *prerequeue_top_waiter;
+       int ret = 0, depth = 0;
+       struct rt_mutex *lock;
+       bool detect_deadlock;
        unsigned long flags;
+       bool requeue = true;
 
-       detect_deadlock = debug_rt_mutex_detect_deadlock(orig_waiter,
-                                                        deadlock_detect);
+       detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
 
        /*
         * The (de)boosting is a step by step approach with a lot of
@@ -299,6 +430,9 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
         * carefully whether things change under us.
         */
  again:
+       /*
+        * We limit the lock chain length for each invocation.
+        */
        if (++depth > max_lock_depth) {
                static int prev_max;
 
@@ -314,15 +448,30 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                }
                put_task_struct(task);
 
-               return deadlock_detect ? -EDEADLK : 0;
+               return -EDEADLK;
        }
+
+       /*
+        * We are fully preemptible here and only hold the refcount on
+        * @task. So everything can have changed under us since the
+        * caller or our own code below (goto retry/again) dropped all
+        * locks.
+        */
  retry:
        /*
-        * Task can not go away as we did a get_task() before !
+        * [1] Task cannot go away as we did a get_task() before!
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
 
+       /*
+        * [2] Get the waiter on which @task is blocked.
+        */
        waiter = task->pi_blocked_on;
+
+       /*
+        * [3] check_exit_conditions_1() protected by task->pi_lock.
+        */
+
        /*
         * Check whether the end of the boosting chain has been
         * reached or the state of the chain has changed while we
@@ -338,6 +487,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;
 
+       /*
+        * We dropped all locks after taking a refcount on @task, so
+        * the task might have moved on in the lock chain or even left
+        * the chain completely and blocks now on an unrelated lock or
+        * on @orig_lock.
+        *
+        * We stored the lock on which @task was blocked in @next_lock,
+        * so we can detect the chain change.
+        */
+       if (next_lock != waiter->lock)
+               goto out_unlock_pi;
+
        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
@@ -348,20 +509,41 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                        goto out_unlock_pi;
                /*
                 * If deadlock detection is off, we stop here if we
-                * are not the top pi waiter of the task.
+                * are not the top pi waiter of the task. If deadlock
+                * detection is enabled we continue, but stop the
+                * requeueing in the chain walk.
                 */
-               if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
-                       goto out_unlock_pi;
+               if (top_waiter != task_top_pi_waiter(task)) {
+                       if (!detect_deadlock)
+                               goto out_unlock_pi;
+                       else
+                               requeue = false;
+               }
        }
 
        /*
-        * When deadlock detection is off then we check, if further
-        * priority adjustment is necessary.
+        * If the waiter priority is the same as the task priority
+        * then there is no further priority adjustment necessary.  If
+        * deadlock detection is off, we stop the chain walk. If it's
+        * enabled, we continue, but stop the requeueing in the chain
+        * walk.
         */
-       if (!detect_deadlock && waiter->prio == task->prio)
-               goto out_unlock_pi;
+       if (waiter->prio == task->prio) {
+               if (!detect_deadlock)
+                       goto out_unlock_pi;
+               else
+                       requeue = false;
+       }
 
+       /*
+        * [4] Get the next lock
+        */
        lock = waiter->lock;
+       /*
+        * [5] We need to trylock here as we are holding task->pi_lock,
+        * which is the reverse lock order versus the other rtmutex
+        * operations.
+        */
        if (!raw_spin_trylock(&lock->wait_lock)) {
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                cpu_relax();
@@ -369,64 +551,180 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        }
 
        /*
+        * [6] check_exit_conditions_2() protected by task->pi_lock and
+        * lock->wait_lock.
+        *
         * Deadlock detection. If the lock is the same as the original
         * lock which caused us to walk the lock chain or if the
         * current lock is owned by the task which initiated the chain
         * walk, we detected a deadlock.
         */
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
-               debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
+               debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
-               ret = deadlock_detect ? -EDEADLK : 0;
+               ret = -EDEADLK;
                goto out_unlock_pi;
        }
 
-       top_waiter = rt_mutex_top_waiter(lock);
+       /*
+        * If we just follow the lock chain for deadlock detection, no
+        * need to do all the requeue operations. To avoid a truckload
+        * of conditionals around the various places below, just do the
+        * minimum chain walk checks.
+        */
+       if (!requeue) {
+               /*
+                * No requeue [7] here. Just release @task [8].
+                */
+               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               put_task_struct(task);
+
+               /*
+                * [9] check_exit_conditions_3 protected by lock->wait_lock.
+                * If there is no owner of the lock, end of chain.
+                */
+               if (!rt_mutex_owner(lock)) {
+                       raw_spin_unlock(&lock->wait_lock);
+                       return 0;
+               }
+
+               /* [10] Grab the next task, i.e. owner of @lock */
+               task = rt_mutex_owner(lock);
+               get_task_struct(task);
+               raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+               /*
+                * No requeue [11] here. We just do deadlock detection.
+                *
+                * [12] Store whether owner is blocked
+                * itself. Decision is made after dropping the locks
+                */
+               next_lock = task_blocked_on_lock(task);
+               /*
+                * Get the top waiter for the next iteration
+                */
+               top_waiter = rt_mutex_top_waiter(lock);
+
+               /* [13] Drop locks */
+               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+               raw_spin_unlock(&lock->wait_lock);
+
+               /* If owner is not blocked, end of chain. */
+               if (!next_lock)
+                       goto out_put_task;
+               goto again;
+       }
+
+       /*
+        * Store the current top waiter before doing the requeue
+        * operation on @lock. We need it for the boost/deboost
+        * decision below.
+        */
+       prerequeue_top_waiter = rt_mutex_top_waiter(lock);
 
-       /* Requeue the waiter */
+       /* [7] Requeue the waiter in the lock waiter list. */
        rt_mutex_dequeue(lock, waiter);
        waiter->prio = task->prio;
        rt_mutex_enqueue(lock, waiter);
 
-       /* Release the task */
+       /* [8] Release the task */
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+       put_task_struct(task);
+
+       /*
+        * [9] check_exit_conditions_3 protected by lock->wait_lock.
+        *
+        * We must abort the chain walk if there is no lock owner even
+        * in the deadlock detection case, as we have nothing to
+        * follow here. This is the end of the chain we are walking.
+        */
        if (!rt_mutex_owner(lock)) {
                /*
-                * If the requeue above changed the top waiter, then we need
-                * to wake the new top waiter up to try to get the lock.
+                * If the requeue [7] above changed the top waiter,
+                * then we need to wake the new top waiter up to try
+                * to get the lock.
                 */
-
-               if (top_waiter != rt_mutex_top_waiter(lock))
+               if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
                        wake_up_process(rt_mutex_top_waiter(lock)->task);
                raw_spin_unlock(&lock->wait_lock);
-               goto out_put_task;
+               return 0;
        }
-       put_task_struct(task);
 
-       /* Grab the next task */
+       /* [10] Grab the next task, i.e. the owner of @lock */
        task = rt_mutex_owner(lock);
        get_task_struct(task);
        raw_spin_lock_irqsave(&task->pi_lock, flags);
 
+       /* [11] requeue the pi waiters if necessary */
        if (waiter == rt_mutex_top_waiter(lock)) {
-               /* Boost the owner */
-               rt_mutex_dequeue_pi(task, top_waiter);
+               /*
+                * The waiter became the new top (highest priority)
+                * waiter on the lock. Replace the previous top waiter
+                * in the owner task's pi waiters list with this waiter
+                * and adjust the priority of the owner.
+                */
+               rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
 
-       } else if (top_waiter == waiter) {
-               /* Deboost the owner */
+       } else if (prerequeue_top_waiter == waiter) {
+               /*
+                * The waiter was the top waiter on the lock, but is
+                * no longer the top priority waiter. Replace waiter in
+                * the owner task's pi waiters list with the new top
+                * (highest priority) waiter and adjust the priority
+                * of the owner.
+                * The new top waiter is stored in @waiter so that
+                * @waiter == @top_waiter evaluates to true below and
+                * we continue to deboost the rest of the chain.
+                */
                rt_mutex_dequeue_pi(task, waiter);
                waiter = rt_mutex_top_waiter(lock);
                rt_mutex_enqueue_pi(task, waiter);
                __rt_mutex_adjust_prio(task);
+       } else {
+               /*
+                * Nothing changed. No need to do any priority
+                * adjustment.
+                */
        }
 
-       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
-
+       /*
+        * [12] check_exit_conditions_4() protected by task->pi_lock
+        * and lock->wait_lock. The actual decisions are made after we
+        * dropped the locks.
+        *
+        * Check whether the task which owns the current lock is pi
+        * blocked itself. If yes, we store a pointer to the lock for
+        * the lock chain change detection above. After we drop
+        * task->pi_lock, next_lock cannot be dereferenced anymore.
+        */
+       next_lock = task_blocked_on_lock(task);
+       /*
+        * Store the top waiter of @lock for the end of chain walk
+        * decision below.
+        */
        top_waiter = rt_mutex_top_waiter(lock);
+
+       /* [13] Drop the locks */
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        raw_spin_unlock(&lock->wait_lock);
 
+       /*
+        * Make the actual exit decisions [12], based on the stored
+        * values.
+        *
+        * We reached the end of the lock chain. Stop right here. No
+        * point to go back just to figure that out.
+        */
+       if (!next_lock)
+               goto out_put_task;
+
+       /*
+        * If the current waiter is not the top waiter on the lock,
+        * then we can stop the chain walk here if we are not in full
+        * deadlock detection mode.
+        */
        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;
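For orientation, an illustrative pass through the numbered steps above (the task and lock names are invented for this example, not taken from the patch):

/*
 * Illustration only: T1 (prio 10, i.e. higher priority) blocks on L1,
 * which is owned by T2 (prio 20); T2 is itself blocked on L2, owned
 * by T3 (prio 30). After task_blocks_on_rt_mutex() boosts T2, one
 * iteration of the walk runs with task = T2: pick lock = L2 [4],
 * trylock its wait_lock [5], requeue T2's waiter within L2's wait
 * list [7], release T2 [8], boost the owner T3 via L2's new top
 * waiter [11], record next_lock = the lock T3 blocks on, if any [12],
 * drop the locks [13], then either stop (T3 is not blocked) or
 * iterate with task = T3, lock = next_lock.
 */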
 
@@ -445,76 +743,119 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  *
  * Must be called with lock->wait_lock held.
  *
- * @lock:   the lock to be acquired.
- * @task:   the task which wants to acquire the lock
- * @waiter: the waiter that is queued to the lock's wait list. (could be NULL)
+ * @lock:   The lock to be acquired.
+ * @task:   The task which wants to acquire the lock
+ * @waiter: The waiter that is queued to the lock's wait list if the
+ *         callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-               struct rt_mutex_waiter *waiter)
+                               struct rt_mutex_waiter *waiter)
 {
+       unsigned long flags;
+
        /*
-        * We have to be careful here if the atomic speedups are
-        * enabled, such that, when
-        *  - no other waiter is on the lock
-        *  - the lock has been released since we did the cmpxchg
-        * the lock can be released or taken while we are doing the
-        * checks and marking the lock with RT_MUTEX_HAS_WAITERS.
+        * Before testing whether we can acquire @lock, we set the
+        * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+        * other tasks which try to modify @lock into the slow path
+        * and they serialize on @lock->wait_lock.
+        *
+        * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
+        * as explained at the top of this file if and only if:
         *
-        * The atomic acquire/release aware variant of
-        * mark_rt_mutex_waiters uses a cmpxchg loop. After setting
-        * the WAITERS bit, the atomic release / acquire can not
-        * happen anymore and lock->wait_lock protects us from the
-        * non-atomic case.
+        * - There is a lock owner. The caller must fixup the
+        *   transient state if it does a trylock or leaves the lock
+        *   function due to a signal or timeout.
         *
-        * Note, that this might set lock->owner =
-        * RT_MUTEX_HAS_WAITERS in the case the lock is not contended
-        * any more. This is fixed up when we take the ownership.
-        * This is the transitional state explained at the top of this file.
+        * - @task acquires the lock and there are no other
+        *   waiters. This is undone in rt_mutex_set_owner(@task) at
+        *   the end of this function.
         */
        mark_rt_mutex_waiters(lock);
 
+       /*
+        * If @lock has an owner, give up.
+        */
        if (rt_mutex_owner(lock))
                return 0;
 
        /*
-        * It will get the lock because of one of these conditions:
-        * 1) there is no waiter
-        * 2) higher priority than waiters
-        * 3) it is top waiter
+        * If @waiter != NULL, @task has already enqueued the waiter
+        * into @lock's waiter list. If @waiter == NULL then this is a
+        * trylock attempt.
         */
-       if (rt_mutex_has_waiters(lock)) {
-               if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
-                       if (!waiter || waiter != rt_mutex_top_waiter(lock))
-                               return 0;
-               }
-       }
-
-       if (waiter || rt_mutex_has_waiters(lock)) {
-               unsigned long flags;
-               struct rt_mutex_waiter *top;
-
-               raw_spin_lock_irqsave(&task->pi_lock, flags);
+       if (waiter) {
+               /*
+                * If waiter is not the highest priority waiter of
+                * @lock, give up.
+                */
+               if (waiter != rt_mutex_top_waiter(lock))
+                       return 0;
 
-               /* remove the queued waiter. */
-               if (waiter) {
-                       rt_mutex_dequeue(lock, waiter);
-                       task->pi_blocked_on = NULL;
-               }
+               /*
+                * We can acquire the lock. Remove the waiter from the
+                * lock waiters list.
+                */
+               rt_mutex_dequeue(lock, waiter);
 
+       } else {
                /*
-                * We have to enqueue the top waiter(if it exists) into
-                * task->pi_waiters list.
+                * If the lock has waiters already we check whether @task is
+                * eligible to take over the lock.
+                *
+                * If there are no other waiters, @task can acquire
+                * the lock.  @task->pi_blocked_on is NULL, so it does
+                * not need to be dequeued.
                 */
                if (rt_mutex_has_waiters(lock)) {
-                       top = rt_mutex_top_waiter(lock);
-                       rt_mutex_enqueue_pi(task, top);
+                       /*
+                        * If @task->prio is greater than or equal to
+                        * the top waiter priority (kernel view),
+                        * @task lost.
+                        */
+                       if (task->prio >= rt_mutex_top_waiter(lock)->prio)
+                               return 0;
+
+                       /*
+                        * The current top waiter stays enqueued. We
+                        * don't have to change anything in the lock
+                        * waiters order.
+                        */
+               } else {
+                       /*
+                        * No waiters. Take the lock without the
+                        * pi_lock dance. @task->pi_blocked_on is NULL
+                        * and we have no waiters to enqueue in @task's
+                        * pi waiters list.
+                        */
+                       goto takeit;
                }
-               raw_spin_unlock_irqrestore(&task->pi_lock, flags);
        }
 
+       /*
+        * Clear @task->pi_blocked_on. Requires protection by
+        * @task->pi_lock. Redundant operation for the @waiter == NULL
+        * case, but conditionals are more expensive than a redundant
+        * store.
+        */
+       raw_spin_lock_irqsave(&task->pi_lock, flags);
+       task->pi_blocked_on = NULL;
+       /*
+        * Finish the lock acquisition. @task is the new owner. If
+        * other waiters exist we have to insert the highest priority
+        * waiter into @task->pi_waiters list.
+        */
+       if (rt_mutex_has_waiters(lock))
+               rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
+       raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+takeit:
        /* We got the lock. */
        debug_rt_mutex_lock(lock);
 
+       /*
+        * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
+        * are still waiters or clears it.
+        */
        rt_mutex_set_owner(lock, task);
 
        rt_mutex_deadlock_account_lock(lock, task);
@@ -532,12 +873,13 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                                   struct rt_mutex_waiter *waiter,
                                   struct task_struct *task,
-                                  int detect_deadlock)
+                                  enum rtmutex_chainwalk chwalk)
 {
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
-       unsigned long flags;
+       struct rt_mutex *next_lock;
        int chain_walk = 0, res;
+       unsigned long flags;
 
        /*
         * Early deadlock detection. We really don't want the task to
@@ -548,7 +890,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         */
-       if (detect_deadlock && owner == task)
+       if (owner == task)
                return -EDEADLK;
 
        raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -569,20 +911,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        if (!owner)
                return 0;
 
+       raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) {
-               raw_spin_lock_irqsave(&owner->pi_lock, flags);
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);
 
                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
-               raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-       }
-       else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+       } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
                chain_walk = 1;
+       }
+
+       /* Store the lock on which owner is blocked or NULL */
+       next_lock = task_blocked_on_lock(owner);
 
-       if (!chain_walk)
+       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       /*
+        * Even if full deadlock detection is on, if the owner is not
+        * blocked itself, we can avoid finding this out in the chain
+        * walk.
+        */
+       if (!chain_walk || !next_lock)
                return 0;
 
        /*
@@ -594,8 +944,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-                                        task);
+       res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
+                                        next_lock, waiter, task);
 
        raw_spin_lock(&lock->wait_lock);
 
@@ -605,7 +955,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 /*
  * Wake up the next waiter on the lock.
  *
- * Remove the top waiter from the current tasks waiter list and wake it up.
+ * Remove the top waiter from the current task's pi waiter list and
+ * wake it up.
  *
  * Called with lock->wait_lock held.
  */
@@ -626,10 +977,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
         */
        rt_mutex_dequeue_pi(current, waiter);
 
-       rt_mutex_set_owner(lock, NULL);
+       /*
+        * As we are waking up the top waiter, and the waiter stays
+        * queued on the lock until it gets the lock, this lock
+        * obviously has waiters. Just set the bit here and this has
+        * the added benefit of forcing all new tasks into the
+        * slow path making sure no task of lower priority than
+        * the top waiter can steal this lock.
+        */
+       lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
+       /*
+        * It's safe to dereference waiter as it cannot go away as
+        * long as we hold lock->wait_lock. The waiter task needs to
+        * acquire it in order to dequeue the waiter.
+        */
        wake_up_process(waiter->task);
 }
 
@@ -642,40 +1006,42 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
 static void remove_waiter(struct rt_mutex *lock,
                          struct rt_mutex_waiter *waiter)
 {
-       int first = (waiter == rt_mutex_top_waiter(lock));
+       bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
+       struct rt_mutex *next_lock;
        unsigned long flags;
-       int chain_walk = 0;
 
        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
        current->pi_blocked_on = NULL;
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-       if (!owner)
+       /*
+        * Only update priority if the waiter was the highest priority
+        * waiter of the lock and there is an owner to update.
+        */
+       if (!owner || !is_top_waiter)
                return;
 
-       if (first) {
-
-               raw_spin_lock_irqsave(&owner->pi_lock, flags);
+       raw_spin_lock_irqsave(&owner->pi_lock, flags);
 
-               rt_mutex_dequeue_pi(owner, waiter);
+       rt_mutex_dequeue_pi(owner, waiter);
 
-               if (rt_mutex_has_waiters(lock)) {
-                       struct rt_mutex_waiter *next;
+       if (rt_mutex_has_waiters(lock))
+               rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
 
-                       next = rt_mutex_top_waiter(lock);
-                       rt_mutex_enqueue_pi(owner, next);
-               }
-               __rt_mutex_adjust_prio(owner);
+       __rt_mutex_adjust_prio(owner);
 
-               if (owner->pi_blocked_on)
-                       chain_walk = 1;
+       /* Store the lock on which owner is blocked or NULL */
+       next_lock = task_blocked_on_lock(owner);
 
-               raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-       }
+       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 
-       if (!chain_walk)
+       /*
+        * Don't walk the chain if the owner task is not blocked
+        * itself.
+        */
+       if (!next_lock)
                return;
 
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
@@ -683,7 +1049,8 @@ static void remove_waiter(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
+       rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
+                                  next_lock, NULL, current);
 
        raw_spin_lock(&lock->wait_lock);
 }
@@ -696,6 +1063,7 @@ static void remove_waiter(struct rt_mutex *lock,
 void rt_mutex_adjust_pi(struct task_struct *task)
 {
        struct rt_mutex_waiter *waiter;
+       struct rt_mutex *next_lock;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -706,12 +1074,14 @@ void rt_mutex_adjust_pi(struct task_struct *task)
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
-
+       next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
-       rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+
+       rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
+                                  next_lock, NULL, task);
 }
 
 /**
@@ -763,13 +1133,33 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
        return ret;
 }
 
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+                                    struct rt_mutex_waiter *w)
+{
+       /*
+        * If the result is not -EDEADLOCK or the caller requested
+        * deadlock detection, nothing to do here.
+        */
+       if (res != -EDEADLOCK || detect_deadlock)
+               return;
+
+       /*
+        * Yell loudly and stop the task right here.
+        */
+       rt_mutex_print_deadlock(w);
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+       }
+}
+
 /*
  * Slow path lock function:
  */
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
                  struct hrtimer_sleeper *timeout,
-                 int detect_deadlock)
+                 enum rtmutex_chainwalk chwalk)
 {
        struct rt_mutex_waiter waiter;
        int ret = 0;
@@ -795,15 +1185,17 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                        timeout->task = NULL;
        }
 
-       ret = task_blocks_on_rt_mutex(lock, &waiter, current, detect_deadlock);
+       ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
        if (likely(!ret))
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
        set_current_state(TASK_RUNNING);
 
-       if (unlikely(ret))
+       if (unlikely(ret)) {
                remove_waiter(lock, &waiter);
+               rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+       }
 
        /*
         * try_to_take_rt_mutex() sets the waiter bit
@@ -825,22 +1217,31 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 /*
  * Slow path try-lock function:
  */
-static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock)
+static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
-       int ret = 0;
+       int ret;
 
+       /*
+        * If the lock already has an owner we fail to get the lock.
+        * This can be done without taking the @lock->wait_lock as
+        * it is only being read, and this is a trylock anyway.
+        */
+       if (rt_mutex_owner(lock))
+               return 0;
+
+       /*
+        * The mutex currently has no owner. Lock the wait lock and
+        * try to acquire the lock.
+        */
        raw_spin_lock(&lock->wait_lock);
 
-       if (likely(rt_mutex_owner(lock) != current)) {
+       ret = try_to_take_rt_mutex(lock, current, NULL);
 
-               ret = try_to_take_rt_mutex(lock, current, NULL);
-               /*
-                * try_to_take_rt_mutex() sets the lock waiters
-                * bit unconditionally. Clean this up.
-                */
-               fixup_rt_mutex_waiters(lock);
-       }
+       /*
+        * try_to_take_rt_mutex() sets the lock waiters bit
+        * unconditionally. Clean this up.
+        */
+       fixup_rt_mutex_waiters(lock);
 
        raw_spin_unlock(&lock->wait_lock);
 
@@ -859,12 +1260,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 
        rt_mutex_deadlock_account_unlock(current);
 
-       if (!rt_mutex_has_waiters(lock)) {
-               lock->owner = NULL;
-               raw_spin_unlock(&lock->wait_lock);
-               return;
+       /*
+        * We must be careful here if the fast path is enabled. If we
+        * have no waiters queued we cannot set owner to NULL here
+        * because of:
+        *
+        * foo->lock->owner = NULL;
+        *                      rtmutex_lock(foo->lock);   <- fast path
+        *                      free = atomic_dec_and_test(foo->refcnt);
+        *                      rtmutex_unlock(foo->lock); <- fast path
+        *                      if (free)
+        *                              kfree(foo);
+        * raw_spin_unlock(foo->lock->wait_lock);
+        *
+        * So for the fastpath enabled kernel:
+        *
+        * Nothing can set the waiters bit as long as we hold
+        * lock->wait_lock. So we do the following sequence:
+        *
+        *      owner = rt_mutex_owner(lock);
+        *      clear_rt_mutex_waiters(lock);
+        *      raw_spin_unlock(&lock->wait_lock);
+        *      if (cmpxchg(&lock->owner, owner, 0) == owner)
+        *              return;
+        *      goto retry;
+        *
+        * The fastpath disabled variant is simple as all access to
+        * lock->owner is serialized by lock->wait_lock:
+        *
+        *      lock->owner = NULL;
+        *      raw_spin_unlock(&lock->wait_lock);
+        */
+       while (!rt_mutex_has_waiters(lock)) {
+               /* Drops lock->wait_lock ! */
+               if (unlock_rt_mutex_safe(lock) == true)
+                       return;
+               /* Relock the rtmutex and try again */
+               raw_spin_lock(&lock->wait_lock);
        }
 
+       /*
+        * The wakeup next waiter path does not suffer from the above
+        * race. See the comments there.
+        */
        wakeup_next_waiter(lock);
 
        raw_spin_unlock(&lock->wait_lock);
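The retry loop above leans on unlock_rt_mutex_safe(), whose body is not visible in this hunk. A minimal sketch, reconstructed from the sequence spelled out in the comment (treat the exact details as an assumption):

/*
 * Sketch only, not copied from the patch. Must be called with
 * lock->wait_lock held; drops it. Returns true if the cmpxchg
 * released the lock, false if the waiters bit got set again in the
 * meantime and the caller must relock and retry.
 */
static bool unlock_rt_mutex_safe(struct rt_mutex *lock)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock(&lock->wait_lock);
	/*
	 * If a waiter queued up after we dropped wait_lock, the
	 * HAS_WAITERS bit is set in lock->owner again, so the cmpxchg
	 * fails and the caller takes the slow wakeup path instead.
	 */
	return rt_mutex_cmpxchg(lock, owner, NULL);
}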
@@ -881,30 +1319,31 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
  */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
-                 int detect_deadlock,
                  int (*slowfn)(struct rt_mutex *lock, int state,
                                struct hrtimer_sleeper *timeout,
-                               int detect_deadlock))
+                               enum rtmutex_chainwalk chwalk))
 {
-       if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+       if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
-               return slowfn(lock, state, NULL, detect_deadlock);
+               return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
 }
 
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
-                       struct hrtimer_sleeper *timeout, int detect_deadlock,
+                       struct hrtimer_sleeper *timeout,
+                       enum rtmutex_chainwalk chwalk,
                        int (*slowfn)(struct rt_mutex *lock, int state,
                                      struct hrtimer_sleeper *timeout,
-                                     int detect_deadlock))
+                                     enum rtmutex_chainwalk chwalk))
 {
-       if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
+       if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+           likely(rt_mutex_cmpxchg(lock, NULL, current))) {
                rt_mutex_deadlock_account_lock(lock, current);
                return 0;
        } else
-               return slowfn(lock, state, timeout, detect_deadlock);
+               return slowfn(lock, state, timeout, chwalk);
 }
 
 static inline int
@@ -937,54 +1376,61 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
        might_sleep();
 
-       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, 0, rt_mutex_slowlock);
+       rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 
 /**
  * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
  *
- * @lock:              the rt_mutex to be locked
- * @detect_deadlock:   deadlock detection on/off
+ * @lock:              the rt_mutex to be locked
  *
  * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
- * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
+ *  0          on success
+ * -EINTR      when interrupted by a signal
  */
-int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock,
-                                                int detect_deadlock)
+int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
        might_sleep();
 
-       return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE,
-                                detect_deadlock, rt_mutex_slowlock);
+       return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
+/*
+ * Futex variant with full deadlock detection.
+ */
+int rt_mutex_timed_futex_lock(struct rt_mutex *lock,
+                             struct hrtimer_sleeper *timeout)
+{
+       might_sleep();
+
+       return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+                                      RT_MUTEX_FULL_CHAINWALK,
+                                      rt_mutex_slowlock);
+}
+
 /**
  * rt_mutex_timed_lock - lock a rt_mutex interruptible
  *                     the timeout structure is provided
  *                     by the caller
  *
- * @lock:              the rt_mutex to be locked
+ * @lock:              the rt_mutex to be locked
  * @timeout:           timeout structure or NULL (no timeout)
- * @detect_deadlock:   deadlock detection on/off
  *
  * Returns:
- *  0          on success
- * -EINTR      when interrupted by a signal
+ *  0          on success
+ * -EINTR      when interrupted by a signal
  * -ETIMEDOUT  when the timeout expired
- * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
  */
 int
-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout,
-                   int detect_deadlock)
+rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
        might_sleep();
 
        return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-                                      detect_deadlock, rt_mutex_slowlock);
+                                      RT_MUTEX_MIN_CHAINWALK,
+                                      rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
@@ -1090,7 +1536,6 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
  * @lock:              the rt_mutex to take
  * @waiter:            the pre-initialized rt_mutex_waiter
  * @task:              the task to prepare
- * @detect_deadlock:   perform deadlock detection (1) or not (0)
  *
  * Returns:
  *  0 - task blocked on lock
@@ -1101,7 +1546,7 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
  */
 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                              struct rt_mutex_waiter *waiter,
-                             struct task_struct *task, int detect_deadlock)
+                             struct task_struct *task)
 {
        int ret;
 
@@ -1112,7 +1557,9 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                return 1;
        }
 
-       ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+       /* We enforce deadlock detection for futexes */
+       ret = task_blocks_on_rt_mutex(lock, waiter, task,
+                                     RT_MUTEX_FULL_CHAINWALK);
 
        if (ret && !rt_mutex_owner(lock)) {
                /*
@@ -1158,22 +1605,20 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
  * rt_mutex_finish_proxy_lock() - Complete lock acquisition
  * @lock:              the rt_mutex we were woken on
  * @to:                        the timeout, null if none. hrtimer should already have
- *                     been started.
+ *                     been started.
  * @waiter:            the pre-initialized rt_mutex_waiter
- * @detect_deadlock:   perform deadlock detection (1) or not (0)
  *
 * Complete the lock acquisition started on our behalf by another thread.
  *
  * Returns:
  *  0 - success
- * <0 - error, one of -EINTR, -ETIMEDOUT, or -EDEADLK
+ * <0 - error, one of -EINTR, -ETIMEDOUT
  *
  * Special API call for PI-futex requeue support
  */
 int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                               struct hrtimer_sleeper *to,
-                              struct rt_mutex_waiter *waiter,
-                              int detect_deadlock)
+                              struct rt_mutex_waiter *waiter)
 {
        int ret;
 
index a1a1dd0..c406058 100644 (file)
 #define debug_rt_mutex_init(m, n)                      do { } while (0)
 #define debug_rt_mutex_deadlock(d, a ,l)               do { } while (0)
 #define debug_rt_mutex_print_deadlock(w)               do { } while (0)
-#define debug_rt_mutex_detect_deadlock(w,d)            (d)
 #define debug_rt_mutex_reset_waiter(w)                 do { } while (0)
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+       WARN(1, "rtmutex deadlock detected\n");
+}
+
+static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w,
+                                                 enum rtmutex_chainwalk walk)
+{
+       return walk == RT_MUTEX_FULL_CHAINWALK;
+}
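For contrast with the stub above, the CONFIG_DEBUG_RT_MUTEXES build carries its own debug_rt_mutex_detect_deadlock(); its body is outside this diff, so the following is only a plausible sketch of a debug variant that forces full deadlock detection whenever a waiter exists:

/* Debug-build sketch (assumption, not shown in this diff): */
static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
						  enum rtmutex_chainwalk walk)
{
	/* In debug builds, always run full detection if there is a waiter. */
	return waiter != NULL;
}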
index 7431a9c..8552125 100644 (file)
@@ -101,6 +101,21 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
                ((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
 }
 
+/*
+ * Constants for rt mutex functions which have a selectable deadlock
+ * detection.
+ *
+ * RT_MUTEX_MIN_CHAINWALK:     Stops the lock chain walk when there are
+ *                             no further PI adjustments to be made.
+ *
+ * RT_MUTEX_FULL_CHAINWALK:    Invoke deadlock detection with a full
+ *                             walk of the lock chain.
+ */
+enum rtmutex_chainwalk {
+       RT_MUTEX_MIN_CHAINWALK,
+       RT_MUTEX_FULL_CHAINWALK,
+};
+
 /*
  * PI-futex support (proxy locking functions, etc.):
  */
@@ -111,12 +126,11 @@ extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
                                  struct task_struct *proxy_owner);
 extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                                     struct rt_mutex_waiter *waiter,
-                                    struct task_struct *task,
-                                    int detect_deadlock);
+                                    struct task_struct *task);
 extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                                      struct hrtimer_sleeper *to,
-                                     struct rt_mutex_waiter *waiter,
-                                     int detect_deadlock);
+                                     struct rt_mutex_waiter *waiter);
+extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
index 9be8a91..2c93571 100644 (file)
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
        unsigned long flags;
 
        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
-               ret = (sem->activity != 0);
+               ret = (sem->count != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
-       sem->activity = 0;
+       sem->count = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
 }
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
 
-       sem->activity += woken;
+       sem->count += woken;
 
  out:
        return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+       if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
-               sem->activity++;
+               sem->count++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+       if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
-               sem->activity++;
+               sem->count++;
                ret = 1;
        }
 
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
                 * itself to sleep, waiting for the system or someone
                 * else at the head of the wait list to wake it up.
                 */
-               if (sem->activity == 0)
+               if (sem->count == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
-       sem->activity = -1;
+       sem->count = -1;
        list_del(&waiter.list);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity == 0) {
+       if (sem->count == 0) {
                /* got the lock */
-               sem->activity = -1;
+               sem->count = -1;
                ret = 1;
        }
 
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+       if (--sem->count == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       sem->activity = 0;
+       sem->count = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);
 
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       sem->activity = 1;
+       sem->count = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);
 
index dacc321..d6203fa 100644 (file)
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
-       sem->osq = NULL;
+       osq_lock_init(&sem->osq);
 #endif
 }
 
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
        return false;
 }
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * Try to acquire write lock before the writer has been put on wait queue.
  */
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
        struct task_struct *owner;
-       bool on_cpu = true;
+       bool on_cpu = false;
 
        if (need_resched())
-               return 0;
+               return false;
 
        rcu_read_lock();
        owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
        rcu_read_unlock();
 
        /*
-        * If sem->owner is not set, the rwsem owner may have
-        * just acquired it and not set the owner yet or the rwsem
-        * has been released.
+        * If sem->owner is not set, yet we have just recently entered the
+        * slowpath, then there is a possibility reader(s) may have the lock.
+        * To be safe, avoid spinning in these situations.
         */
        return on_cpu;
 }
@@ -329,7 +329,7 @@ bool rwsem_spin_on_owner(struct rw_semaphore *sem, struct task_struct *owner)
                if (need_resched())
                        break;
 
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
        }
        rcu_read_unlock();
 
@@ -381,7 +381,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
                 * memory barriers as we'll eventually observe the right
                 * values at the cost of a few extra spins.
                 */
-               arch_mutex_cpu_relax();
+               cpu_relax_lowlatency();
        }
        osq_unlock(&sem->osq);
 done:
index 42f806d..e2d3bc7 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/atomic.h>
 
-#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
        sem->owner = current;
index 81e727c..ae79ce6 100644 (file)
@@ -60,7 +60,6 @@
 #include <linux/jump_label.h>
 #include <linux/pfn.h>
 #include <linux/bsearch.h>
-#include <linux/fips.h>
 #include <uapi/linux/module.h>
 #include "module-internal.h"
 
@@ -2448,9 +2447,6 @@ static int module_sig_check(struct load_info *info)
        }
 
        /* Not having a signature is only an error if we're strict. */
-       if (err < 0 && fips_enabled)
-               panic("Module verification failed with error %d in FIPS mode\n",
-                     err);
        if (err == -ENOKEY && !sig_enforce)
                err = 0;
 
index 49e0a20..a9dfa79 100644 (file)
@@ -35,6 +35,7 @@
 
 static int nocompress;
 static int noresume;
+static int nohibernate;
 static int resume_wait;
 static unsigned int resume_delay;
 static char resume_file[256] = CONFIG_PM_STD_PARTITION;
@@ -62,6 +63,11 @@ bool freezer_test_done;
 
 static const struct platform_hibernation_ops *hibernation_ops;
 
+bool hibernation_available(void)
+{
+       return (nohibernate == 0);
+}
+
 /**
  * hibernation_set_ops - Set the global hibernate operations.
  * @ops: Hibernation operations to use in subsequent hibernation transitions.
@@ -365,7 +371,6 @@ int hibernation_snapshot(int platform_mode)
        }
 
        suspend_console();
-       ftrace_stop();
        pm_restrict_gfp_mask();
 
        error = dpm_suspend(PMSG_FREEZE);
@@ -391,7 +396,6 @@ int hibernation_snapshot(int platform_mode)
        if (error || !in_suspend)
                pm_restore_gfp_mask();
 
-       ftrace_start();
        resume_console();
        dpm_complete(msg);
 
@@ -494,7 +498,6 @@ int hibernation_restore(int platform_mode)
 
        pm_prepare_console();
        suspend_console();
-       ftrace_stop();
        pm_restrict_gfp_mask();
        error = dpm_suspend_start(PMSG_QUIESCE);
        if (!error) {
@@ -502,7 +505,6 @@ int hibernation_restore(int platform_mode)
                dpm_resume_end(PMSG_RECOVER);
        }
        pm_restore_gfp_mask();
-       ftrace_start();
        resume_console();
        pm_restore_console();
        return error;
@@ -529,7 +531,6 @@ int hibernation_platform_enter(void)
 
        entering_platform_hibernation = true;
        suspend_console();
-       ftrace_stop();
        error = dpm_suspend_start(PMSG_HIBERNATE);
        if (error) {
                if (hibernation_ops->recover)
@@ -573,7 +574,6 @@ int hibernation_platform_enter(void)
  Resume_devices:
        entering_platform_hibernation = false;
        dpm_resume_end(PMSG_RESTORE);
-       ftrace_start();
        resume_console();
 
  Close:
@@ -642,6 +642,11 @@ int hibernate(void)
 {
        int error;
 
+       if (!hibernation_available()) {
+               pr_debug("PM: Hibernation not available.\n");
+               return -EPERM;
+       }
+
        lock_system_sleep();
        /* The snapshot device should not be opened while we're running */
        if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
@@ -734,7 +739,7 @@ static int software_resume(void)
        /*
         * If the user said "noresume".. bail out early.
         */
-       if (noresume)
+       if (noresume || !hibernation_available())
                return 0;
 
        /*
@@ -900,6 +905,9 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
        int i;
        char *start = buf;
 
+       if (!hibernation_available())
+               return sprintf(buf, "[disabled]\n");
+
        for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
                if (!hibernation_modes[i])
                        continue;
@@ -934,6 +942,9 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
        char *p;
        int mode = HIBERNATION_INVALID;
 
+       if (!hibernation_available())
+               return -EPERM;
+
        p = memchr(buf, '\n', n);
        len = p ? p - buf : n;
 
@@ -1101,6 +1112,10 @@ static int __init hibernate_setup(char *str)
                noresume = 1;
        else if (!strncmp(str, "nocompress", 10))
                nocompress = 1;
+       else if (!strncmp(str, "no", 2)) {
+               noresume = 1;
+               nohibernate = 1;
+       }
        return 1;
 }
 
@@ -1125,9 +1140,23 @@ static int __init resumedelay_setup(char *str)
        return 1;
 }
 
+static int __init nohibernate_setup(char *str)
+{
+       noresume = 1;
+       nohibernate = 1;
+       return 1;
+}
+
+static int __init kaslr_nohibernate_setup(char *str)
+{
+       return nohibernate_setup(str);
+}
+
 __setup("noresume", noresume_setup);
 __setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
 __setup("hibernate=", hibernate_setup);
 __setup("resumewait", resumewait_setup);
 __setup("resumedelay=", resumedelay_setup);
+__setup("nohibernate", nohibernate_setup);
+__setup("kaslr", kaslr_nohibernate_setup);
index 573410d..8e90f33 100644 (file)
@@ -300,13 +300,11 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
                        s += sprintf(s,"%s ", pm_states[i].label);
 
 #endif
-#ifdef CONFIG_HIBERNATION
-       s += sprintf(s, "%s\n", "disk");
-#else
+       if (hibernation_available())
+               s += sprintf(s, "disk ");
        if (s != buf)
                /* convert the last space to a newline */
                *(s-1) = '\n';
-#endif
        return (s - buf);
 }
 
index 0ca8d83..4ee194e 100644 (file)
@@ -186,6 +186,7 @@ void thaw_processes(void)
 
        printk("Restarting tasks ... ");
 
+       __usermodehelper_set_disable_depth(UMH_FREEZING);
        thaw_workqueues();
 
        read_lock(&tasklist_lock);
index 4dd8822..4b736b4 100644 (file)
@@ -248,7 +248,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
                goto Platform_wake;
        }
 
-       ftrace_stop();
        error = disable_nonboot_cpus();
        if (error || suspend_test(TEST_CPUS))
                goto Enable_cpus;
@@ -275,7 +274,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 
  Enable_cpus:
        enable_nonboot_cpus();
-       ftrace_start();
 
  Platform_wake:
        if (need_suspend_ops(state) && suspend_ops->wake)
@@ -306,7 +304,7 @@ int suspend_devices_and_enter(suspend_state_t state)
                error = suspend_ops->begin(state);
                if (error)
                        goto Close;
-       } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) {
+       } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
                error = freeze_ops->begin();
                if (error)
                        goto Close;
@@ -335,7 +333,7 @@ int suspend_devices_and_enter(suspend_state_t state)
  Close:
        if (need_suspend_ops(state) && suspend_ops->end)
                suspend_ops->end();
-       else if (state == PM_SUSPEND_FREEZE && freeze_ops->end)
+       else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
                freeze_ops->end();
 
        return error;
index 98d3575..526e891 100644 (file)
@@ -49,6 +49,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
        struct snapshot_data *data;
        int error;
 
+       if (!hibernation_available())
+               return -EPERM;
+
        lock_system_sleep();
 
        if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
index ea2d5f6..13e839d 100644 (file)
@@ -1416,9 +1416,10 @@ static int have_callable_console(void)
 /*
  * Can we actually use the console at this time on this cpu?
  *
- * Console drivers may assume that per-cpu resources have been allocated. So
- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
- * call them until this CPU is officially up.
+ * Console drivers may assume that per-cpu resources have
+ * been allocated. So unless they're explicitly marked as
+ * being able to cope (CON_ANYTIME) don't call them until
+ * this CPU is officially up.
  */
 static inline int can_use_console(unsigned int cpu)
 {
@@ -1431,10 +1432,8 @@ static inline int can_use_console(unsigned int cpu)
  * console_lock held, and 'console_locked' set) if it
  * is successful, false otherwise.
  */
-static int console_trylock_for_printk(void)
+static int console_trylock_for_printk(unsigned int cpu)
 {
-       unsigned int cpu = smp_processor_id();
-
        if (!console_trylock())
                return 0;
        /*
@@ -1609,8 +1608,7 @@ asmlinkage int vprintk_emit(int facility, int level,
                 */
                if (!oops_in_progress && !lockdep_recursing(current)) {
                        recursion_bug = 1;
-                       local_irq_restore(flags);
-                       return 0;
+                       goto out_restore_irqs;
                }
                zap_locks();
        }
@@ -1718,27 +1716,21 @@ asmlinkage int vprintk_emit(int facility, int level,
 
        logbuf_cpu = UINT_MAX;
        raw_spin_unlock(&logbuf_lock);
-       lockdep_on();
-       local_irq_restore(flags);
 
        /* If called from the scheduler, we can not call up(). */
-       if (in_sched)
-               return printed_len;
-
-       /*
-        * Disable preemption to avoid being preempted while holding
-        * console_sem which would prevent anyone from printing to console
-        */
-       preempt_disable();
-       /*
-        * Try to acquire and then immediately release the console semaphore.
-        * The release will print out buffers and wake up /dev/kmsg and syslog()
-        * users.
-        */
-       if (console_trylock_for_printk())
-               console_unlock();
-       preempt_enable();
+       if (!in_sched) {
+               /*
+                * Try to acquire and then immediately release the console
+                * semaphore.  The release will print out buffers and wake up
+                * /dev/kmsg and syslog() users.
+                */
+               if (console_trylock_for_printk(this_cpu))
+                       console_unlock();
+       }
 
+       lockdep_on();
+out_restore_irqs:
+       local_irq_restore(flags);
        return printed_len;
 }
 EXPORT_SYMBOL(vprintk_emit);
index adf9862..54e7522 100644 (file)
 #include <linux/compat.h>
 
 
-static int ptrace_trapping_sleep_fn(void *flags)
-{
-       schedule();
-       return 0;
-}
-
 /*
  * ptrace a task: make the debugger its new parent and
  * move it to the ptrace list.
@@ -371,7 +365,7 @@ unlock_creds:
 out:
        if (!retval) {
                wait_on_bit(&task->jobctl, JOBCTL_TRAPPING_BIT,
-                           ptrace_trapping_sleep_fn, TASK_UNINTERRUPTIBLE);
+                           TASK_UNINTERRUPTIBLE);
                proc_ptrace_connector(task, PTRACE_ATTACH);
        }
 
index bfda272..ff1a6de 100644 (file)
@@ -99,6 +99,10 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head)
 
 void kfree(const void *);
 
+/*
+ * Reclaim the specified callback, either by invoking it (non-lazy case)
+ * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
+ */
 static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
 {
        unsigned long offset = (unsigned long)head->func;
@@ -108,12 +112,12 @@ static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
                RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset));
                kfree((void *)head - offset);
                rcu_lock_release(&rcu_callback_map);
-               return 1;
+               return true;
        } else {
                RCU_TRACE(trace_rcu_invoke_callback(rn, head));
                head->func(head);
                rcu_lock_release(&rcu_callback_map);
-               return 0;
+               return false;
        }
 }
 
index 7fa34f8..948a769 100644 (file)
@@ -18,7 +18,7 @@
  * Copyright (C) IBM Corporation, 2005, 2006
  *
  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
- *       Josh Triplett <josh@freedesktop.org>
+ *       Josh Triplett <josh@joshtriplett.org>
  *
  * See also:  Documentation/RCU/torture.txt
  */
@@ -51,7 +51,7 @@
 #include <linux/torture.h>
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@freedesktop.org>");
+MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and Josh Triplett <josh@joshtriplett.org>");
 
 
 torture_param(int, fqs_duration, 0,
index c639556..e037f3e 100644 (file)
@@ -298,9 +298,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
 
        idx = ACCESS_ONCE(sp->completed) & 0x1;
        preempt_disable();
-       ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
+       __this_cpu_inc(sp->per_cpu_ref->c[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
-       ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
+       __this_cpu_inc(sp->per_cpu_ref->seq[idx]);
        preempt_enable();
        return idx;
 }
index f1ba773..1b70cb6 100644 (file)
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
        rdp->passed_quiesce = 1;
 }
 
+static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+       .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+       .dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+       .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+       .dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+};
+
+/*
+ * Let the RCU core know that this CPU has gone through the scheduler,
+ * which is a quiescent state.  This is called when the need for a
+ * quiescent state is urgent, so we burn an atomic operation and full
+ * memory barriers to let the RCU core know about it, regardless of what
+ * this CPU might (or might not) do in the near future.
+ *
+ * We inform the RCU core by emulating a zero-duration dyntick-idle
+ * period, which we in turn do by incrementing the ->dynticks counter
+ * by two.
+ */
+static void rcu_momentary_dyntick_idle(void)
+{
+       unsigned long flags;
+       struct rcu_data *rdp;
+       struct rcu_dynticks *rdtp;
+       int resched_mask;
+       struct rcu_state *rsp;
+
+       local_irq_save(flags);
+
+       /*
+        * Yes, we can lose flag-setting operations.  This is OK, because
+        * the flag will be set again after some delay.
+        */
+       resched_mask = raw_cpu_read(rcu_sched_qs_mask);
+       raw_cpu_write(rcu_sched_qs_mask, 0);
+
+       /* Find the flavor that needs a quiescent state. */
+       for_each_rcu_flavor(rsp) {
+               rdp = raw_cpu_ptr(rsp->rda);
+               if (!(resched_mask & rsp->flavor_mask))
+                       continue;
+               smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
+               if (ACCESS_ONCE(rdp->mynode->completed) !=
+                   ACCESS_ONCE(rdp->cond_resched_completed))
+                       continue;
+
+               /*
+                * Pretend to be momentarily idle for the quiescent state.
+                * This allows the grace-period kthread to record the
+                * quiescent state, with no need for this CPU to do anything
+                * further.
+                */
+               rdtp = this_cpu_ptr(&rcu_dynticks);
+               smp_mb__before_atomic(); /* Earlier stuff before QS. */
+               atomic_add(2, &rdtp->dynticks);  /* QS. */
+               smp_mb__after_atomic(); /* Later stuff after QS. */
+               break;
+       }
+       local_irq_restore(flags);
+}
+
 /*
  * Note a context switch.  This is a quiescent state for RCU-sched,
  * and requires special handling for preemptible RCU.
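
Why incrementing ->dynticks by two emulates a zero-duration idle period: the counter is kept odd while the CPU is non-idle (note the ATOMIC_INIT(1) above) and even while idle, and the grace-period machinery compares snapshots of it. A simplified sketch of that snapshot comparison, not the exact kernel code:

    unsigned int snap = 5;        /* earlier snapshot: odd => CPU non-idle */
    unsigned int curr = snap + 2; /* after atomic_add(2, &rdtp->dynticks)  */
    if ((curr & 0x1) == 0 ||      /* CPU is idle right now, or ...         */
        curr - snap >= 2)         /* ... it passed through an idle period  */
        ;                         /* either way, count it as a QS          */
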
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
        trace_rcu_utilization(TPS("Start context switch"));
        rcu_sched_qs(cpu);
        rcu_preempt_note_context_switch(cpu);
+       if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+               rcu_momentary_dyntick_idle();
        trace_rcu_utilization(TPS("End context switch"));
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-       .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
-       .dynticks = ATOMIC_INIT(1),
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-       .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
-       .dynticks_idle = ATOMIC_INIT(1),
-#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-};
-
 static long blimit = 10;       /* Maximum callbacks per rcu_do_batch. */
 static long qhimark = 10000;   /* If this many pending, ignore blimit. */
 static long qlowmark = 100;    /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
 
+/*
+ * How long the grace period must be before we start recruiting
+ * quiescent-state help from rcu_note_context_switch().
+ */
+static ulong jiffies_till_sched_qs = HZ / 20;
+module_param(jiffies_till_sched_qs, ulong, 0644);
+
 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                                  struct rcu_data *rdp);
 static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                                    bool *isidle, unsigned long *maxj)
 {
        unsigned int curr;
+       int *rcrmp;
        unsigned int snap;
 
        curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
        }
 
        /*
-        * There is a possibility that a CPU in adaptive-ticks state
-        * might run in the kernel with the scheduling-clock tick disabled
-        * for an extended time period.  Invoke rcu_kick_nohz_cpu() to
-        * force the CPU to restart the scheduling-clock tick in this
-        * CPU is in this state.
-        */
-       rcu_kick_nohz_cpu(rdp->cpu);
-
-       /*
-        * Alternatively, the CPU might be running in the kernel
-        * for an extended period of time without a quiescent state.
-        * Attempt to force the CPU through the scheduler to gain the
-        * needed quiescent state, but only if the grace period has gone
-        * on for an uncommonly long time.  If there are many stuck CPUs,
-        * we will beat on the first one until it gets unstuck, then move
-        * to the next.  Only do this for the primary flavor of RCU.
+        * A CPU running for an extended time within the kernel can
+        * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
+        * even context-switching back and forth between a pair of
+        * in-kernel CPU-bound tasks cannot advance grace periods.
+        * So if the grace period is old enough, make the CPU pay attention.
+        * Note that the unsynchronized assignments to the per-CPU
+        * rcu_sched_qs_mask variable are safe.  Yes, setting of
+        * bits can be lost, but they will be set again on the next
+        * force-quiescent-state pass.  So lost bit sets do not result
+        * in incorrect behavior, merely in a grace period lasting
+        * a few jiffies longer than it might otherwise.  Because
+        * there are at most four threads involved, and because the
+        * updates are only once every few jiffies, the probability of
+        * lossage (and thus of slight grace-period extension) is
+        * quite low.
+        *
+        * Note that if the jiffies_till_sched_qs boot/sysfs parameter
+        * is set too high, we override with half of the RCU CPU stall
+        * warning delay.
         */
-       if (rdp->rsp == rcu_state_p &&
+       rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+       if (ULONG_CMP_GE(jiffies,
+                        rdp->rsp->gp_start + jiffies_till_sched_qs) ||
            ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-               rdp->rsp->jiffies_resched += 5;
-               resched_cpu(rdp->cpu);
+               if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+                       ACCESS_ONCE(rdp->cond_resched_completed) =
+                               ACCESS_ONCE(rdp->mynode->completed);
+                       smp_mb(); /* ->cond_resched_completed before *rcrmp. */
+                       ACCESS_ONCE(*rcrmp) =
+                               ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+                       rdp->rsp->jiffies_resched += 5; /* Enable beating. */
+               } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
+                       /* Time to beat on that CPU again! */
+                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+                       rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+               }
        }
 
        return 0;
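
Taken together with rcu_momentary_dyntick_idle() above, these hunks form a two-step handshake between the force-quiescent-state scan and the target CPU. (Since jiffies_till_sched_qs is created with mode 0644, it should also be adjustable at runtime, presumably via /sys/module/rcutree/parameters/jiffies_till_sched_qs, matching the rcutree. boot-parameter prefix.) A condensed sketch of the handshake, using a hypothetical helper name:

    /* Hypothetical condensation of the hunk above; not the kernel code. */
    static void fqs_recruit_sched_qs(struct rcu_data *rdp, int *rcrmp)
    {
            /* 1. Record which grace period needs the help. */
            ACCESS_ONCE(rdp->cond_resched_completed) =
                    ACCESS_ONCE(rdp->mynode->completed);
            smp_mb();       /* ... before the mask update below. */
            /* 2. Set this flavor's bit in the target CPU's mask. */
            ACCESS_ONCE(*rcrmp) = ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
            /* 3. Kick the CPU: its next rcu_note_context_switch() sees
             *    the bit and calls rcu_momentary_dyntick_idle(). */
            resched_cpu(rdp->cpu);
    }
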
@@ -932,10 +1013,7 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 }
 
 /*
- * Dump stacks of all tasks running on stalled CPUs.  This is a fallback
- * for architectures that do not implement trigger_all_cpu_backtrace().
- * The NMI-triggered stack traces are more accurate because they are
- * printed by the target CPU.
+ * Dump stacks of all tasks running on stalled CPUs.
  */
 static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
 {
@@ -1013,7 +1091,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
               (long)rsp->gpnum, (long)rsp->completed, totqlen);
        if (ndetected == 0)
                pr_err("INFO: Stall ended before state dump start\n");
-       else if (!trigger_all_cpu_backtrace())
+       else
                rcu_dump_cpu_stacks(rsp);
 
        /* Complain about tasks blocking the grace period. */
@@ -1044,8 +1122,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
        pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
                jiffies - rsp->gp_start,
                (long)rsp->gpnum, (long)rsp->completed, totqlen);
-       if (!trigger_all_cpu_backtrace())
-               dump_stack();
+       rcu_dump_cpu_stacks(rsp);
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
        if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
@@ -1224,10 +1301,16 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
         * believe that a grace period is in progress, then we must wait
         * for the one following, which is in "c".  Because our request
         * will be noticed at the end of the current grace period, we don't
-        * need to explicitly start one.
+        * need to explicitly start one.  We only do the lockless check
+        * of rnp_root's fields if the current rcu_node structure thinks
+        * there is no grace period in flight, and because we hold rnp->lock,
+        * the only possible change is when rnp_root's two fields are
+        * equal, in which case rnp_root->gpnum might be concurrently
+        * incremented.  But that is OK, as it will just result in our
+        * doing some extra useless work.
         */
        if (rnp->gpnum != rnp->completed ||
-           ACCESS_ONCE(rnp->gpnum) != ACCESS_ONCE(rnp->completed)) {
+           ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
                rnp->need_future_gp[c & 0x1]++;
                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
                goto out;
@@ -1564,11 +1647,6 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                            rnp->level, rnp->grplo,
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq(&rnp->lock);
-#ifdef CONFIG_PROVE_RCU_DELAY
-               if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
-                   system_state == SYSTEM_RUNNING)
-                       udelay(200);
-#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
                cond_resched();
        }
 
@@ -2266,7 +2344,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        }
        smp_mb(); /* List handling before counting for rcu_barrier(). */
        rdp->qlen_lazy -= count_lazy;
-       ACCESS_ONCE(rdp->qlen) -= count;
+       ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
        rdp->n_cbs_invoked += count;
 
        /* Reinstate batch limit if we have worked down the excess. */
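
This hunk is the first of several in this file that rewrite ACCESS_ONCE(x)++ (or +=/-=) as a plain read feeding a volatile store. The apparent motivation, sketched below: the increment form asks for both a volatile load and a volatile store, but these counters have a single updater (the owning CPU, or a lock holder), so only the store needs to be volatile for the benefit of lockless readers:

    /* sketch; x is a counter with exactly one updater            */
    ACCESS_ONCE(x)++;          /* volatile load + volatile store  */
    ACCESS_ONCE(x) = x + 1;    /* plain load, volatile store only */
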
@@ -2404,14 +2482,14 @@ static void force_quiescent_state(struct rcu_state *rsp)
        struct rcu_node *rnp_old = NULL;
 
        /* Funnel through hierarchy to reduce memory contention. */
-       rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
+       rnp = __this_cpu_read(rsp->rda->mynode);
        for (; rnp != NULL; rnp = rnp->parent) {
                ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
                      !raw_spin_trylock(&rnp->fqslock);
                if (rnp_old != NULL)
                        raw_spin_unlock(&rnp_old->fqslock);
                if (ret) {
-                       ACCESS_ONCE(rsp->n_force_qs_lh)++;
+                       rsp->n_force_qs_lh++;
                        return;
                }
                rnp_old = rnp;
@@ -2423,7 +2501,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
        smp_mb__after_unlock_lock();
        raw_spin_unlock(&rnp_old->fqslock);
        if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
-               ACCESS_ONCE(rsp->n_force_qs_lh)++;
+               rsp->n_force_qs_lh++;
                raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
                return;  /* Someone beat us to it. */
        }
@@ -2581,7 +2659,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
        unsigned long flags;
        struct rcu_data *rdp;
 
-       WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
+       WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
        if (debug_rcu_head_queue(head)) {
                /* Probable double call_rcu(), so leak the callback. */
                ACCESS_ONCE(head->func) = rcu_leak_callback;
@@ -2612,7 +2690,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
                local_irq_restore(flags);
                return;
        }
-       ACCESS_ONCE(rdp->qlen)++;
+       ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
        if (lazy)
                rdp->qlen_lazy++;
        else
@@ -3176,7 +3254,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
         * ACCESS_ONCE() to prevent the compiler from speculating
         * the increment to precede the early-exit check.
         */
-       ACCESS_ONCE(rsp->n_barrier_done)++;
+       ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
        _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
        smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3226,7 +3304,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
        /* Increment ->n_barrier_done to prevent duplicate work. */
        smp_mb(); /* Keep increment after above mechanism. */
-       ACCESS_ONCE(rsp->n_barrier_done)++;
+       ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
        _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
        smp_mb(); /* Keep increment before caller's subsequent code. */
@@ -3483,14 +3561,17 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp)
 static void __init rcu_init_one(struct rcu_state *rsp,
                struct rcu_data __percpu *rda)
 {
-       static char *buf[] = { "rcu_node_0",
-                              "rcu_node_1",
-                              "rcu_node_2",
-                              "rcu_node_3" };  /* Match MAX_RCU_LVLS */
-       static char *fqs[] = { "rcu_node_fqs_0",
-                              "rcu_node_fqs_1",
-                              "rcu_node_fqs_2",
-                              "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
+       static const char * const buf[] = {
+               "rcu_node_0",
+               "rcu_node_1",
+               "rcu_node_2",
+               "rcu_node_3" };  /* Match MAX_RCU_LVLS */
+       static const char * const fqs[] = {
+               "rcu_node_fqs_0",
+               "rcu_node_fqs_1",
+               "rcu_node_fqs_2",
+               "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
+       static u8 fl_mask = 0x1;
        int cpustride = 1;
        int i;
        int j;
@@ -3509,6 +3590,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        for (i = 1; i < rcu_num_lvls; i++)
                rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
        rcu_init_levelspread(rsp);
+       rsp->flavor_mask = fl_mask;
+       fl_mask <<= 1;
 
        /* Initialize the elements themselves, starting from the leaves. */
 
index bf2c1e6..71e64c7 100644 (file)
@@ -172,6 +172,14 @@ struct rcu_node {
                                /*  queued on this rcu_node structure that */
                                /*  are blocking the current grace period, */
                                /*  there can be no such task. */
+       struct completion boost_completion;
+                               /* Used to ensure that the rt_mutex used */
+                               /*  to carry out the boosting is fully */
+                               /*  released with no future boostee accesses */
+                               /*  before that rt_mutex is re-initialized. */
+       struct rt_mutex boost_mtx;
+                               /* Used only for the priority-boosting */
+                               /*  side effect, not as a lock. */
        unsigned long boost_time;
                                /* When to start boosting (jiffies). */
        struct task_struct *boost_kthread_task;
@@ -307,6 +315,9 @@ struct rcu_data {
        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
        unsigned long offline_fqs;      /* Kicked due to being offline. */
+       unsigned long cond_resched_completed;
+                                       /* Grace period that needs help */
+                                       /*  from cond_resched(). */
 
        /* 5) __rcu_pending() statistics. */
        unsigned long n_rcu_pending;    /* rcu_pending() calls since boot. */
@@ -331,11 +342,29 @@ struct rcu_data {
        struct rcu_head **nocb_tail;
        atomic_long_t nocb_q_count;     /* # CBs waiting for kthread */
        atomic_long_t nocb_q_count_lazy; /*  (approximate). */
+       struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */
+       struct rcu_head **nocb_follower_tail;
+       atomic_long_t nocb_follower_count; /* # CBs ready to invoke. */
+       atomic_long_t nocb_follower_count_lazy; /*  (approximate). */
        int nocb_p_count;               /* # CBs being invoked by kthread */
        int nocb_p_count_lazy;          /*  (approximate). */
        wait_queue_head_t nocb_wq;      /* For nocb kthreads to sleep on. */
        struct task_struct *nocb_kthread;
        bool nocb_defer_wakeup;         /* Defer wakeup of nocb_kthread. */
+
+       /* The following fields are used by the leader, hence own cacheline. */
+       struct rcu_head *nocb_gp_head ____cacheline_internodealigned_in_smp;
+                                       /* CBs waiting for GP. */
+       struct rcu_head **nocb_gp_tail;
+       long nocb_gp_count;
+       long nocb_gp_count_lazy;
+       bool nocb_leader_wake;          /* Is the nocb leader thread awake? */
+       struct rcu_data *nocb_next_follower;
+                                       /* Next follower in wakeup chain. */
+
+       /* The following fields are used by the follower, hence new cacheline. */
+       struct rcu_data *nocb_leader ____cacheline_internodealigned_in_smp;
+                                       /* Leader CPU takes GP-end wakeups. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
        /* 8) RCU CPU stall data. */
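
Reading the new fields together, each no-CBs CPU's callbacks now traverse three lists; the flow, as far as the field comments above indicate:

    /* call_rcu() on no-CBs CPU n:
     *   rdp[n]->nocb_head           enqueued here; leader woken if needed
     *   rdp[n]->nocb_gp_head        leader splices CBs here and waits
     *                               for a grace period
     *   rdp[n]->nocb_follower_head  leader hands post-GP CBs back here;
     *                               the CPU's own kthread invokes them
     */
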
@@ -392,6 +421,7 @@ struct rcu_state {
        struct rcu_node *level[RCU_NUM_LVLS];   /* Hierarchy levels. */
        u32 levelcnt[MAX_RCU_LVLS + 1];         /* # nodes in each level. */
        u8 levelspread[RCU_NUM_LVLS];           /* kids/node in each level. */
+       u8 flavor_mask;                         /* bit in flavor mask. */
        struct rcu_data __percpu *rda;          /* pointer to per-CPU rcu_data. */
        void (*call)(struct rcu_head *head,     /* call_rcu() flavor. */
                     void (*func)(struct rcu_head *head));
@@ -563,7 +593,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
 static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
-static void rcu_kick_nohz_cpu(int cpu);
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
 static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
@@ -583,8 +613,14 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp);
 /* Sum up queue lengths for tracing. */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
 {
-       *ql = atomic_long_read(&rdp->nocb_q_count) + rdp->nocb_p_count;
-       *qll = atomic_long_read(&rdp->nocb_q_count_lazy) + rdp->nocb_p_count_lazy;
+       *ql = atomic_long_read(&rdp->nocb_q_count) +
+             rdp->nocb_p_count +
+             atomic_long_read(&rdp->nocb_follower_count) +
+             rdp->nocb_p_count + rdp->nocb_gp_count;
+       *qll = atomic_long_read(&rdp->nocb_q_count_lazy) +
+              rdp->nocb_p_count_lazy +
+              atomic_long_read(&rdp->nocb_follower_count_lazy) +
+              rdp->nocb_p_count_lazy + rdp->nocb_gp_count_lazy;
 }
 #else /* #ifdef CONFIG_RCU_NOCB_CPU */
 static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
index cbc2c45..00dc411 100644 (file)
@@ -33,6 +33,7 @@
 #define RCU_KTHREAD_PRIO 1
 
 #ifdef CONFIG_RCU_BOOST
+#include "../locking/rtmutex_common.h"
 #define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
 #else
 #define RCU_BOOST_PRIO RCU_KTHREAD_PRIO
@@ -336,7 +337,7 @@ void rcu_read_unlock_special(struct task_struct *t)
        unsigned long flags;
        struct list_head *np;
 #ifdef CONFIG_RCU_BOOST
-       struct rt_mutex *rbmp = NULL;
+       bool drop_boost_mutex = false;
 #endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        int special;
@@ -398,11 +399,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 #ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rnp->boost_tasks)
                        rnp->boost_tasks = np;
-               /* Snapshot/clear ->rcu_boost_mutex with rcu_node lock held. */
-               if (t->rcu_boost_mutex) {
-                       rbmp = t->rcu_boost_mutex;
-                       t->rcu_boost_mutex = NULL;
-               }
+               /* Snapshot ->boost_mtx ownership with rcu_node lock held. */
+               drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
@@ -427,8 +425,10 @@ void rcu_read_unlock_special(struct task_struct *t)
 
 #ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
-               if (rbmp)
-                       rt_mutex_unlock(rbmp);
+               if (drop_boost_mutex) {
+                       rt_mutex_unlock(&rnp->boost_mtx);
+                       complete(&rnp->boost_completion);
+               }
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
@@ -988,6 +988,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp)
 
 /* Because preemptible RCU does not exist, no quieting of tasks. */
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
+       __releases(rnp->lock)
 {
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -1149,7 +1150,6 @@ static void rcu_wake_cond(struct task_struct *t, int status)
 static int rcu_boost(struct rcu_node *rnp)
 {
        unsigned long flags;
-       struct rt_mutex mtx;
        struct task_struct *t;
        struct list_head *tb;
 
@@ -1200,11 +1200,15 @@ static int rcu_boost(struct rcu_node *rnp)
         * section.
         */
        t = container_of(tb, struct task_struct, rcu_node_entry);
-       rt_mutex_init_proxy_locked(&mtx, t);
-       t->rcu_boost_mutex = &mtx;
+       rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
+       init_completion(&rnp->boost_completion);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
-       rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
-       rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */
+       /* Lock only for side effect: boosts task t's priority. */
+       rt_mutex_lock(&rnp->boost_mtx);
+       rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
+
+       /* Wait for boostee to be done w/boost_mtx before reinitializing. */
+       wait_for_completion(&rnp->boost_completion);
 
        return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
               ACCESS_ONCE(rnp->boost_tasks) != NULL;
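
The completion closes a lifetime race on the now-shared boost_mtx; the ordering it enforces, as one can read it from the hunks (rough ordering only):

    /* booster (rcu_boost)              boostee (rcu_read_unlock_special)
     *   rt_mutex_init_proxy_locked(t)
     *   rt_mutex_lock(&rnp->boost_mtx)   ...exits RCU read-side section...
     *     (blocks; boosts t's prio)      rt_mutex_unlock(&rnp->boost_mtx)
     *   (acquires, then unlocks)         complete(&rnp->boost_completion)
     *   wait_for_completion() <-- boostee is done touching boost_mtx
     *                             before the next rcu_boost() reuses it
     */
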
@@ -1256,6 +1260,7 @@ static int rcu_boost_kthread(void *arg)
  * about it going away.
  */
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+       __releases(rnp->lock)
 {
        struct task_struct *t;
 
@@ -1491,6 +1496,7 @@ static void rcu_prepare_kthreads(int cpu)
 #else /* #ifdef CONFIG_RCU_BOOST */
 
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
+       __releases(rnp->lock)
 {
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
@@ -2059,6 +2065,22 @@ bool rcu_is_nocb_cpu(int cpu)
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
+/*
+ * Kick the leader kthread for this NOCB group.
+ */
+static void wake_nocb_leader(struct rcu_data *rdp, bool force)
+{
+       struct rcu_data *rdp_leader = rdp->nocb_leader;
+
+       if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
+               return;
+       if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) {
+               /* Prior xchg orders against prior callback enqueue. */
+               ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true;
+               wake_up(&rdp_leader->nocb_wq);
+       }
+}
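
wake_nocb_leader() and the nocb_leader_wake flag form a wakeup latch; a simplified view of why wakeups cannot be lost:

    /* waker:                          leader kthread:
     *   flag = true;                    wait_event(wq, flag);
     *   wake_up(&wq);                   ...gather callbacks...
     *                                   flag = false; smp_mb();
     *                                   rescan lists; if CBs found,
     *                                   set flag again (skip next sleep)
     * Either the sleeper sees flag == true, or its rescan sees the
     * newly enqueued callback, so a racing wakeup is never stranded. */
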
+
 /*
  * Enqueue the specified string of rcu_head structures onto the specified
  * CPU's no-CBs lists.  The CPU is specified by rdp, the head of the
@@ -2093,7 +2115,8 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
        len = atomic_long_read(&rdp->nocb_q_count);
        if (old_rhpp == &rdp->nocb_head) {
                if (!irqs_disabled_flags(flags)) {
-                       wake_up(&rdp->nocb_wq); /* ... if queue was empty ... */
+                       /* ... if queue was empty ... */
+                       wake_nocb_leader(rdp, false);
                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
                                            TPS("WakeEmpty"));
                } else {
@@ -2103,7 +2126,8 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
                }
                rdp->qlen_last_fqs_check = 0;
        } else if (len > rdp->qlen_last_fqs_check + qhimark) {
-               wake_up_process(t); /* ... or if many callbacks queued. */
+               /* ... or if many callbacks queued. */
+               wake_nocb_leader(rdp, true);
                rdp->qlen_last_fqs_check = LONG_MAX / 2;
                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("WakeOvf"));
        } else {
@@ -2212,14 +2236,151 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }
 
+/*
+ * Leaders come here to wait for additional callbacks to show up.
+ * This function does not return until callbacks appear.
+ */
+static void nocb_leader_wait(struct rcu_data *my_rdp)
+{
+       bool firsttime = true;
+       bool gotcbs;
+       struct rcu_data *rdp;
+       struct rcu_head **tail;
+
+wait_again:
+
+       /* Wait for callbacks to appear. */
+       if (!rcu_nocb_poll) {
+               trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
+               wait_event_interruptible(my_rdp->nocb_wq,
+                                        ACCESS_ONCE(my_rdp->nocb_leader_wake));
+               /* Memory barrier handled by smp_mb() calls below and repoll. */
+       } else if (firsttime) {
+               firsttime = false; /* Don't drown trace log with "Poll"! */
+               trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Poll");
+       }
+
+       /*
+        * Each pass through the following loop checks a follower for CBs.
+        * We are our own first follower.  Any CBs found are moved to
+        * nocb_gp_head, where they await a grace period.
+        */
+       gotcbs = false;
+       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
+               rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head);
+               if (!rdp->nocb_gp_head)
+                       continue;  /* No CBs here, try next follower. */
+
+               /* Move callbacks to wait-for-GP list, which is empty. */
+               ACCESS_ONCE(rdp->nocb_head) = NULL;
+               rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
+               rdp->nocb_gp_count = atomic_long_xchg(&rdp->nocb_q_count, 0);
+               rdp->nocb_gp_count_lazy =
+                       atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
+               gotcbs = true;
+       }
+
+       /*
+        * If there were no callbacks, sleep a bit, rescan after a
+        * memory barrier, and go retry.
+        */
+       if (unlikely(!gotcbs)) {
+               if (!rcu_nocb_poll)
+                       trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu,
+                                           "WokeEmpty");
+               flush_signals(current);
+               schedule_timeout_interruptible(1);
+
+               /* Rescan in case we were a victim of memory ordering. */
+               my_rdp->nocb_leader_wake = false;
+               smp_mb();  /* Ensure _wake false before scan. */
+               for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
+                       if (ACCESS_ONCE(rdp->nocb_head)) {
+                               /* Found CB, so short-circuit next wait. */
+                               my_rdp->nocb_leader_wake = true;
+                               break;
+                       }
+               goto wait_again;
+       }
+
+       /* Wait for one grace period. */
+       rcu_nocb_wait_gp(my_rdp);
+
+       /*
+        * We left ->nocb_leader_wake set to reduce cache thrashing.
+        * We clear it now, but recheck for new callbacks while
+        * traversing our follower list.
+        */
+       my_rdp->nocb_leader_wake = false;
+       smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */
+
+       /* Each pass through the following loop wakes a follower, if needed. */
+       for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
+               if (ACCESS_ONCE(rdp->nocb_head))
+                       my_rdp->nocb_leader_wake = true; /* No need to wait. */
+               if (!rdp->nocb_gp_head)
+                       continue; /* No CBs, so no need to wake follower. */
+
+               /* Append callbacks to follower's "done" list. */
+               tail = xchg(&rdp->nocb_follower_tail, rdp->nocb_gp_tail);
+               *tail = rdp->nocb_gp_head;
+               atomic_long_add(rdp->nocb_gp_count, &rdp->nocb_follower_count);
+               atomic_long_add(rdp->nocb_gp_count_lazy,
+                               &rdp->nocb_follower_count_lazy);
+               if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
+                       /*
+                        * List was empty, wake up the follower.
+                        * Memory barriers supplied by atomic_long_add().
+                        */
+                       wake_up(&rdp->nocb_wq);
+               }
+       }
+
+       /* If we (the leader) don't have CBs, go wait some more. */
+       if (!my_rdp->nocb_follower_head)
+               goto wait_again;
+}
+
+/*
+ * Followers come here to wait for additional callbacks to show up.
+ * This function does not return until callbacks appear.
+ */
+static void nocb_follower_wait(struct rcu_data *rdp)
+{
+       bool firsttime = true;
+
+       for (;;) {
+               if (!rcu_nocb_poll) {
+                       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+                                           "FollowerSleep");
+                       wait_event_interruptible(rdp->nocb_wq,
+                                                ACCESS_ONCE(rdp->nocb_follower_head));
+               } else if (firsttime) {
+                       /* Don't drown trace log with "Poll"! */
+                       firsttime = false;
+                       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "Poll");
+               }
+               if (smp_load_acquire(&rdp->nocb_follower_head)) {
+                       /* ^^^ Ensure CB invocation follows _head test. */
+                       return;
+               }
+               if (!rcu_nocb_poll)
+                       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
+                                           "WokeEmpty");
+               flush_signals(current);
+               schedule_timeout_interruptible(1);
+       }
+}
+
 /*
  * Per-rcu_data kthread, but only for no-CBs CPUs.  Each kthread invokes
- * callbacks queued by the corresponding no-CBs CPU.
+ * callbacks queued by the corresponding no-CBs CPU; however, there is
+ * an optional leader-follower relationship so that the grace-period
+ * kthreads don't have to do quite so many wakeups.
  */
 static int rcu_nocb_kthread(void *arg)
 {
        int c, cl;
-       bool firsttime = 1;
        struct rcu_head *list;
        struct rcu_head *next;
        struct rcu_head **tail;
@@ -2227,41 +2388,22 @@ static int rcu_nocb_kthread(void *arg)
 
        /* Each pass through this loop invokes one batch of callbacks */
        for (;;) {
-               /* If not polling, wait for next batch of callbacks. */
-               if (!rcu_nocb_poll) {
-                       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-                                           TPS("Sleep"));
-                       wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
-                       /* Memory barrier provide by xchg() below. */
-               } else if (firsttime) {
-                       firsttime = 0;
-                       trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-                                           TPS("Poll"));
-               }
-               list = ACCESS_ONCE(rdp->nocb_head);
-               if (!list) {
-                       if (!rcu_nocb_poll)
-                               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-                                                   TPS("WokeEmpty"));
-                       schedule_timeout_interruptible(1);
-                       flush_signals(current);
-                       continue;
-               }
-               firsttime = 1;
-               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
-                                   TPS("WokeNonEmpty"));
-
-               /*
-                * Extract queued callbacks, update counts, and wait
-                * for a grace period to elapse.
-                */
-               ACCESS_ONCE(rdp->nocb_head) = NULL;
-               tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
-               c = atomic_long_xchg(&rdp->nocb_q_count, 0);
-               cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
-               ACCESS_ONCE(rdp->nocb_p_count) += c;
-               ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
-               rcu_nocb_wait_gp(rdp);
+               /* Wait for callbacks. */
+               if (rdp->nocb_leader == rdp)
+                       nocb_leader_wait(rdp);
+               else
+                       nocb_follower_wait(rdp);
+
+               /* Pull the ready-to-invoke callbacks onto local list. */
+               list = ACCESS_ONCE(rdp->nocb_follower_head);
+               BUG_ON(!list);
+               trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
+               ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
+               tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
+               c = atomic_long_xchg(&rdp->nocb_follower_count, 0);
+               cl = atomic_long_xchg(&rdp->nocb_follower_count_lazy, 0);
+               rdp->nocb_p_count += c;
+               rdp->nocb_p_count_lazy += cl;
 
                /* Each pass through the following loop invokes a callback. */
                trace_rcu_batch_start(rdp->rsp->name, cl, c, -1);
@@ -2305,7 +2447,7 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
        if (!rcu_nocb_need_deferred_wakeup(rdp))
                return;
        ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
-       wake_up(&rdp->nocb_wq);
+       wake_nocb_leader(rdp, false);
        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
 }
 
@@ -2314,19 +2456,57 @@ static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
        rdp->nocb_tail = &rdp->nocb_head;
        init_waitqueue_head(&rdp->nocb_wq);
+       rdp->nocb_follower_tail = &rdp->nocb_follower_head;
 }
 
-/* Create a kthread for each RCU flavor for each no-CBs CPU. */
+/* How many follower CPU IDs per leader?  Default of -1 for sqrt(nr_cpu_ids). */
+static int rcu_nocb_leader_stride = -1;
+module_param(rcu_nocb_leader_stride, int, 0444);
+
+/*
+ * Create a kthread for each RCU flavor for each no-CBs CPU.
+ * Also initialize leader-follower relationships.
+ */
 static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
 {
        int cpu;
+       int ls = rcu_nocb_leader_stride;
+       int nl = 0;  /* Next leader. */
        struct rcu_data *rdp;
+       struct rcu_data *rdp_leader = NULL;  /* Suppress misguided gcc warn. */
+       struct rcu_data *rdp_prev = NULL;
        struct task_struct *t;
 
        if (rcu_nocb_mask == NULL)
                return;
+#if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL)
+       if (tick_nohz_full_running)
+               cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
+#endif /* #if defined(CONFIG_NO_HZ_FULL) && !defined(CONFIG_NO_HZ_FULL_ALL) */
+       if (ls == -1) {
+               ls = int_sqrt(nr_cpu_ids);
+               rcu_nocb_leader_stride = ls;
+       }
+
+       /*
+        * Each pass through this loop sets up one rcu_data structure and
+        * spawns one rcu_nocb_kthread().
+        */
        for_each_cpu(cpu, rcu_nocb_mask) {
                rdp = per_cpu_ptr(rsp->rda, cpu);
+               if (rdp->cpu >= nl) {
+                       /* New leader, set up for followers & next leader. */
+                       nl = DIV_ROUND_UP(rdp->cpu + 1, ls) * ls;
+                       rdp->nocb_leader = rdp;
+                       rdp_leader = rdp;
+               } else {
+                       /* Another follower, link to previous leader. */
+                       rdp->nocb_leader = rdp_leader;
+                       rdp_prev->nocb_next_follower = rdp;
+               }
+               rdp_prev = rdp;
+
+               /* Spawn the kthread for this CPU. */
                t = kthread_run(rcu_nocb_kthread, rdp,
                                "rcuo%c/%d", rsp->abbr, cpu);
                BUG_ON(IS_ERR(t));
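
A worked example of the grouping loop, assuming 64 CPUs (so int_sqrt(nr_cpu_ids) = 8) with all of them in rcu_nocb_mask:

    /* cpu  0:  0 >= nl(0)  -> leader;  nl = DIV_ROUND_UP(1, 8) * 8 = 8
     * cpus 1-7: < 8        -> followers of CPU 0
     * cpu  8:  8 >= nl(8)  -> leader;  nl = DIV_ROUND_UP(9, 8) * 8 = 16
     * ...and so on: 8 groups of 8, so a grace-period end wakes 8 leader
     * kthreads, which in turn wake their followers, instead of the
     * grace-period kthread having to wake all 64 directly. */
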
@@ -2404,7 +2584,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
  * if an adaptive-ticks CPU is failing to respond to the current grace
  * period and has not be idle from an RCU perspective, kick it.
  */
-static void rcu_kick_nohz_cpu(int cpu)
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
 {
 #ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_cpu(cpu))
@@ -2843,12 +3023,16 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
  */
 static void rcu_bind_gp_kthread(void)
 {
-#ifdef CONFIG_NO_HZ_FULL
-       int cpu = ACCESS_ONCE(tick_do_timer_cpu);
+       int __maybe_unused cpu;
 
-       if (cpu < 0 || cpu >= nr_cpu_ids)
+       if (!tick_nohz_full_enabled())
                return;
-       if (raw_smp_processor_id() != cpu)
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+       cpu = tick_do_timer_cpu;
+       if (cpu >= 0 && cpu < nr_cpu_ids && raw_smp_processor_id() != cpu)
                set_cpus_allowed_ptr(current, cpumask_of(cpu));
-#endif /* #ifdef CONFIG_NO_HZ_FULL */
+#else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+       if (!is_housekeeping_cpu(raw_smp_processor_id()))
+               housekeeping_affine(current);
+#endif /* #else #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
 }
index a2aeb4d..4056d79 100644 (file)
@@ -90,9 +90,6 @@ void __rcu_read_unlock(void)
        } else {
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
-#ifdef CONFIG_PROVE_RCU_DELAY
-               udelay(10); /* Make preemption more probable. */
-#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                        rcu_read_unlock_special(t);
@@ -200,12 +197,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
 EXPORT_SYMBOL_GPL(wait_rcu_gp);
 
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-static inline void debug_init_rcu_head(struct rcu_head *head)
+void init_rcu_head(struct rcu_head *head)
 {
        debug_object_init(head, &rcuhead_debug_descr);
 }
 
-static inline void debug_rcu_head_free(struct rcu_head *head)
+void destroy_rcu_head(struct rcu_head *head)
 {
        debug_object_free(head, &rcuhead_debug_descr);
 }
@@ -350,21 +347,3 @@ static int __init check_cpu_stall_init(void)
 early_initcall(check_cpu_stall_init);
 
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
-
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-DEFINE_PER_CPU(int, rcu_cond_resched_count);
-
-/*
- * Report a set of RCU quiescent states, for use by cond_resched()
- * and friends.  Out of line due to being called infrequently.
- */
-void rcu_resched(void)
-{
-       preempt_disable();
-       __this_cpu_write(rcu_cond_resched_count, 0);
-       rcu_note_context_switch(smp_processor_id());
-       preempt_enable();
-}
index 3bdf01b..1211575 100644 (file)
@@ -139,6 +139,8 @@ void update_rq_clock(struct rq *rq)
                return;
 
        delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
+       if (delta < 0)
+               return;
        rq->clock += delta;
        update_rq_clock_task(rq, delta);
 }
@@ -243,6 +245,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        char buf[64];
        char *cmp;
        int i;
+       struct inode *inode;
 
        if (cnt > 63)
                cnt = 63;
@@ -253,7 +256,11 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
        buf[cnt] = 0;
        cmp = strstrip(buf);
 
+       /* Ensure the static_key remains in a consistent state */
+       inode = file_inode(filp);
+       mutex_lock(&inode->i_mutex);
        i = sched_feat_set(cmp);
+       mutex_unlock(&inode->i_mutex);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;
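
One plausible interleaving the new locking excludes (hypothetical; two writers racing on the same feature):

    /* W1: echo FEAT               W2: echo NO_FEAT
     *   sets bit in sysctl_sched_features
     *                               clears the same bit
     *                               static_key disable
     *   static_key enable
     * Result: bit cleared but static_key enabled.  Serializing on
     * i_mutex makes the bit update and key update a single unit. */
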
 
@@ -587,30 +594,31 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 
 /*
- * resched_task - mark a task 'to be rescheduled now'.
+ * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
  * On UP this means the setting of the need_resched flag, on SMP it
  * might also involve a cross-CPU call to trigger the scheduler on
  * the target CPU.
  */
-void resched_task(struct task_struct *p)
+void resched_curr(struct rq *rq)
 {
+       struct task_struct *curr = rq->curr;
        int cpu;
 
-       lockdep_assert_held(&task_rq(p)->lock);
+       lockdep_assert_held(&rq->lock);
 
-       if (test_tsk_need_resched(p))
+       if (test_tsk_need_resched(curr))
                return;
 
-       cpu = task_cpu(p);
+       cpu = cpu_of(rq);
 
        if (cpu == smp_processor_id()) {
-               set_tsk_need_resched(p);
+               set_tsk_need_resched(curr);
                set_preempt_need_resched();
                return;
        }
 
-       if (set_nr_and_not_polling(p))
+       if (set_nr_and_not_polling(curr))
                smp_send_reschedule(cpu);
        else
                trace_sched_wake_idle_without_ipi(cpu);
@@ -623,7 +631,7 @@ void resched_cpu(int cpu)
 
        if (!raw_spin_trylock_irqsave(&rq->lock, flags))
                return;
-       resched_task(cpu_curr(cpu));
+       resched_curr(rq);
        raw_spin_unlock_irqrestore(&rq->lock, flags);
 }
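
The conversion pattern applied by the remainder of this patch (and by the follow-on deadline/fair hunks below):

    resched_task(rq->curr);   /* old: task pointer; rq found via task_rq() */
    resched_curr(rq);         /* new: rq pointer; curr read under rq->lock */
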
 
@@ -684,10 +692,16 @@ static void wake_up_idle_cpu(int cpu)
 
 static bool wake_up_full_nohz_cpu(int cpu)
 {
+       /*
+        * We just need the target to call irq_exit() and re-evaluate
+        * the next tick. The nohz full kick at least implies that.
+        * If needed we can still optimize that later with an
+        * empty IRQ.
+        */
        if (tick_nohz_full_cpu(cpu)) {
                if (cpu != smp_processor_id() ||
                    tick_nohz_tick_stopped())
-                       smp_send_reschedule(cpu);
+                       tick_nohz_full_kick_cpu(cpu);
                return true;
        }
 
@@ -730,18 +744,15 @@ static inline bool got_nohz_idle_kick(void)
 #ifdef CONFIG_NO_HZ_FULL
 bool sched_can_stop_tick(void)
 {
-       struct rq *rq;
-
-       rq = this_rq();
-
-       /* Make sure rq->nr_running update is visible after the IPI */
-       smp_rmb();
-
-       /* More than one running task need preemption */
-       if (rq->nr_running > 1)
-               return false;
+       /*
+        * More than one running task needs preemption.
+        * nr_running update is assumed to be visible
+        * after IPI is sent from wakers.
+        */
+       if (this_rq()->nr_running > 1)
+               return false;
 
-       return true;
+       return true;
 }
 #endif /* CONFIG_NO_HZ_FULL */
 
@@ -1022,7 +1033,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
                        if (class == rq->curr->sched_class)
                                break;
                        if (class == p->sched_class) {
-                               resched_task(rq->curr);
+                               resched_curr(rq);
                                break;
                        }
                }
@@ -1568,9 +1579,7 @@ void scheduler_ipi(void)
         */
        preempt_fold_need_resched();
 
-       if (llist_empty(&this_rq()->wake_list)
-                       && !tick_nohz_full_cpu(smp_processor_id())
-                       && !got_nohz_idle_kick())
+       if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
                return;
 
        /*
@@ -1587,7 +1596,6 @@ void scheduler_ipi(void)
         * somewhat pessimize the simple resched case.
         */
        irq_enter();
-       tick_nohz_full_check();
        sched_ttwu_pending();
 
        /*
@@ -2431,7 +2439,12 @@ static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
 {
        u64 ns = 0;
 
-       if (task_current(rq, p)) {
+       /*
+        * Must be ->curr _and_ ->on_rq.  If dequeued, we would
+        * project cycles that may never be accounted to this
+        * thread, breaking clock_gettime().
+        */
+       if (task_current(rq, p) && p->on_rq) {
                update_rq_clock(rq);
                ns = rq_clock_task(rq) - p->se.exec_start;
                if ((s64)ns < 0)
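
The symptom being fixed, from userspace's point of view (e.g. sampling another thread's CPU clock; sketch only):

    /* t1 = clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) includes a
     *      projected delta for the currently running target task;
     * the target is then dequeued, and those projected cycles are
     *      never actually accounted to it;
     * t2 = clock_gettime(...) can then come out smaller than t1.
     * Requiring p->on_rq as well as task_current() keeps the clock
     * monotonic. */
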
@@ -2474,8 +2487,10 @@ unsigned long long task_sched_runtime(struct task_struct *p)
         * If we race with it leaving cpu, we'll take a lock. So we're correct.
         * If we race with it entering cpu, unaccounted time is 0. This is
         * indistinguishable from the read occurring a few cycles earlier.
+        * If we see ->on_cpu without ->on_rq, the task is leaving, and has
+        * been accounted, so we're correct here as well.
         */
-       if (!p->on_cpu)
+       if (!p->on_cpu || !p->on_rq)
                return p->se.sum_exec_runtime;
 #endif
 
@@ -2971,7 +2986,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
        }
 
        trace_sched_pi_setprio(p, prio);
-       p->pi_top_task = rt_mutex_get_top_task(p);
        oldprio = p->prio;
        prev_class = p->sched_class;
        on_rq = p->on_rq;
@@ -2991,8 +3005,9 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
         *          running task
         */
        if (dl_prio(prio)) {
-               if (!dl_prio(p->normal_prio) || (p->pi_top_task &&
-                       dl_entity_preempt(&p->pi_top_task->dl, &p->dl))) {
+               struct task_struct *pi_task = rt_mutex_get_top_task(p);
+               if (!dl_prio(p->normal_prio) ||
+                   (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
                        p->dl.dl_boosted = 1;
                        p->dl.dl_throttled = 0;
                        enqueue_flag = ENQUEUE_REPLENISH;
@@ -3064,7 +3079,7 @@ void set_user_nice(struct task_struct *p, long nice)
                 * lowered its priority, then reschedule its CPU:
                 */
                if (delta < 0 || (delta > 0 && task_running(rq, p)))
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        }
 out_unlock:
        task_rq_unlock(rq, p, &flags);
@@ -3203,12 +3218,18 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
        dl_se->dl_yielded = 0;
 }
 
+/*
+ * sched_setparam() passes in -1 for its policy, to let the functions
+ * it calls know not to change it.
+ */
+#define SETPARAM_POLICY        -1
+
 static void __setscheduler_params(struct task_struct *p,
                const struct sched_attr *attr)
 {
        int policy = attr->sched_policy;
 
-       if (policy == -1) /* setparam */
+       if (policy == SETPARAM_POLICY)
                policy = p->policy;
 
        p->policy = policy;
@@ -3557,10 +3578,8 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
                .sched_nice     = PRIO_TO_NICE(p->static_prio),
        };
 
-       /*
-        * Fixup the legacy SCHED_RESET_ON_FORK hack
-        */
-       if (policy & SCHED_RESET_ON_FORK) {
+       /* Fixup the legacy SCHED_RESET_ON_FORK hack. */
+       if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
                attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
                policy &= ~SCHED_RESET_ON_FORK;
                attr.sched_policy = policy;
@@ -3730,7 +3749,7 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  */
 SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
 {
-       return do_sched_setscheduler(pid, -1, param);
+       return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
 }
 
 /**
@@ -4147,7 +4166,6 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
-       rcu_cond_resched();
        if (should_resched()) {
                __cond_resched();
                return 1;
@@ -4166,18 +4184,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-       bool need_rcu_resched = rcu_should_resched();
        int resched = should_resched();
        int ret = 0;
 
        lockdep_assert_held(lock);
 
-       if (spin_needbreak(lock) || resched || need_rcu_resched) {
+       if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
                        __cond_resched();
-               else if (unlikely(need_rcu_resched))
-                       rcu_resched();
                else
                        cpu_relax();
                ret = 1;
@@ -4191,7 +4206,6 @@ int __sched __cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       rcu_cond_resched();  /* BH disabled OK, just recording QSes. */
        if (should_resched()) {
                local_bh_enable();
                __cond_resched();
@@ -4290,7 +4304,7 @@ again:
                 * fairness.
                 */
                if (preempt && rq != p_rq)
-                       resched_task(p_rq->curr);
+                       resched_curr(p_rq);
        }
 
 out_unlock:
@@ -6470,6 +6484,20 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
                sched_domain_level_max = max(sched_domain_level_max, sd->level);
                child->parent = sd;
                sd->child = child;
+
+               if (!cpumask_subset(sched_domain_span(child),
+                                   sched_domain_span(sd))) {
+                       pr_err("BUG: arch topology borken\n");
+#ifdef CONFIG_SCHED_DEBUG
+                       pr_err("     the %s domain not a subset of the %s domain\n",
+                                       child->name, sd->name);
+#endif
+                       /* Fixup, ensure @sd has at least @child cpus. */
+                       cpumask_or(sched_domain_span(sd),
+                                  sched_domain_span(sd),
+                                  sched_domain_span(child));
+               }
+
        }
        set_domain_attribute(sd, attr);
 
@@ -7097,7 +7125,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
        __setscheduler(rq, p, &attr);
        if (on_rq) {
                enqueue_task(rq, p, 0);
-               resched_task(rq->curr);
+               resched_curr(rq);
        }
 
        check_class_changed(rq, p, prev_class, old_prio);
@@ -7808,6 +7836,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
        if (period > max_cfs_quota_period)
                return -EINVAL;
 
+       /*
+        * Prevent race between setting of cfs_rq->runtime_enabled and
+        * unthrottle_offline_cfs_rqs().
+        */
+       get_online_cpus();
        mutex_lock(&cfs_constraints_mutex);
        ret = __cfs_schedulable(tg, period, quota);
        if (ret)
@@ -7833,7 +7866,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
        }
        raw_spin_unlock_irq(&cfs_b->lock);
 
-       for_each_possible_cpu(i) {
+       for_each_online_cpu(i) {
                struct cfs_rq *cfs_rq = tg->cfs_rq[i];
                struct rq *rq = cfs_rq->rq;
 
@@ -7849,6 +7882,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
                cfs_bandwidth_usage_dec();
 out_unlock:
        mutex_unlock(&cfs_constraints_mutex);
+       put_online_cpus();
 
        return ret;
 }
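
A sketch of the hotplug race named in the new comment, as one can read it:

    /* tg_set_cfs_bandwidth()            CPU-offline path
     *   for_each_possible_cpu(i)          unthrottle_offline_cfs_rqs()
     *     writes cfs_rq->runtime_enabled    reads runtime_enabled
     * With get_online_cpus() held the offline path cannot run
     * concurrently, and iterating only online CPUs stays consistent
     * with what the offline path will later clean up. */
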
@@ -8088,7 +8122,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
        .can_attach     = cpu_cgroup_can_attach,
        .attach         = cpu_cgroup_attach,
        .exit           = cpu_cgroup_exit,
-       .base_cftypes   = cpu_files,
+       .legacy_cftypes = cpu_files,
        .early_init     = 1,
 };
 
index 9cf350c..dd7cbb5 100644 (file)
@@ -278,6 +278,6 @@ void cpuacct_account_field(struct task_struct *p, int index, u64 val)
 struct cgroup_subsys cpuacct_cgrp_subsys = {
        .css_alloc      = cpuacct_css_alloc,
        .css_free       = cpuacct_css_free,
-       .base_cftypes   = files,
+       .legacy_cftypes = files,
        .early_init     = 1,
 };
index fc4f98b..255ce13 100644 (file)
@@ -306,7 +306,7 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
  * the overrunning entity can't interfere with other entity in the system and
  * can't make them miss their deadlines. Reasons why this kind of overruns
  * could happen are, typically, an entity voluntarily trying to overcome its
- * runtime, or it just underestimated it during sched_setscheduler_ex().
+ * runtime, or it just underestimated it during sched_setattr().
  */
 static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                struct sched_dl_entity *pi_se)
@@ -535,7 +535,7 @@ again:
                if (task_has_dl_policy(rq->curr))
                        check_preempt_curr_dl(rq, p, 0);
                else
-                       resched_task(rq->curr);
+                       resched_curr(rq);
 #ifdef CONFIG_SMP
                /*
                 * Queueing this task back might have overloaded rq,
@@ -634,7 +634,7 @@ static void update_curr_dl(struct rq *rq)
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
 
                if (!is_leftmost(curr, &rq->dl))
-                       resched_task(curr);
+                       resched_curr(rq);
        }
 
        /*
@@ -964,7 +964,7 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
            cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
                return;
 
-       resched_task(rq->curr);
+       resched_curr(rq);
 }
 
 static int pull_dl_task(struct rq *this_rq);
@@ -979,7 +979,7 @@ static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                  int flags)
 {
        if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
-               resched_task(rq->curr);
+               resched_curr(rq);
                return;
        }
 
@@ -1333,7 +1333,7 @@ retry:
        if (dl_task(rq->curr) &&
            dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
            rq->curr->nr_cpus_allowed > 1) {
-               resched_task(rq->curr);
+               resched_curr(rq);
                return 0;
        }
 
@@ -1373,7 +1373,7 @@ retry:
        set_task_cpu(next_task, later_rq->cpu);
        activate_task(later_rq, next_task, 0);
 
-       resched_task(later_rq->curr);
+       resched_curr(later_rq);
 
        double_unlock_balance(rq, later_rq);
 
@@ -1632,14 +1632,14 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                 */
                if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
                    rq->curr == p)
-                       resched_task(p);
+                       resched_curr(rq);
 #else
                /*
                 * Again, we don't know if p has a earlier
                 * or later deadline, so let's blindly set a
                 * (maybe not needed) rescheduling point.
                 */
-               resched_task(p);
+               resched_curr(rq);
 #endif /* CONFIG_SMP */
        } else
                switched_to_dl(rq, p);
index 695f977..627b3c3 100644 (file)
@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
-                       do_div(avg_atom, nr_switches);
+                       avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;
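
The do_div() to div64_ul() switch matters because do_div() takes a 32-bit divisor, while nr_switches can be 64-bit; truncation could even yield a zero divisor:

    unsigned long nr_switches = 1UL << 32; /* on 64-bit: multiple of 2^32 */
    do_div(avg_atom, nr_switches);         /* divisor truncated to 0!     */
    avg_atom = div64_ul(avg_atom, nr_switches); /* full-width divisor     */
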
 
index fea7d33..bfa3c86 100644 (file)
@@ -1062,7 +1062,6 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
        if (!cpus)
                return;
 
-       ns->load = (ns->load * SCHED_CAPACITY_SCALE) / ns->compute_capacity;
        ns->task_capacity =
                DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE);
        ns->has_free_capacity = (ns->nr_running < ns->task_capacity);
@@ -1096,18 +1095,30 @@ static void task_numa_assign(struct task_numa_env *env,
        env->best_cpu = env->dst_cpu;
 }
 
-static bool load_too_imbalanced(long orig_src_load, long orig_dst_load,
-                               long src_load, long dst_load,
+static bool load_too_imbalanced(long src_load, long dst_load,
                                struct task_numa_env *env)
 {
        long imb, old_imb;
+       long orig_src_load, orig_dst_load;
+       long src_capacity, dst_capacity;
+
+       /*
+        * The load is corrected for the CPU capacity available on each node.
+        *
+        * src_load        dst_load
+        * ------------ vs ---------
+        * src_capacity    dst_capacity
+        */
+       src_capacity = env->src_stats.compute_capacity;
+       dst_capacity = env->dst_stats.compute_capacity;
 
        /* We care about the slope of the imbalance, not the direction. */
        if (dst_load < src_load)
                swap(dst_load, src_load);
 
        /* Is the difference below the threshold? */
-       imb = dst_load * 100 - src_load * env->imbalance_pct;
+       imb = dst_load * src_capacity * 100 -
+             src_load * dst_capacity * env->imbalance_pct;
        if (imb <= 0)
                return false;
 
@@ -1115,10 +1126,14 @@ static bool load_too_imbalanced(long orig_src_load, long orig_dst_load,
         * The imbalance is above the allowed threshold.
         * Compare it with the old imbalance.
         */
+       orig_src_load = env->src_stats.load;
+       orig_dst_load = env->dst_stats.load;
+
        if (orig_dst_load < orig_src_load)
                swap(orig_dst_load, orig_src_load);
 
-       old_imb = orig_dst_load * 100 - orig_src_load * env->imbalance_pct;
+       old_imb = orig_dst_load * src_capacity * 100 -
+                 orig_src_load * dst_capacity * env->imbalance_pct;
 
        /* Would this change make things worse? */
        return (imb > old_imb);
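
The rewritten check compares per-capacity load without dividing: the ratio
test src_load/src_capacity vs dst_load/dst_capacity is cross-multiplied, with
imbalance_pct providing the allowed slack. A worked example with assumed
numbers:

    /* Assumed figures, for illustration only. */
    long src_load = 900,  dst_load = 1200;
    long src_capacity = 1024, dst_capacity = 2048;  /* dst has 2x the CPUs */
    long imbalance_pct = 125;                       /* 25% slack */

    /* After the swap above, dst_load is the larger side.
     * Per-capacity: dst 1200/2048 ~= 0.59 vs src 900/1024 ~= 0.88.
     * Cross-multiplied: 1200 * 1024 * 100 = 122,880,000 is less than
     *                    900 * 2048 * 125 = 230,400,000,
     * so imb <= 0 and the placement is not considered too imbalanced. */
    long imb = dst_load * src_capacity * 100 -
               src_load * dst_capacity * imbalance_pct;
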
@@ -1136,10 +1151,10 @@ static void task_numa_compare(struct task_numa_env *env,
        struct rq *src_rq = cpu_rq(env->src_cpu);
        struct rq *dst_rq = cpu_rq(env->dst_cpu);
        struct task_struct *cur;
-       long orig_src_load, src_load;
-       long orig_dst_load, dst_load;
+       long src_load, dst_load;
        long load;
-       long imp = (groupimp > 0) ? groupimp : taskimp;
+       long imp = env->p->numa_group ? groupimp : taskimp;
+       long moveimp = imp;
 
        rcu_read_lock();
        cur = ACCESS_ONCE(dst_rq->curr);
@@ -1177,11 +1192,6 @@ static void task_numa_compare(struct task_numa_env *env,
                         * itself (not part of a group), use the task weight
                         * instead.
                         */
-                       if (env->p->numa_group)
-                               imp = groupimp;
-                       else
-                               imp = taskimp;
-
                        if (cur->numa_group)
                                imp += group_weight(cur, env->src_nid) -
                                       group_weight(cur, env->dst_nid);
@@ -1191,7 +1201,7 @@ static void task_numa_compare(struct task_numa_env *env,
                }
        }
 
-       if (imp < env->best_imp)
+       if (imp <= env->best_imp && moveimp <= env->best_imp)
                goto unlock;
 
        if (!cur) {
@@ -1204,20 +1214,34 @@ static void task_numa_compare(struct task_numa_env *env,
        }
 
        /* Balance doesn't matter much if we're running a task per cpu */
-       if (src_rq->nr_running == 1 && dst_rq->nr_running == 1)
+       if (imp > env->best_imp && src_rq->nr_running == 1 &&
+                       dst_rq->nr_running == 1)
                goto assign;
 
        /*
         * In the overloaded case, try and keep the load balanced.
         */
 balance:
-       orig_dst_load = env->dst_stats.load;
-       orig_src_load = env->src_stats.load;
-
-       /* XXX missing capacity terms */
        load = task_h_load(env->p);
-       dst_load = orig_dst_load + load;
-       src_load = orig_src_load - load;
+       dst_load = env->dst_stats.load + load;
+       src_load = env->src_stats.load - load;
+
+       if (moveimp > imp && moveimp > env->best_imp) {
+               /*
+                * If the improvement from just moving env->p (without a
+                * swap) is better than swapping tasks around, check if a
+                * plain move is possible. Store a slightly smaller score
+                * than moveimp, so an actually idle CPU will win.
+                */
+               if (!load_too_imbalanced(src_load, dst_load, env)) {
+                       imp = moveimp - 1;
+                       cur = NULL;
+                       goto assign;
+               }
+       }
+
+       if (imp <= env->best_imp)
+               goto unlock;
 
        if (cur) {
                load = task_h_load(cur);
@@ -1225,8 +1249,7 @@ balance:
                src_load += load;
        }
 
-       if (load_too_imbalanced(orig_src_load, orig_dst_load,
-                               src_load, dst_load, env))
+       if (load_too_imbalanced(src_load, dst_load, env))
                goto unlock;
 
 assign:
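
The moveimp bookkeeping above distinguishes "move p to an empty slot" from
"swap p with cur": imp is degraded by the cost to cur, while moveimp keeps
the undegraded score. Recording moveimp - 1 for a successful move means a
destination CPU that is genuinely idle, where cur is NULL and the full score
survives, still wins the tie. With made-up numbers:

    long best_imp = 20;
    long imp      = 30;     /* group improvement before swap costs */
    long moveimp  = imp;    /* saved: score of moving p alone */

    imp -= 12;              /* swapping with cur costs cur's locality */

    /* moveimp (30) > imp (18) and > best_imp (20): if the load stays
     * balanced without swapping, record the move with score 29, so a
     * truly idle CPU found later, scoring the full 30, still wins. */
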
@@ -1302,9 +1325,8 @@ static int task_numa_migrate(struct task_struct *p)
        groupimp = group_weight(p, env.dst_nid) - groupweight;
        update_numa_stats(&env.dst_stats, env.dst_nid);
 
-       /* If the preferred nid has free capacity, try to use it. */
-       if (env.dst_stats.has_free_capacity)
-               task_numa_find_cpu(&env, taskimp, groupimp);
+       /* Try to find a spot on the preferred nid. */
+       task_numa_find_cpu(&env, taskimp, groupimp);
 
        /* No space available on the preferred nid. Look elsewhere. */
        if (env.best_cpu == -1) {
@@ -1324,10 +1346,6 @@ static int task_numa_migrate(struct task_struct *p)
                }
        }
 
-       /* No better CPU than the current one was found. */
-       if (env.best_cpu == -1)
-               return -EAGAIN;
-
        /*
         * If the task is part of a workload that spans multiple NUMA nodes,
         * and is migrating into one of the workload's active nodes, remember
@@ -1336,8 +1354,19 @@ static int task_numa_migrate(struct task_struct *p)
         * A task that migrated to a second choice node will be better off
         * trying for a better one later. Do not set the preferred node here.
         */
-       if (p->numa_group && node_isset(env.dst_nid, p->numa_group->active_nodes))
-               sched_setnuma(p, env.dst_nid);
+       if (p->numa_group) {
+               if (env.best_cpu == -1)
+                       nid = env.src_nid;
+               else
+                       nid = env.dst_nid;
+
+               if (node_isset(nid, p->numa_group->active_nodes))
+                       sched_setnuma(p, env.dst_nid);
+       }
+
+       /* No better CPU than the current one was found. */
+       if (env.best_cpu == -1)
+               return -EAGAIN;
 
        /*
         * Reset the scan period if the task is being rescheduled on an
@@ -1415,12 +1444,12 @@ static void update_numa_active_node_mask(struct numa_group *numa_group)
 /*
  * When adapting the scan rate, the period is divided into NUMA_PERIOD_SLOTS
  * increments. The more local the fault statistics are, the higher the scan
- * period will be for the next scan window. If local/remote ratio is below
- * NUMA_PERIOD_THRESHOLD (where range of ratio is 1..NUMA_PERIOD_SLOTS) the
- * scan period will decrease
+ * period will be for the next scan window. If the local/(local+remote)
+ * ratio is below NUMA_PERIOD_THRESHOLD (the ratio ranges over
+ * 1..NUMA_PERIOD_SLOTS), the scan period will decrease. Aim for 70%
+ * local accesses.
  */
 #define NUMA_PERIOD_SLOTS 10
-#define NUMA_PERIOD_THRESHOLD 3
+#define NUMA_PERIOD_THRESHOLD 7
 
 /*
  * Increase the scan period (slow down scanning) if the majority of
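
With NUMA_PERIOD_SLOTS fixed at 10, raising the threshold from 3 to 7 moves
the target from roughly 30% to 70% local faults. Concretely, with assumed
per-window fault counts:

    unsigned long local = 60, remote = 40;          /* assumed counts */

    /* Ratio expressed in slots out of NUMA_PERIOD_SLOTS. */
    int ratio = local * 10 / (local + remote);      /* == 6 */

    /* 6 < NUMA_PERIOD_THRESHOLD (7): still below the 70% target, so
     * the scan period shrinks and scanning speeds up; at or above
     * 70% local the period grows and scanning backs off. */
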
@@ -1595,30 +1624,17 @@ static void task_numa_placement(struct task_struct *p)
 
        if (p->numa_group) {
                update_numa_active_node_mask(p->numa_group);
-               /*
-                * If the preferred task and group nids are different,
-                * iterate over the nodes again to find the best place.
-                */
-               if (max_nid != max_group_nid) {
-                       unsigned long weight, max_weight = 0;
-
-                       for_each_online_node(nid) {
-                               weight = task_weight(p, nid) + group_weight(p, nid);
-                               if (weight > max_weight) {
-                                       max_weight = weight;
-                                       max_nid = nid;
-                               }
-                       }
-               }
-
                spin_unlock_irq(group_lock);
+               max_nid = max_group_nid;
        }
 
-       /* Preferred node as the node with the most faults */
-       if (max_faults && max_nid != p->numa_preferred_nid) {
-               /* Update the preferred nid and migrate task if possible */
-               sched_setnuma(p, max_nid);
-               numa_migrate_preferred(p);
+       if (max_faults) {
+               /* Set the new preferred node */
+               if (max_nid != p->numa_preferred_nid)
+                       sched_setnuma(p, max_nid);
+
+               if (task_node(p) != p->numa_preferred_nid)
+                       numa_migrate_preferred(p);
        }
 }
 
@@ -2899,7 +2915,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
        ideal_runtime = sched_slice(cfs_rq, curr);
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime) {
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
                /*
                 * The current task ran long enough, ensure it doesn't get
                 * re-elected due to buddy favours.
@@ -2923,7 +2939,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                return;
 
        if (delta > ideal_runtime)
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
 }
 
 static void
@@ -3063,7 +3079,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
         * validating it and just reschedule.
         */
        if (queued) {
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
                return;
        }
        /*
@@ -3254,7 +3270,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
         * hierarchy can be throttled
         */
        if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
-               resched_task(rq_of(cfs_rq)->curr);
+               resched_curr(rq_of(cfs_rq));
 }
 
 static __always_inline
@@ -3360,7 +3376,11 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = rq_clock(rq);
        raw_spin_lock(&cfs_b->lock);
-       list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
+       /*
+        * Add to the _head_ of the list, so that an already-started
+        * distribute_cfs_runtime will not see us
+        */
+       list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
        if (!cfs_b->timer_active)
                __start_cfs_bandwidth(cfs_b, false);
        raw_spin_unlock(&cfs_b->lock);
@@ -3410,14 +3430,15 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 
        /* determine whether we need to wake up potentially idle cpu */
        if (rq->curr == rq->idle && rq->cfs.nr_running)
-               resched_task(rq->curr);
+               resched_curr(rq);
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
                u64 remaining, u64 expires)
 {
        struct cfs_rq *cfs_rq;
-       u64 runtime = remaining;
+       u64 runtime;
+       u64 starting_runtime = remaining;
 
        rcu_read_lock();
        list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq,
@@ -3448,7 +3469,7 @@ next:
        }
        rcu_read_unlock();
 
-       return remaining;
+       return starting_runtime - remaining;
 }
 
 /*
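
distribute_cfs_runtime() now returns the amount of runtime it actually handed
out rather than what was left over, so the period timer can keep
cfs_b->runtime authoritative and subtract only what was consumed. A condensed
sketch of the new caller contract (matching the loop rewritten below):

    while (throttled && cfs_b->runtime > 0) {
            u64 batch = cfs_b->runtime;

            raw_spin_unlock(&cfs_b->lock);
            /* Returns the amount consumed, not the remainder. */
            batch = distribute_cfs_runtime(cfs_b, batch, runtime_expires);
            raw_spin_lock(&cfs_b->lock);

            throttled = !list_empty(&cfs_b->throttled_cfs_rq);
            /* Clamp: others may have drained the pool while unlocked. */
            cfs_b->runtime -= min(batch, cfs_b->runtime);
    }
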
@@ -3494,22 +3515,17 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
        /* account preceding periods in which throttling occurred */
        cfs_b->nr_throttled += overrun;
 
-       /*
-        * There are throttled entities so we must first use the new bandwidth
-        * to unthrottle them before making it generally available.  This
-        * ensures that all existing debts will be paid before a new cfs_rq is
-        * allowed to run.
-        */
-       runtime = cfs_b->runtime;
        runtime_expires = cfs_b->runtime_expires;
-       cfs_b->runtime = 0;
 
        /*
-        * This check is repeated as we are holding onto the new bandwidth
-        * while we unthrottle.  This can potentially race with an unthrottled
-        * group trying to acquire new bandwidth from the global pool.
+        * This check is repeated as we are holding onto the new bandwidth while
+        * we unthrottle. This can potentially race with an unthrottled group
+        * trying to acquire new bandwidth from the global pool. This can result
+        * in us over-using our runtime if it is all used during this loop, but
+        * only by limited amounts in that extreme case.
         */
-       while (throttled && runtime > 0) {
+       while (throttled && cfs_b->runtime > 0) {
+               runtime = cfs_b->runtime;
                raw_spin_unlock(&cfs_b->lock);
                /* we can't nest cfs_b->lock while distributing bandwidth */
                runtime = distribute_cfs_runtime(cfs_b, runtime,
@@ -3517,10 +3533,10 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
                raw_spin_lock(&cfs_b->lock);
 
                throttled = !list_empty(&cfs_b->throttled_cfs_rq);
+
+               cfs_b->runtime -= min(runtime, cfs_b->runtime);
        }
 
-       /* return (any) remaining runtime */
-       cfs_b->runtime = runtime;
        /*
         * While we are ensured activity in the period following an
         * unthrottle, this also covers the case in which the new bandwidth is
@@ -3631,10 +3647,9 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
                return;
        }
 
-       if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) {
+       if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice)
                runtime = cfs_b->runtime;
-               cfs_b->runtime = 0;
-       }
+
        expires = cfs_b->runtime_expires;
        raw_spin_unlock(&cfs_b->lock);
 
@@ -3645,7 +3660,7 @@ static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b)
 
        raw_spin_lock(&cfs_b->lock);
        if (expires == cfs_b->runtime_expires)
-               cfs_b->runtime = runtime;
+               cfs_b->runtime -= min(runtime, cfs_b->runtime);
        raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3775,6 +3790,19 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        hrtimer_cancel(&cfs_b->slack_timer);
 }
 
+static void __maybe_unused update_runtime_enabled(struct rq *rq)
+{
+       struct cfs_rq *cfs_rq;
+
+       for_each_leaf_cfs_rq(rq, cfs_rq) {
+               struct cfs_bandwidth *cfs_b = &cfs_rq->tg->cfs_bandwidth;
+
+               raw_spin_lock(&cfs_b->lock);
+               cfs_rq->runtime_enabled = cfs_b->quota != RUNTIME_INF;
+               raw_spin_unlock(&cfs_b->lock);
+       }
+}
+
 static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
        struct cfs_rq *cfs_rq;
@@ -3788,6 +3816,12 @@ static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
                 * there's some valid quota amount
                 */
                cfs_rq->runtime_remaining = 1;
+               /*
+                * An offline rq is still schedulable until the cpu is
+                * completely disabled in take_cpu_down(), so prevent any
+                * new cfs throttling here.
+                */
+               cfs_rq->runtime_enabled = 0;
+
                if (cfs_rq_throttled(cfs_rq))
                        unthrottle_cfs_rq(cfs_rq);
        }
@@ -3831,6 +3865,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
        return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
+static inline void update_runtime_enabled(struct rq *rq) {}
 static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
@@ -3854,7 +3889,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
 
                if (delta < 0) {
                        if (rq->curr == p)
-                               resched_task(p);
+                               resched_curr(rq);
                        return;
                }
 
@@ -4723,7 +4758,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
        return;
 
 preempt:
-       resched_task(curr);
+       resched_curr(rq);
        /*
         * Only set the backward buddy when the current task is still
         * on the rq. This can happen when a wakeup gets interleaved
@@ -5094,8 +5129,7 @@ static void move_task(struct task_struct *p, struct lb_env *env)
 /*
  * Is this task likely cache-hot:
  */
-static int
-task_hot(struct task_struct *p, u64 now)
+static int task_hot(struct task_struct *p, struct lb_env *env)
 {
        s64 delta;
 
@@ -5108,7 +5142,7 @@ task_hot(struct task_struct *p, u64 now)
        /*
         * Buddy candidates are cache hot:
         */
-       if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
+       if (sched_feat(CACHE_HOT_BUDDY) && env->dst_rq->nr_running &&
                        (&p->se == cfs_rq_of(&p->se)->next ||
                         &p->se == cfs_rq_of(&p->se)->last))
                return 1;
@@ -5118,7 +5152,7 @@ task_hot(struct task_struct *p, u64 now)
        if (sysctl_sched_migration_cost == 0)
                return 0;
 
-       delta = now - p->se.exec_start;
+       delta = rq_clock_task(env->src_rq) - p->se.exec_start;
 
        return delta < (s64)sysctl_sched_migration_cost;
 }
@@ -5272,7 +5306,7 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
         * 2) task is cache cold, or
         * 3) too many balance attempts have failed.
         */
-       tsk_cache_hot = task_hot(p, rq_clock_task(env->src_rq));
+       tsk_cache_hot = task_hot(p, env);
        if (!tsk_cache_hot)
                tsk_cache_hot = migrate_degrades_locality(p, env);
 
@@ -5864,10 +5898,12 @@ static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *gro
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
  * @sgs: variable to hold the statistics for this group.
+ * @overload: Indicate more than one runnable task for any CPU.
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
                        struct sched_group *group, int load_idx,
-                       int local_group, struct sg_lb_stats *sgs)
+                       int local_group, struct sg_lb_stats *sgs,
+                       bool *overload)
 {
        unsigned long load;
        int i;
@@ -5885,6 +5921,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
                sgs->group_load += load;
                sgs->sum_nr_running += rq->nr_running;
+
+               if (rq->nr_running > 1)
+                       *overload = true;
+
 #ifdef CONFIG_NUMA_BALANCING
                sgs->nr_numa_running += rq->nr_numa_running;
                sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -5995,6 +6035,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
        struct sched_group *sg = env->sd->groups;
        struct sg_lb_stats tmp_sgs;
        int load_idx, prefer_sibling = 0;
+       bool overload = false;
 
        if (child && child->flags & SD_PREFER_SIBLING)
                prefer_sibling = 1;
@@ -6015,7 +6056,8 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
                                update_group_capacity(env->sd, env->dst_cpu);
                }
 
-               update_sg_lb_stats(env, sg, load_idx, local_group, sgs);
+               update_sg_lb_stats(env, sg, load_idx, local_group, sgs,
+                                               &overload);
 
                if (local_group)
                        goto next_group;
@@ -6049,6 +6091,13 @@ next_group:
 
        if (env->sd->flags & SD_NUMA)
                env->fbq_type = fbq_classify_group(&sds->busiest_stat);
+
+       if (!env->sd->parent) {
+               /* update overload indicator if we are at root domain */
+               if (env->dst_rq->rd->overload != overload)
+                       env->dst_rq->rd->overload = overload;
+       }
+
 }
 
 /**
@@ -6767,7 +6816,8 @@ static int idle_balance(struct rq *this_rq)
         */
        this_rq->idle_stamp = rq_clock(this_rq);
 
-       if (this_rq->avg_idle < sysctl_sched_migration_cost) {
+       if (this_rq->avg_idle < sysctl_sched_migration_cost ||
+           !this_rq->rd->overload) {
                rcu_read_lock();
                sd = rcu_dereference_check_sched_domain(this_rq->sd);
                if (sd)
@@ -7325,6 +7375,8 @@ void trigger_load_balance(struct rq *rq)
 static void rq_online_fair(struct rq *rq)
 {
        update_sysctl();
+
+       update_runtime_enabled(rq);
 }
 
 static void rq_offline_fair(struct rq *rq)
@@ -7398,7 +7450,7 @@ static void task_fork_fair(struct task_struct *p)
                 * 'current' within the tree based on its new key value.
                 */
                swap(curr->vruntime, se->vruntime);
-               resched_task(rq->curr);
+               resched_curr(rq);
        }
 
        se->vruntime -= cfs_rq->min_vruntime;
@@ -7423,7 +7475,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
         */
        if (rq->curr == p) {
                if (p->prio > oldprio)
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        } else
                check_preempt_curr(rq, p, 0);
 }
@@ -7486,7 +7538,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
         * if we can still preempt the current task.
         */
        if (rq->curr == p)
-               resched_task(rq->curr);
+               resched_curr(rq);
        else
                check_preempt_curr(rq, p, 0);
 }
index cf009fb..9f1608f 100644 (file)
@@ -79,7 +79,7 @@ static void cpuidle_idle_call(void)
        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;
-       bool broadcast;
+       unsigned int broadcast;
 
        /*
         * Check if the idle task must be rescheduled. If it is the
@@ -135,7 +135,7 @@ use_default:
                goto exit_idle;
        }
 
-       broadcast = !!(drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP);
+       broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;
 
        /*
         * Tell the time framework to switch to a broadcast timer
index 879f2b7..67ad4e7 100644 (file)
@@ -20,7 +20,7 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
  */
 static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 {
-       resched_task(rq->idle);
+       resched_curr(rq);
 }
 
 static struct task_struct *
index a490831..5f6edca 100644 (file)
@@ -463,9 +463,10 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
 {
        struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
+       struct rq *rq = rq_of_rt_rq(rt_rq);
        struct sched_rt_entity *rt_se;
 
-       int cpu = cpu_of(rq_of_rt_rq(rt_rq));
+       int cpu = cpu_of(rq);
 
        rt_se = rt_rq->tg->rt_se[cpu];
 
@@ -476,7 +477,7 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
                        enqueue_rt_entity(rt_se, false);
 
                if (rt_rq->highest_prio.curr < curr->prio)
-                       resched_task(curr);
+                       resched_curr(rq);
        }
 }
 
@@ -566,7 +567,7 @@ static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
                return;
 
        enqueue_top_rt_rq(rt_rq);
-       resched_task(rq->curr);
+       resched_curr(rq);
 }
 
 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
@@ -740,6 +741,9 @@ balanced:
                rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
+
+               /* Make rt_rq available for pick_next_task() */
+               sched_rt_rq_enqueue(rt_rq);
        }
 }
 
@@ -948,7 +952,7 @@ static void update_curr_rt(struct rq *rq)
                        raw_spin_lock(&rt_rq->rt_runtime_lock);
                        rt_rq->rt_time += delta_exec;
                        if (sched_rt_runtime_exceeded(rt_rq))
-                               resched_task(curr);
+                               resched_curr(rq);
                        raw_spin_unlock(&rt_rq->rt_runtime_lock);
                }
        }
@@ -1363,7 +1367,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         * to try and push current away:
         */
        requeue_task_rt(rq, p, 1);
-       resched_task(rq->curr);
+       resched_curr(rq);
 }
 
 #endif /* CONFIG_SMP */
@@ -1374,7 +1378,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
 {
        if (p->prio < rq->curr->prio) {
-               resched_task(rq->curr);
+               resched_curr(rq);
                return;
        }
 
@@ -1690,7 +1694,7 @@ retry:
         * just reschedule current.
         */
        if (unlikely(next_task->prio < rq->curr->prio)) {
-               resched_task(rq->curr);
+               resched_curr(rq);
                return 0;
        }
 
@@ -1737,7 +1741,7 @@ retry:
        activate_task(lowest_rq, next_task, 0);
        ret = 1;
 
-       resched_task(lowest_rq->curr);
+       resched_curr(lowest_rq);
 
        double_unlock_balance(rq, lowest_rq);
 
@@ -1936,7 +1940,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
                return;
 
        if (pull_rt_task(rq))
-               resched_task(rq->curr);
+               resched_curr(rq);
 }
 
 void __init init_sched_rt_class(void)
@@ -1974,7 +1978,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
                        check_resched = 0;
 #endif /* CONFIG_SMP */
                if (check_resched && p->prio < rq->curr->prio)
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        }
 }
 
@@ -2003,11 +2007,11 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
                 * Only reschedule if p is still on the same runqueue.
                 */
                if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
-                       resched_task(p);
+                       resched_curr(rq);
 #else
                /* For UP simply resched on drop of prio */
                if (oldprio < p->prio)
-                       resched_task(p);
+                       resched_curr(rq);
 #endif /* CONFIG_SMP */
        } else {
                /*
@@ -2016,7 +2020,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
                 * then reschedule.
                 */
                if (p->prio < rq->curr->prio)
-                       resched_task(rq->curr);
+                       resched_curr(rq);
        }
 }
 
index 31cc02e..579712f 100644 (file)
@@ -477,6 +477,9 @@ struct root_domain {
        cpumask_var_t span;
        cpumask_var_t online;
 
+       /* Indicate more than one runnable task for any CPU */
+       bool overload;
+
        /*
         * The bit corresponding to a CPU gets set here if such CPU has more
         * than one runnable -deadline task (as it is below for RT tasks).
@@ -884,20 +887,10 @@ enum {
 #undef SCHED_FEAT
 
 #if defined(CONFIG_SCHED_DEBUG) && defined(HAVE_JUMP_LABEL)
-static __always_inline bool static_branch__true(struct static_key *key)
-{
-       return static_key_true(key); /* Not out of line branch. */
-}
-
-static __always_inline bool static_branch__false(struct static_key *key)
-{
-       return static_key_false(key); /* Out of line branch. */
-}
-
 #define SCHED_FEAT(name, enabled)                                      \
 static __always_inline bool static_branch_##name(struct static_key *key) \
 {                                                                      \
-       return static_branch__##enabled(key);                           \
+       return static_key_##enabled(key);                               \
 }
 
 #include "features.h"
@@ -1196,7 +1189,7 @@ extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
 extern void init_sched_dl_class(void);
 
-extern void resched_task(struct task_struct *p);
+extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
 
 extern struct rt_bandwidth def_rt_bandwidth;
@@ -1218,15 +1211,26 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 
        rq->nr_running = prev_nr + count;
 
-#ifdef CONFIG_NO_HZ_FULL
        if (prev_nr < 2 && rq->nr_running >= 2) {
+#ifdef CONFIG_SMP
+               if (!rq->rd->overload)
+                       rq->rd->overload = true;
+#endif
+
+#ifdef CONFIG_NO_HZ_FULL
                if (tick_nohz_full_cpu(rq->cpu)) {
-                       /* Order rq->nr_running write against the IPI */
-                       smp_wmb();
-                       smp_send_reschedule(rq->cpu);
+                       /*
+                        * Tick is needed if more than one task runs on a CPU.
+                        * Send the target an IPI to kick it out of nohz mode.
+                        *
+                        * We assume that the IPI implies a full memory
+                        * barrier and that the new value of rq->nr_running
+                        * is visible to the target when the IPI is received.
+                        */
+                       tick_nohz_full_kick_cpu(rq->cpu);
                }
-       }
 #endif
+       }
 }
 
 static inline void sub_nr_running(struct rq *rq, unsigned count)
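
The new root_domain::overload flag ties the pieces of this series together:
it is raised here whenever a runqueue goes from fewer than two to two or more
runnable tasks (and by the load-balance statistics pass at the root domain),
and idle_balance() reads it to skip the newidle domain walk when no CPU can
possibly have a task to give away. A condensed producer/consumer sketch:

    /* Producer: enqueue path, rq just gained a second runnable task. */
    if (prev_nr < 2 && rq->nr_running >= 2 && !rq->rd->overload)
            rq->rd->overload = true;

    /* Consumer: newidle balance (see the idle_balance() hunk earlier)
     * bails out before walking any domain when nothing is overloaded. */
    if (this_rq->avg_idle < sysctl_sched_migration_cost ||
        !this_rq->rd->overload)
            return 0;       /* sketch: skip the balance entirely */
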
index 0ffa20a..15cab1a 100644 (file)
@@ -319,14 +319,14 @@ EXPORT_SYMBOL(wake_bit_function);
  */
 int __sched
 __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
-                       int (*action)(void *), unsigned mode)
+             wait_bit_action_f *action, unsigned mode)
 {
        int ret = 0;
 
        do {
                prepare_to_wait(wq, &q->wait, mode);
                if (test_bit(q->key.bit_nr, q->key.flags))
-                       ret = (*action)(q->key.flags);
+                       ret = (*action)(&q->key);
        } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
        finish_wait(wq, &q->wait);
        return ret;
@@ -334,7 +334,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
 EXPORT_SYMBOL(__wait_on_bit);
 
 int __sched out_of_line_wait_on_bit(void *word, int bit,
-                                       int (*action)(void *), unsigned mode)
+                                   wait_bit_action_f *action, unsigned mode)
 {
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);
@@ -345,7 +345,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_bit);
 
 int __sched
 __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
-                       int (*action)(void *), unsigned mode)
+                       wait_bit_action_f *action, unsigned mode)
 {
        do {
                int ret;
@@ -353,7 +353,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                prepare_to_wait_exclusive(wq, &q->wait, mode);
                if (!test_bit(q->key.bit_nr, q->key.flags))
                        continue;
-               ret = action(q->key.flags);
+               ret = action(&q->key);
                if (!ret)
                        continue;
                abort_exclusive_wait(wq, &q->wait, mode, &q->key);
@@ -365,7 +365,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
 EXPORT_SYMBOL(__wait_on_bit_lock);
 
 int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
-                                       int (*action)(void *), unsigned mode)
+                                        wait_bit_action_f *action, unsigned mode)
 {
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);
@@ -502,3 +502,21 @@ void wake_up_atomic_t(atomic_t *p)
        __wake_up_bit(atomic_t_waitqueue(p), p, WAIT_ATOMIC_T_BIT_NR);
 }
 EXPORT_SYMBOL(wake_up_atomic_t);
+
+__sched int bit_wait(struct wait_bit_key *word)
+{
+       if (signal_pending_state(current->state, current))
+               return 1;
+       schedule();
+       return 0;
+}
+EXPORT_SYMBOL(bit_wait);
+
+__sched int bit_wait_io(struct wait_bit_key *word)
+{
+       if (signal_pending_state(current->state, current))
+               return 1;
+       io_schedule();
+       return 0;
+}
+EXPORT_SYMBOL(bit_wait_io);
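
With wait_bit_action_f, the action callback now receives the whole
struct wait_bit_key * rather than a bare flags pointer, and bit_wait() /
bit_wait_io() are stock actions covering the common cases. A hedged example
of a caller under the new signature (the bit word and bit number are
hypothetical):

    static unsigned long my_state;              /* hypothetical bit word */
    #define MY_BUSY_BIT 0

    static int my_wait_for_idle(void)
    {
            /* Sleeps via io_schedule(); returns nonzero if a signal
             * is pending in TASK_INTERRUPTIBLE. */
            return out_of_line_wait_on_bit(&my_state, MY_BUSY_BIT,
                                           bit_wait_io, TASK_INTERRUPTIBLE);
    }
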
index a4077e9..40b76e3 100644 (file)
@@ -1263,6 +1263,10 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
        struct sighand_struct *sighand;
 
        for (;;) {
+               /*
+                * Disable interrupts early to avoid deadlocks.
+                * See rcu_read_unlock() comment header for details.
+                */
                local_irq_save(*flags);
                rcu_read_lock();
                sighand = rcu_dereference(tsk->sighand);
index 306f818..487653b 100644 (file)
@@ -3,6 +3,7 @@
  *
  * (C) Jens Axboe <jens.axboe@oracle.com> 2008
  */
+#include <linux/irq_work.h>
 #include <linux/rcupdate.h>
 #include <linux/rculist.h>
 #include <linux/kernel.h>
@@ -29,6 +30,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
+static void flush_smp_call_function_queue(bool warn_cpu_offline);
+
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -51,12 +54,27 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
+               /* Fall-through to the CPU_DEAD[_FROZEN] case. */
 
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                free_percpu(cfd->csd);
                break;
+
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
+               /*
+                * The IPIs for the smp-call-function callbacks queued by other
+                * CPUs might arrive late, either due to hardware latencies or
+                * because this CPU disabled interrupts (inside stop-machine)
+                * before the IPIs were sent. So flush out any pending callbacks
+                * explicitly (without waiting for the IPIs to arrive), to
+                * ensure that the outgoing CPU doesn't go offline with work
+                * still pending.
+                */
+               flush_smp_call_function_queue(false);
+               break;
 #endif
        };
 
@@ -177,23 +195,47 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
        return 0;
 }
 
-/*
- * Invoked by arch to handle an IPI for call function single. Must be
- * called from the arch with interrupts disabled.
+/**
+ * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
+ *
+ * Invoked by arch to handle an IPI for call function single.
+ * Must be called with interrupts disabled.
  */
 void generic_smp_call_function_single_interrupt(void)
 {
+       flush_smp_call_function_queue(true);
+}
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *
+ * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
+ *                   offline CPU. Skip this check if set to 'false'.
+ *
+ * Flush any pending smp-call-function callbacks queued on this CPU. This is
+ * invoked by the generic IPI handler, as well as by a CPU about to go offline,
+ * to ensure that all pending IPI callbacks are run before it goes completely
+ * offline.
+ *
+ * Loop through the call_single_queue and run all the queued callbacks.
+ * Must be called with interrupts disabled.
+ */
+static void flush_smp_call_function_queue(bool warn_cpu_offline)
+{
+       struct llist_head *head;
        struct llist_node *entry;
        struct call_single_data *csd, *csd_next;
        static bool warned;
 
-       entry = llist_del_all(&__get_cpu_var(call_single_queue));
+       WARN_ON(!irqs_disabled());
+
+       head = &__get_cpu_var(call_single_queue);
+       entry = llist_del_all(head);
        entry = llist_reverse_order(entry);
 
-       /*
-        * Shouldn't receive this interrupt on a cpu that is not yet online.
-        */
-       if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
+       /* There shouldn't be any pending callbacks on an offline CPU. */
+       if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
+                    !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 
@@ -210,6 +252,14 @@ void generic_smp_call_function_single_interrupt(void)
                csd->func(csd->info);
                csd_unlock(csd);
        }
+
+       /*
+        * Handle irq works queued remotely by irq_work_queue_on().
+        * The SMP functions above are typically synchronous, so they
+        * had better run first, since some other CPUs may be busy
+        * waiting for them.
+        */
+       irq_work_run();
 }
 
 /*
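
Factoring the handler body into flush_smp_call_function_queue() lets a dying
CPU drain its own queue synchronously from the CPU_DYING notifier, instead of
hoping that in-flight IPIs still arrive after interrupts are disabled in
stop-machine. The drain itself is the usual lockless-list idiom (condensed
from the function above):

    struct llist_node *entry;
    struct call_single_data *csd, *csd_next;

    entry = llist_del_all(head);            /* atomically take everything */
    entry = llist_reverse_order(entry);     /* llist is LIFO; restore FIFO */

    llist_for_each_entry_safe(csd, csd_next, entry, llist) {
            csd->func(csd->info);
            csd_unlock(csd);                /* may wake a waiting sender */
    }
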
index ba9ed45..75b22e2 100644 (file)
@@ -136,7 +136,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
-static int min_percpu_pagelist_fract = 8;
 
 static int ngroups_max = NGROUPS_MAX;
 static const int cap_last_cap = CAP_LAST_CAP;
@@ -152,10 +151,6 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
 #ifdef CONFIG_SPARC
 #endif
 
-#ifdef CONFIG_SPARC64
-extern int sysctl_tsb_ratio;
-#endif
-
 #ifdef __hppa__
 extern int pwrsw_enabled;
 #endif
@@ -865,6 +860,17 @@ static struct ctl_table kern_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+#ifdef CONFIG_SMP
+       {
+               .procname       = "softlockup_all_cpu_backtrace",
+               .data           = &sysctl_softlockup_all_cpu_backtrace,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
+#endif /* CONFIG_SMP */
        {
                .procname       = "nmi_watchdog",
                .data           = &watchdog_user_enabled,
@@ -1321,7 +1327,7 @@ static struct ctl_table vm_table[] = {
                .maxlen         = sizeof(percpu_pagelist_fraction),
                .mode           = 0644,
                .proc_handler   = percpu_pagelist_fraction_sysctl_handler,
-               .extra1         = &min_percpu_pagelist_fract,
+               .extra1         = &zero,
        },
 #ifdef CONFIG_MMU
        {
index 88c9c65..fe75444 100644 (file)
@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
                                struct itimerspec *new_setting,
                                struct itimerspec *old_setting)
 {
+       ktime_t exp;
+
        if (!rtcdev)
                return -ENOTSUPP;
 
+       if (flags & ~TIMER_ABSTIME)
+               return -EINVAL;
+
        if (old_setting)
                alarm_timer_get(timr, old_setting);
 
@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 
        /* start the timer */
        timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
-       alarm_start(&timr->it.alarm.alarmtimer,
-                       timespec_to_ktime(new_setting->it_value));
+       exp = timespec_to_ktime(new_setting->it_value);
+       /* Convert (if necessary) to absolute time */
+       if (flags != TIMER_ABSTIME) {
+               ktime_t now;
+
+               now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
+               exp = ktime_add(now, exp);
+       }
+
+       alarm_start(&timr->it.alarm.alarmtimer, exp);
        return 0;
 }
 
@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        if (!alarmtimer_get_rtcdev())
                return -ENOTSUPP;
 
+       if (flags & ~TIMER_ABSTIME)
+               return -EINVAL;
+
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
 
index ad362c2..9c94c19 100644 (file)
@@ -146,7 +146,8 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
 {
        /* Nothing to do if we already reached the limit */
        if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
-               printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
+               printk_deferred(KERN_WARNING
+                               "CE: Reprogramming failure. Giving up\n");
                dev->next_event.tv64 = KTIME_MAX;
                return -ETIME;
        }
@@ -159,9 +160,10 @@ static int clockevents_increase_min_delta(struct clock_event_device *dev)
        if (dev->min_delta_ns > MIN_DELTA_LIMIT)
                dev->min_delta_ns = MIN_DELTA_LIMIT;
 
-       printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
-              dev->name ? dev->name : "?",
-              (unsigned long long) dev->min_delta_ns);
+       printk_deferred(KERN_WARNING
+                       "CE: %s increased min_delta_ns to %llu nsec\n",
+                       dev->name ? dev->name : "?",
+                       (unsigned long long) dev->min_delta_ns);
        return 0;
 }
 
index 445106d..01d2d15 100644 (file)
@@ -191,7 +191,8 @@ void __init sched_clock_postinit(void)
 
 static int sched_clock_suspend(void)
 {
-       sched_clock_poll(&sched_clock_timer);
+       update_sched_clock();
+       hrtimer_cancel(&sched_clock_timer);
        cd.suspended = true;
        return 0;
 }
@@ -199,6 +200,7 @@ static int sched_clock_suspend(void)
 static void sched_clock_resume(void)
 {
        cd.epoch_cyc = read_sched_clock();
+       hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
        cd.suspended = false;
 }
 
index 6558b7a..99aa6ee 100644 (file)
@@ -154,6 +154,7 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 
 #ifdef CONFIG_NO_HZ_FULL
 cpumask_var_t tick_nohz_full_mask;
+cpumask_var_t housekeeping_mask;
 bool tick_nohz_full_running;
 
 static bool can_stop_full_tick(void)
@@ -224,13 +225,15 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
 };
 
 /*
- * Kick the current CPU if it's full dynticks in order to force it to
+ * Kick the CPU if it's full dynticks in order to force it to
  * re-evaluate its dependency on the tick and restart it if necessary.
  */
-void tick_nohz_full_kick(void)
+void tick_nohz_full_kick_cpu(int cpu)
 {
-       if (tick_nohz_full_cpu(smp_processor_id()))
-               irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+       if (!tick_nohz_full_cpu(cpu))
+               return;
+
+       irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
 }
 
 static void nohz_full_kick_ipi(void *info)
@@ -281,6 +284,7 @@ static int __init tick_nohz_full_setup(char *str)
        int cpu;
 
        alloc_bootmem_cpumask_var(&tick_nohz_full_mask);
+       alloc_bootmem_cpumask_var(&housekeeping_mask);
        if (cpulist_parse(str, tick_nohz_full_mask) < 0) {
                pr_warning("NOHZ: Incorrect nohz_full cpumask\n");
                return 1;
@@ -291,6 +295,8 @@ static int __init tick_nohz_full_setup(char *str)
                pr_warning("NO_HZ: Clearing %d from nohz_full range for timekeeping\n", cpu);
                cpumask_clear_cpu(cpu, tick_nohz_full_mask);
        }
+       cpumask_andnot(housekeeping_mask,
+                      cpu_possible_mask, tick_nohz_full_mask);
        tick_nohz_full_running = true;
 
        return 1;
@@ -332,9 +338,15 @@ static int tick_nohz_init_all(void)
                pr_err("NO_HZ: Can't allocate full dynticks cpumask\n");
                return err;
        }
+       if (!alloc_cpumask_var(&housekeeping_mask, GFP_KERNEL)) {
+               pr_err("NO_HZ: Can't allocate not-full dynticks cpumask\n");
+               return err;
+       }
        err = 0;
        cpumask_setall(tick_nohz_full_mask);
        cpumask_clear_cpu(smp_processor_id(), tick_nohz_full_mask);
+       cpumask_clear(housekeeping_mask);
+       cpumask_set_cpu(smp_processor_id(), housekeeping_mask);
        tick_nohz_full_running = true;
 #endif
        return err;
index 40bb511..d600af2 100644 (file)
@@ -708,7 +708,7 @@ int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
        int ret = 0;
 
        VERBOSE_TOROUT_STRING(m);
-       *tp = kthread_run(fn, arg, s);
+       *tp = kthread_run(fn, arg, "%s", s);
        if (IS_ERR(*tp)) {
                ret = PTR_ERR(*tp);
                VERBOSE_TOROUT_ERRSTRING(f);
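
kthread_run()'s name argument is a printf-style format string, so passing a
caller-supplied string directly lets any '%' in it be parsed as a conversion
that consumes nonexistent varargs. The fix is the standard one:

    /* Wrong: 's' is interpreted as a format string. */
    *tp = kthread_run(fn, arg, s);

    /* Right: constant format, caller string passed as plain data. */
    *tp = kthread_run(fn, arg, "%s", s);
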
index d440935..a5da09c 100644 (file)
@@ -29,11 +29,6 @@ config HAVE_FUNCTION_GRAPH_FP_TEST
        help
          See Documentation/trace/ftrace-design.txt
 
-config HAVE_FUNCTION_TRACE_MCOUNT_TEST
-       bool
-       help
-         See Documentation/trace/ftrace-design.txt
-
 config HAVE_DYNAMIC_FTRACE
        bool
        help
index 2611613..67d6369 100644 (file)
@@ -28,6 +28,7 @@ obj-$(CONFIG_RING_BUFFER_BENCHMARK) += ring_buffer_benchmark.o
 
 obj-$(CONFIG_TRACING) += trace.o
 obj-$(CONFIG_TRACING) += trace_output.o
+obj-$(CONFIG_TRACING) += trace_seq.o
 obj-$(CONFIG_TRACING) += trace_stat.o
 obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
index 5b372e3..1654b12 100644 (file)
@@ -80,9 +80,6 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;
 
-/* Quick disabling of function tracer. */
-int function_trace_stop __read_mostly;
-
 /* Current function tracing op */
 struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
 /* What to set function_trace_op to */
@@ -265,12 +262,12 @@ static void update_ftrace_function(void)
                func = ftrace_ops_list_func;
        }
 
+       update_function_graph_func();
+
        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;
 
-       update_function_graph_func();
-
        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
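
The reordering matters because ftrace_trace_function can stay the same (for
example, still the shared list function) while the set of registered ops has
changed; with the old ordering, the early return skipped the graph-function
update exactly in that case. Schematically:

    /* Old (buggy) order: the update below is skipped whenever the
     * top-level trace function did not change. */
    if (ftrace_trace_function == func)
            return;
    update_function_graph_func();

    /* New order: always refresh the graph entry test first. */
    update_function_graph_func();
    if (ftrace_trace_function == func)
            return;
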
@@ -1042,6 +1039,8 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 
+static struct ftrace_ops *removed_ops;
+
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
@@ -1304,25 +1303,15 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        struct ftrace_hash *new_hash;
        int size = src->count;
        int bits = 0;
-       int ret;
        int i;
 
-       /*
-        * Remove the current set, update the hash and add
-        * them back.
-        */
-       ftrace_hash_rec_disable(ops, enable);
-
        /*
         * If the new source is empty, just free dst and assign it
         * the empty_hash.
         */
        if (!src->count) {
-               free_ftrace_hash_rcu(*dst);
-               rcu_assign_pointer(*dst, EMPTY_HASH);
-               /* still need to update the function records */
-               ret = 0;
-               goto out;
+               new_hash = EMPTY_HASH;
+               goto update;
        }
 
        /*
@@ -1335,10 +1324,9 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
        if (bits > FTRACE_HASH_MAX_BITS)
                bits = FTRACE_HASH_MAX_BITS;
 
-       ret = -ENOMEM;
        new_hash = alloc_ftrace_hash(bits);
        if (!new_hash)
-               goto out;
+               return -ENOMEM;
 
        size = 1 << src->size_bits;
        for (i = 0; i < size; i++) {
@@ -1349,20 +1337,20 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
                }
        }
 
+update:
+       /*
+        * Remove the current set, update the hash and add
+        * them back.
+        */
+       ftrace_hash_rec_disable(ops, enable);
+
        old_hash = *dst;
        rcu_assign_pointer(*dst, new_hash);
        free_ftrace_hash_rcu(old_hash);
 
-       ret = 0;
- out:
-       /*
-        * Enable regardless of ret:
-        *  On success, we enable the new hash.
-        *  On failure, we re-enable the original hash.
-        */
        ftrace_hash_rec_enable(ops, enable);
 
-       return ret;
+       return 0;
 }
 
 /*
@@ -1492,6 +1480,53 @@ int ftrace_text_reserved(const void *start, const void *end)
        return (int)!!ret;
 }
 
+/* Test if ops registered to this rec needs regs */
+static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *ops;
+       bool keep_regs = false;
+
+       for (ops = ftrace_ops_list;
+            ops != &ftrace_list_end; ops = ops->next) {
+               /* pass rec in as regs to have non-NULL val */
+               if (ftrace_ops_test(ops, rec->ip, rec)) {
+                       if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+                               keep_regs = true;
+                               break;
+                       }
+               }
+       }
+
+       return keep_regs;
+}
+
+static void ftrace_remove_tramp(struct ftrace_ops *ops,
+                               struct dyn_ftrace *rec)
+{
+       struct ftrace_func_entry *entry;
+
+       entry = ftrace_lookup_ip(ops->tramp_hash, rec->ip);
+       if (!entry)
+               return;
+
+       /*
+        * The tramp_hash entry will be removed at time
+        * of update.
+        */
+       ops->nr_trampolines--;
+       rec->flags &= ~FTRACE_FL_TRAMP;
+}
+
+static void ftrace_clear_tramps(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *op;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (op->nr_trampolines)
+                       ftrace_remove_tramp(op, rec);
+       } while_for_each_ftrace_op(op);
+}
+
 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                                     int filter_hash,
                                     bool inc)
@@ -1572,8 +1607,30 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 
                if (inc) {
                        rec->flags++;
-                       if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
+                       if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
                                return;
+
+                       /*
+                        * If there's only a single callback registered to a
+                        * function, and the ops has a trampoline registered
+                        * for it, then we can call it directly.
+                        */
+                       if (ftrace_rec_count(rec) == 1 && ops->trampoline) {
+                               rec->flags |= FTRACE_FL_TRAMP;
+                               ops->nr_trampolines++;
+                       } else {
+                               /*
+                                * If we are adding another function callback
+                                * to this function, and the previous had a
+                                * trampoline used, then we need to go back to
+                                * the default trampoline.
+                                */
+                               rec->flags &= ~FTRACE_FL_TRAMP;
+
+                               /* remove trampolines from any ops for this rec */
+                               ftrace_clear_tramps(rec);
+                       }
+
                        /*
                         * If any ops wants regs saved for this function
                         * then all ops will get saved regs.
@@ -1581,9 +1638,30 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
                        if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
                                rec->flags |= FTRACE_FL_REGS;
                } else {
-                       if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
+                       if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
                                return;
                        rec->flags--;
+
+                       if (ops->trampoline && !ftrace_rec_count(rec))
+                               ftrace_remove_tramp(ops, rec);
+
+                       /*
+                        * If the rec had REGS enabled and the ops that is
+                        * being removed had REGS set, then see if there is
+                        * still any ops for this record that wants regs.
+                        * If not, we can stop recording them.
+                        */
+                       if (ftrace_rec_count(rec) > 0 &&
+                           rec->flags & FTRACE_FL_REGS &&
+                           ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+                               if (!test_rec_ops_needs_regs(rec))
+                                       rec->flags &= ~FTRACE_FL_REGS;
+                       }
+
+                       /*
+                        * flags will be cleared in ftrace_check_record()
+                        * if rec count is zero.
+                        */
                }
                count++;
                /* Shortcut, if we handled all records, we are done. */
@@ -1668,17 +1746,23 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
         * If we are disabling calls, then disable all records that
         * are enabled.
         */
-       if (enable && (rec->flags & ~FTRACE_FL_MASK))
+       if (enable && ftrace_rec_count(rec))
                flag = FTRACE_FL_ENABLED;
 
        /*
-        * If enabling and the REGS flag does not match the REGS_EN, then
-        * do not ignore this record. Set flags to fail the compare against
-        * ENABLED.
+        * If enabling and the REGS flag does not match the REGS_EN, or
+        * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
+        * this record. Set flags to fail the compare against ENABLED.
         */
-       if (flag &&
-           (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
-               flag |= FTRACE_FL_REGS;
+       if (flag) {
+               if (!(rec->flags & FTRACE_FL_REGS) !=
+                   !(rec->flags & FTRACE_FL_REGS_EN))
+                       flag |= FTRACE_FL_REGS;
+
+               if (!(rec->flags & FTRACE_FL_TRAMP) !=
+                   !(rec->flags & FTRACE_FL_TRAMP_EN))
+                       flag |= FTRACE_FL_TRAMP;
+       }
 
        /* If the state of this record hasn't changed, then do nothing */
        if ((rec->flags & FTRACE_FL_ENABLED) == flag)
@@ -1696,6 +1780,12 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
                                else
                                        rec->flags &= ~FTRACE_FL_REGS_EN;
                        }
+                       if (flag & FTRACE_FL_TRAMP) {
+                               if (rec->flags & FTRACE_FL_TRAMP)
+                                       rec->flags |= FTRACE_FL_TRAMP_EN;
+                               else
+                                       rec->flags &= ~FTRACE_FL_TRAMP_EN;
+                       }
                }
 
                /*
@@ -1704,7 +1794,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
                 * Otherwise,
                 *   return UPDATE_MODIFY_CALL to tell the caller to convert
                 *   from the save regs, to a non-save regs function or
-                *   vice versa.
+                *   vice versa, or from a trampoline call.
                 */
                if (flag & FTRACE_FL_ENABLED)
                        return FTRACE_UPDATE_MAKE_CALL;
@@ -1714,7 +1804,7 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
 
        if (update) {
                /* If there's no more users, clear all flags */
-               if (!(rec->flags & ~FTRACE_FL_MASK))
+               if (!ftrace_rec_count(rec))
                        rec->flags = 0;
                else
                        /* Just disable the record (keep REGS state) */
@@ -1751,6 +1841,43 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
        return ftrace_check_record(rec, enable, 0);
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *op;
+
+       /* Removed ops need to be tested first */
+       if (removed_ops && removed_ops->tramp_hash) {
+               if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip))
+                       return removed_ops;
+       }
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               if (!op->tramp_hash)
+                       continue;
+
+               if (ftrace_lookup_ip(op->tramp_hash, rec->ip))
+                       return op;
+
+       } while_for_each_ftrace_op(op);
+
+       return NULL;
+}
+
+static struct ftrace_ops *
+ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
+{
+       struct ftrace_ops *op;
+
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+               /* pass rec in as regs to have non-NULL val */
+               if (ftrace_ops_test(op, rec->ip, rec))
+                       return op;
+       } while_for_each_ftrace_op(op);
+
+       return NULL;
+}
+
 /**
  * ftrace_get_addr_new - Get the call address to set to
  * @rec:  The ftrace record descriptor
@@ -1763,6 +1890,20 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
  */
 unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
 {
+       struct ftrace_ops *ops;
+
+       /* Trampolines take precedence over regs */
+       if (rec->flags & FTRACE_FL_TRAMP) {
+               ops = ftrace_find_tramp_ops_new(rec);
+               if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
+                       pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+                                   (void *)rec->ip, (void *)rec->ip);
+                       /* Ftrace is shutting down, return anything */
+                       return (unsigned long)FTRACE_ADDR;
+               }
+               return ops->trampoline;
+       }
+
        if (rec->flags & FTRACE_FL_REGS)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
@@ -1781,6 +1922,20 @@ unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
  */
 unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
 {
+       struct ftrace_ops *ops;
+
+       /* Trampolines take precedence over regs */
+       if (rec->flags & FTRACE_FL_TRAMP_EN) {
+               ops = ftrace_find_tramp_ops_curr(rec);
+               if (FTRACE_WARN_ON(!ops)) {
+                       pr_warning("Bad trampoline accounting at: %p (%pS)\n",
+                                   (void *)rec->ip, (void *)rec->ip);
+                       /* Ftrace is shutting down, return anything */
+                       return (unsigned long)FTRACE_ADDR;
+               }
+               return ops->trampoline;
+       }
+
        if (rec->flags & FTRACE_FL_REGS_EN)
                return (unsigned long)FTRACE_REGS_ADDR;
        else
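
Taken together, ftrace_get_addr_new() and ftrace_get_addr_curr() now
resolve a record's call target with a fixed precedence: a private
trampoline first, then the regs-saving caller, then the plain caller.
A sketch of just that ordering, with hypothetical names standing in for
the arch-provided stubs:

	#define FL_REGS   (1u << 0)	/* hypothetical flag values */
	#define FL_TRAMP  (1u << 2)

	enum call_target { TARGET_TRAMP, TARGET_REGS, TARGET_PLAIN };

	static enum call_target pick_target(unsigned int flags)
	{
		if (flags & FL_TRAMP)	/* exactly one ops traces this site */
			return TARGET_TRAMP;
		if (flags & FL_REGS)	/* some ops wants pt_regs saved */
			return TARGET_REGS;
		return TARGET_PLAIN;	/* fall back to the generic caller */
	}
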
@@ -2023,6 +2178,89 @@ void __weak arch_ftrace_update_code(int command)
        ftrace_run_stop_machine(command);
 }
 
+static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
+{
+       struct ftrace_page *pg;
+       struct dyn_ftrace *rec;
+       int size, bits;
+       int ret;
+
+       size = ops->nr_trampolines;
+       bits = 0;
+       /*
+        * Make the hash size about 1/2 the # found
+        */
+       for (size /= 2; size; size >>= 1)
+               bits++;
+
+       ops->tramp_hash = alloc_ftrace_hash(bits);
+       /*
+        * TODO: a failed allocation will screw up the
+        * accounting of what needs to be modified and
+        * what does not. For now, we kill ftrace if we
+        * fail to allocate here. There are ways around
+        * this, but they take a little more work.
+        */
+       if (!ops->tramp_hash)
+               return -ENOMEM;
+
+       do_for_each_ftrace_rec(pg, rec) {
+               if (ftrace_rec_count(rec) == 1 &&
+                   ftrace_ops_test(ops, rec->ip, rec)) {
+
+                       /*
+                        * If another ops adds to a rec, the rec will
+                        * lose its trampoline and never get it back
+                        * until all ops are off of it.
+                        */
+                       if (!(rec->flags & FTRACE_FL_TRAMP))
+                               continue;
+
+                       /* This record had better have a trampoline */
+                       if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
+                               return -1;
+
+                       ret = add_hash_entry(ops->tramp_hash, rec->ip);
+                       if (ret < 0)
+                               return ret;
+               }
+       } while_for_each_ftrace_rec();
+
+       /* The number of recs in the hash must match nr_trampolines */
+       FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines);
+
+       return 0;
+}
+
+static int ftrace_save_tramp_hashes(void)
+{
+       struct ftrace_ops *op;
+       int ret;
+
+       /*
+        * Now that trampolines may be in use, we need to save the
+        * hashes for the ops that have them. This allows mapping a
+        * record back to the ops that owns its trampoline, so we
+        * know what code is being replaced. Modifying code must
+        * always verify what it is changing.
+        */
+       do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+               /* The tramp_hash is recreated each time. */
+               free_ftrace_hash(op->tramp_hash);
+               op->tramp_hash = NULL;
+
+               if (op->nr_trampolines) {
+                       ret = ftrace_save_ops_tramp_hash(op);
+                       if (ret)
+                               return ret;
+               }
+
+       } while_for_each_ftrace_op(op);
+
+       return 0;
+}
+
 static void ftrace_run_update_code(int command)
 {
        int ret;
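
The sizing loop in ftrace_save_ops_tramp_hash() above derives the hash
bits from the trampoline count, which yields a power-of-two bucket count
between half the count and the full count. A runnable rendition of just
that arithmetic:

	#include <stdio.h>

	static int tramp_hash_bits(int nr_trampolines)
	{
		int size = nr_trampolines;
		int bits = 0;

		/* Same loop as above: bits = floor(log2(size / 2)) + 1 */
		for (size /= 2; size; size >>= 1)
			bits++;
		return bits;
	}

	int main(void)
	{
		/* 100 trampolines -> 6 bits -> a 64-bucket hash */
		printf("%d\n", tramp_hash_bits(100));
		return 0;
	}
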
@@ -2031,11 +2269,6 @@ static void ftrace_run_update_code(int command)
        FTRACE_WARN_ON(ret);
        if (ret)
                return;
-       /*
-        * Do not call function tracer while we update the code.
-        * We are in stop machine.
-        */
-       function_trace_stop++;
 
        /*
         * By default we use stop_machine() to modify the code.
@@ -2045,15 +2278,15 @@ static void ftrace_run_update_code(int command)
         */
        arch_ftrace_update_code(command);
 
-       function_trace_stop--;
-
        ret = ftrace_arch_code_modify_post_process();
        FTRACE_WARN_ON(ret);
+
+       ret = ftrace_save_tramp_hashes();
+       FTRACE_WARN_ON(ret);
 }
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
-static int global_start_up;
 
 static void control_ops_free(struct ftrace_ops *ops)
 {
@@ -2117,8 +2350,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 
        ftrace_hash_rec_disable(ops, 1);
 
-       if (!global_start_up)
-               ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+       ops->flags &= ~FTRACE_OPS_FL_ENABLED;
 
        command |= FTRACE_UPDATE_CALLS;
 
@@ -2139,8 +2371,16 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
                return 0;
        }
 
+       /*
+        * If the ops uses a trampoline, then it needs to be
+        * tested first on update.
+        */
+       removed_ops = ops;
+
        ftrace_run_update_code(command);
 
+       removed_ops = NULL;
+
        /*
         * Dynamic ops may be freed, we must make sure that all
         * callers are done before leaving this function.
@@ -2398,7 +2638,8 @@ ftrace_allocate_pages(unsigned long num_to_init)
        return start_pg;
 
  free_pages:
-       while (start_pg) {
+       pg = start_pg;
+       while (pg) {
                order = get_count_order(pg->size / ENTRIES_PER_PAGE);
                free_pages((unsigned long)pg->records, order);
                start_pg = pg->next;
@@ -2595,8 +2836,10 @@ static void *t_start(struct seq_file *m, loff_t *pos)
         * off, we can short cut and just print out that all
         * functions are enabled.
         */
-       if (iter->flags & FTRACE_ITER_FILTER &&
-           ftrace_hash_empty(ops->filter_hash)) {
+       if ((iter->flags & FTRACE_ITER_FILTER &&
+            ftrace_hash_empty(ops->filter_hash)) ||
+           (iter->flags & FTRACE_ITER_NOTRACE &&
+            ftrace_hash_empty(ops->notrace_hash))) {
                if (*pos > 0)
                        return t_hash_start(m, pos);
                iter->flags |= FTRACE_ITER_PRINTALL;
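
With this change the short cut applies to the notrace file as well: an
empty notrace hash means nothing is excluded, and the read side now says
so explicitly instead of producing an empty file:

	# cat /sys/kernel/debug/tracing/set_ftrace_notrace
	#### no functions disabled ####
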
@@ -2641,7 +2884,10 @@ static int t_show(struct seq_file *m, void *v)
                return t_hash_show(m, iter);
 
        if (iter->flags & FTRACE_ITER_PRINTALL) {
-               seq_printf(m, "#### all functions enabled ####\n");
+               if (iter->flags & FTRACE_ITER_NOTRACE)
+                       seq_printf(m, "#### no functions disabled ####\n");
+               else
+                       seq_printf(m, "#### all functions enabled ####\n");
                return 0;
        }
 
@@ -2651,10 +2897,22 @@ static int t_show(struct seq_file *m, void *v)
                return 0;
 
        seq_printf(m, "%ps", (void *)rec->ip);
-       if (iter->flags & FTRACE_ITER_ENABLED)
+       if (iter->flags & FTRACE_ITER_ENABLED) {
                seq_printf(m, " (%ld)%s",
-                          rec->flags & ~FTRACE_FL_MASK,
-                          rec->flags & FTRACE_FL_REGS ? " R" : "");
+                          ftrace_rec_count(rec),
+                          rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+               if (rec->flags & FTRACE_FL_TRAMP_EN) {
+                       struct ftrace_ops *ops;
+
+                       ops = ftrace_find_tramp_ops_curr(rec);
+                       if (ops && ops->trampoline)
+                               seq_printf(m, "\ttramp: %pS",
+                                          (void *)ops->trampoline);
+                       else
+                               seq_printf(m, "\ttramp: ERROR!");
+               }
+       }
+
        seq_printf(m, "\n");
 
        return 0;
@@ -2702,13 +2960,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file)
        return iter ? 0 : -ENOMEM;
 }
 
-static void ftrace_filter_reset(struct ftrace_hash *hash)
-{
-       mutex_lock(&ftrace_lock);
-       ftrace_hash_clear(hash);
-       mutex_unlock(&ftrace_lock);
-}
-
 /**
  * ftrace_regex_open - initialize function tracer filter files
  * @ops: The ftrace_ops that hold the hash filters
@@ -2758,7 +3009,13 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                hash = ops->filter_hash;
 
        if (file->f_mode & FMODE_WRITE) {
-               iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
+               const int size_bits = FTRACE_HASH_DEFAULT_BITS;
+
+               if (file->f_flags & O_TRUNC)
+                       iter->hash = alloc_ftrace_hash(size_bits);
+               else
+                       iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
+
                if (!iter->hash) {
                        trace_parser_put(&iter->parser);
                        kfree(iter);
@@ -2767,10 +3024,6 @@ ftrace_regex_open(struct ftrace_ops *ops, int flag,
                }
        }
 
-       if ((file->f_mode & FMODE_WRITE) &&
-           (file->f_flags & O_TRUNC))
-               ftrace_filter_reset(iter->hash);
-
        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
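
Shell redirection is what selects between the two allocation paths
above: '>' opens the file with O_TRUNC and now starts from a freshly
allocated empty hash, while '>>' copies the existing hash so new entries
append. For example:

	# echo schedule  > /sys/kernel/debug/tracing/set_ftrace_filter
	# echo vfs_read >> /sys/kernel/debug/tracing/set_ftrace_filter

This also removes the old pattern of copying the hash only to
immediately clear it (the ftrace_filter_reset() calls deleted above).
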
 
@@ -3471,14 +3724,16 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
        else
                orig_hash = &ops->notrace_hash;
 
-       hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+       if (reset)
+               hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
+       else
+               hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+
        if (!hash) {
                ret = -ENOMEM;
                goto out_regex_unlock;
        }
 
-       if (reset)
-               ftrace_filter_reset(hash);
        if (buf && !ftrace_match_records(hash, buf, len)) {
                ret = -EINVAL;
                goto out_regex_unlock;
@@ -3630,6 +3885,7 @@ __setup("ftrace_filter=", set_ftrace_filter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
+static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
 static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
 
 static int __init set_graph_function(char *str)
@@ -3639,16 +3895,29 @@ static int __init set_graph_function(char *str)
 }
 __setup("ftrace_graph_filter=", set_graph_function);
 
-static void __init set_ftrace_early_graph(char *buf)
+static int __init set_graph_notrace_function(char *str)
+{
+       strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
+       return 1;
+}
+__setup("ftrace_graph_notrace=", set_graph_notrace_function);
+
+static void __init set_ftrace_early_graph(char *buf, int enable)
 {
        int ret;
        char *func;
+       unsigned long *table = ftrace_graph_funcs;
+       int *count = &ftrace_graph_count;
+
+       if (!enable) {
+               table = ftrace_graph_notrace_funcs;
+               count = &ftrace_graph_notrace_count;
+       }
 
        while (buf) {
                func = strsep(&buf, ",");
                /* we allow only one expression at a time */
-               ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
-                                     FTRACE_GRAPH_MAX_FUNCS, func);
+               ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
                if (ret)
                        printk(KERN_DEBUG "ftrace: function %s not "
                                          "traceable\n", func);
@@ -3677,7 +3946,9 @@ static void __init set_ftrace_early_filters(void)
                ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (ftrace_graph_buf[0])
-               set_ftrace_early_graph(ftrace_graph_buf);
+               set_ftrace_early_graph(ftrace_graph_buf, 1);
+       if (ftrace_graph_notrace_buf[0])
+               set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 }
 
@@ -3819,7 +4090,12 @@ static int g_show(struct seq_file *m, void *v)
                return 0;
 
        if (ptr == (unsigned long *)1) {
-               seq_printf(m, "#### all functions enabled ####\n");
+               struct ftrace_graph_data *fgd = m->private;
+
+               if (fgd->table == ftrace_graph_funcs)
+                       seq_printf(m, "#### all functions enabled ####\n");
+               else
+                       seq_printf(m, "#### no functions disabled ####\n");
                return 0;
        }
 
@@ -4447,9 +4723,6 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
        struct ftrace_ops *op;
        int bit;
 
-       if (function_trace_stop)
-               return;
-
        bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
        if (bit < 0)
                return;
@@ -4461,9 +4734,8 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
        preempt_disable_notrace();
        do_for_each_ftrace_op(op, ftrace_ops_list) {
                if (ftrace_ops_test(op, ip, regs)) {
-                       if (WARN_ON(!op->func)) {
-                               function_trace_stop = 1;
-                               printk("op=%p %pS\n", op, op);
+                       if (FTRACE_WARN_ON(!op->func)) {
+                               pr_warn("op=%p %pS\n", op, op);
                                goto out;
                        }
                        op->func(ip, parent_ip, op, regs);
@@ -5084,6 +5356,12 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
        /* Function graph doesn't use the .func field of global_ops */
        global_ops.flags |= FTRACE_OPS_FL_STUB;
 
+#ifdef CONFIG_DYNAMIC_FTRACE
+       /* Optimize function graph calling (if implemented by arch) */
+       if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+               global_ops.trampoline = FTRACE_GRAPH_TRAMP_ADDR;
+#endif
+
        ret = ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
 
 out:
@@ -5104,6 +5382,10 @@ void unregister_ftrace_graph(void)
        __ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
        global_ops.flags &= ~FTRACE_OPS_FL_STUB;
+#ifdef CONFIG_DYNAMIC_FTRACE
+       if (FTRACE_GRAPH_TRAMP_ADDR != 0)
+               global_ops.trampoline = 0;
+#endif
        unregister_pm_notifier(&ftrace_suspend_notifier);
        unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
@@ -5183,9 +5465,4 @@ void ftrace_graph_exit_task(struct task_struct *t)
 
        kfree(ret_stack);
 }
-
-void ftrace_graph_stop(void)
-{
-       ftrace_stop();
-}
 #endif
index 7c56c3d..925f629 100644 (file)
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *work;
 
-       if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
-           (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
-               return POLLIN | POLLRDNORM;
-
        if (cpu == RING_BUFFER_ALL_CPUS)
                work = &buffer->irq_work;
        else {
@@ -1693,22 +1689,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
                        if (!cpu_buffer->nr_pages_to_update)
                                continue;
 
-                       /* The update must run on the CPU that is being updated. */
-                       preempt_disable();
-                       if (cpu == smp_processor_id() || !cpu_online(cpu)) {
+                       /* Can't run something on an offline CPU. */
+                       if (!cpu_online(cpu)) {
                                rb_update_pages(cpu_buffer);
                                cpu_buffer->nr_pages_to_update = 0;
                        } else {
-                               /*
-                                * Can not disable preemption for schedule_work_on()
-                                * on PREEMPT_RT.
-                                */
-                               preempt_enable();
                                schedule_work_on(cpu,
                                                &cpu_buffer->update_pages_work);
-                               preempt_disable();
                        }
-                       preempt_enable();
                }
 
                /* wait for all the updates to complete */
@@ -1746,22 +1734,14 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 
                get_online_cpus();
 
-               preempt_disable();
-               /* The update must run on the CPU that is being updated. */
-               if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
+               /* Can't run something on an offline CPU. */
+               if (!cpu_online(cpu_id))
                        rb_update_pages(cpu_buffer);
                else {
-                       /*
-                        * Can not disable preemption for schedule_work_on()
-                        * on PREEMPT_RT.
-                        */
-                       preempt_enable();
                        schedule_work_on(cpu_id,
                                         &cpu_buffer->update_pages_work);
                        wait_for_completion(&cpu_buffer->update_done);
-                       preempt_disable();
                }
-               preempt_enable();
 
                cpu_buffer->nr_pages_to_update = 0;
                put_online_cpus();
@@ -3779,7 +3759,7 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
        if (rb_per_cpu_empty(cpu_buffer))
                return NULL;
 
-       if (iter->head >= local_read(&iter->head_page->page->commit)) {
+       if (iter->head >= rb_page_size(iter->head_page)) {
                rb_inc_iter(iter);
                goto again;
        }
index 384ede3..8bb80fe 100644 (file)
@@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
+       int pc;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       pc = preempt_count();
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
-                                         irq_flags, preempt_count());
+                                         irq_flags, pc);
        if (!event)
                return 0;
 
@@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
                entry->buf[size] = '\0';
 
        __buffer_unlock_commit(buffer, event);
+       ftrace_trace_stack(buffer, irq_flags, 4, pc);
 
        return size;
 }
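
__trace_puts(), and __trace_bputs() in the next hunk, now honor the same
trace option as trace_printk() and record the stack of their caller (the
4 passed to ftrace_trace_stack() appears to skip the internal frames so
the trace starts at the caller). The output can therefore be silenced at
runtime via the trace_printk option:

	# echo 0 > /sys/kernel/debug/tracing/options/trace_printk
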
@@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
+       int pc;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       pc = preempt_count();
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-                                         irq_flags, preempt_count());
+                                         irq_flags, pc);
        if (!event)
                return 0;
 
@@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        entry->str                      = str;
 
        __buffer_unlock_commit(buffer, event);
+       ftrace_trace_stack(buffer, irq_flags, 4, pc);
 
        return 1;
 }
@@ -809,7 +823,7 @@ static struct {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
-       { trace_clock_jiffies,  "uptime",       1 },
+       { trace_clock_jiffies,  "uptime",       0 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
 };
@@ -923,30 +937,6 @@ out:
        return ret;
 }
 
-ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
-{
-       int len;
-       int ret;
-
-       if (!cnt)
-               return 0;
-
-       if (s->len <= s->readpos)
-               return -EBUSY;
-
-       len = s->len - s->readpos;
-       if (cnt > len)
-               cnt = len;
-       ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
-       if (ret == cnt)
-               return -EFAULT;
-
-       cnt -= ret;
-
-       s->readpos += cnt;
-       return cnt;
-}
-
 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 {
        int len;
@@ -1396,7 +1386,6 @@ void tracing_start(void)
 
        arch_spin_unlock(&global_trace.max_lock);
 
-       ftrace_start();
  out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
 }
@@ -1443,7 +1432,6 @@ void tracing_stop(void)
        struct ring_buffer *buffer;
        unsigned long flags;
 
-       ftrace_stop();
        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (global_trace.stop_count++)
                goto out;
@@ -3687,6 +3675,7 @@ static const char readme_msg[] =
 #endif
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
        "  set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
+       "  set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
        "  max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
 #endif
 #ifdef CONFIG_TRACER_SNAPSHOT
@@ -4226,10 +4215,9 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
 }
 
 static ssize_t
-tracing_max_lat_read(struct file *filp, char __user *ubuf,
-                    size_t cnt, loff_t *ppos)
+tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
+                  size_t cnt, loff_t *ppos)
 {
-       unsigned long *ptr = filp->private_data;
        char buf[64];
        int r;
 
@@ -4241,10 +4229,9 @@ tracing_max_lat_read(struct file *filp, char __user *ubuf,
 }
 
 static ssize_t
-tracing_max_lat_write(struct file *filp, const char __user *ubuf,
-                     size_t cnt, loff_t *ppos)
+tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
+                   size_t cnt, loff_t *ppos)
 {
-       unsigned long *ptr = filp->private_data;
        unsigned long val;
        int ret;
 
@@ -4257,6 +4244,52 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
        return cnt;
 }
 
+static ssize_t
+tracing_thresh_read(struct file *filp, char __user *ubuf,
+                   size_t cnt, loff_t *ppos)
+{
+       return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
+}
+
+static ssize_t
+tracing_thresh_write(struct file *filp, const char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       struct trace_array *tr = filp->private_data;
+       int ret;
+
+       mutex_lock(&trace_types_lock);
+       ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
+       if (ret < 0)
+               goto out;
+
+       if (tr->current_trace->update_thresh) {
+               ret = tr->current_trace->update_thresh(tr);
+               if (ret < 0)
+                       goto out;
+       }
+
+       ret = cnt;
+out:
+       mutex_unlock(&trace_types_lock);
+
+       return ret;
+}
+
+static ssize_t
+tracing_max_lat_read(struct file *filp, char __user *ubuf,
+                    size_t cnt, loff_t *ppos)
+{
+       return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
+}
+
+static ssize_t
+tracing_max_lat_write(struct file *filp, const char __user *ubuf,
+                     size_t cnt, loff_t *ppos)
+{
+       return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
+}
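
tracing_thresh_write() above takes trace_types_lock and invokes the new
update_thresh callback, so a tracer can react to the change (the
function_graph implementation appears further down in this diff). As
with tracing_max_latency, the value is read and written in microseconds:

	# echo 100 > /sys/kernel/debug/tracing/tracing_thresh
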
+
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
        struct trace_array *tr = inode->i_private;
@@ -5158,6 +5191,13 @@ static int snapshot_raw_open(struct inode *inode, struct file *filp)
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
 
+static const struct file_operations tracing_thresh_fops = {
+       .open           = tracing_open_generic,
+       .read           = tracing_thresh_read,
+       .write          = tracing_thresh_write,
+       .llseek         = generic_file_llseek,
+};
+
 static const struct file_operations tracing_max_lat_fops = {
        .open           = tracing_open_generic,
        .read           = tracing_max_lat_read,
@@ -6095,10 +6135,8 @@ destroy_trace_option_files(struct trace_option_dentry *topts)
        if (!topts)
                return;
 
-       for (cnt = 0; topts[cnt].opt; cnt++) {
-               if (topts[cnt].entry)
-                       debugfs_remove(topts[cnt].entry);
-       }
+       for (cnt = 0; topts[cnt].opt; cnt++)
+               debugfs_remove(topts[cnt].entry);
 
        kfree(topts);
 }
@@ -6521,7 +6559,7 @@ static __init int tracer_init_debugfs(void)
        init_tracer_debugfs(&global_trace, d_tracer);
 
        trace_create_file("tracing_thresh", 0644, d_tracer,
-                       &tracing_thresh, &tracing_max_lat_fops);
+                       &global_trace, &tracing_thresh_fops);
 
        trace_create_file("README", 0444, d_tracer,
                        NULL, &tracing_readme_fops);
index 9258f5a..385391f 100644 (file)
@@ -339,6 +339,7 @@ struct tracer_flags {
  * @reset: called when one switches to another tracer
  * @start: called when tracing is unpaused (echo 1 > tracing_enabled)
  * @stop: called when tracing is paused (echo 0 > tracing_enabled)
+ * @update_thresh: called when tracing_thresh is updated
  * @open: called when the trace file is opened
  * @pipe_open: called when the trace_pipe file is opened
  * @close: called when the trace file is released
@@ -357,6 +358,7 @@ struct tracer {
        void                    (*reset)(struct trace_array *tr);
        void                    (*start)(struct trace_array *tr);
        void                    (*stop)(struct trace_array *tr);
+       int                     (*update_thresh)(struct trace_array *tr);
        void                    (*open)(struct trace_iterator *iter);
        void                    (*pipe_open)(struct trace_iterator *iter);
        void                    (*close)(struct trace_iterator *iter);
index 26dc348..57b67b1 100644 (file)
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
 
 /*
  * trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ * Note that this use of jiffies_64 is not completely safe on
+ * 32-bit systems. But the window is tiny, and in the worst
+ * case we get an obviously bogus timestamp on a trace
+ * event - i.e. not life threatening.
  */
 u64 notrace trace_clock_jiffies(void)
 {
-       u64 jiffy = jiffies - INITIAL_JIFFIES;
-
-       /* Return nsecs */
-       return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+       return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
 }
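
The "uptime" clock now reports jiffies converted to USER_HZ ticks
instead of a fabricated nanosecond value, which is why its in_ns flag
flips to 0 in the trace_clocks table earlier in this diff. As a worked
example, assuming HZ=1000 and USER_HZ=100: after 5 seconds of uptime the
jiffies delta is 5000, and jiffies_64_to_clock_t() reports
5000 * 100 / 1000 = 500 ticks.
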
 
 /*
index 5d12bb4..4b9c114 100644 (file)
@@ -30,6 +30,18 @@ static int perf_trace_event_perm(struct ftrace_event_call *tp_event,
                        return ret;
        }
 
+       /*
+        * The parent was already checked for permission and allowed
+        * at creation, so allow its children without checking again.
+        */
+       if (p_event->parent)
+               return 0;
+
+       /*
+        * It's OK to check the current process (owner) permissions
+        * here, because the code below is only reached via the
+        * perf_event_open syscall.
+        */
+
        /* The ftrace function trace is allowed only for root. */
        if (ftrace_event_is_function(tp_event)) {
                if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
index f99e0b3..ef06ce7 100644 (file)
@@ -8,6 +8,8 @@
  *
  */
 
+#define pr_fmt(fmt) fmt
+
 #include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/kthread.h>
@@ -470,6 +472,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
 
        list_del(&file->list);
        remove_subsystem(file->system);
+       free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
 }
 
@@ -1490,7 +1493,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
 
        dir->entry = debugfs_create_dir(name, parent);
        if (!dir->entry) {
-               pr_warning("Failed to create system directory %s\n", name);
+               pr_warn("Failed to create system directory %s\n", name);
                __put_system(system);
                goto out_free;
        }
@@ -1506,7 +1509,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
        if (!entry) {
                kfree(system->filter);
                system->filter = NULL;
-               pr_warning("Could not create debugfs '%s/filter' entry\n", name);
+               pr_warn("Could not create debugfs '%s/filter' entry\n", name);
        }
 
        trace_create_file("enable", 0644, dir->entry, dir,
@@ -1521,8 +1524,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name,
  out_fail:
        /* Only print this message if failed on memory allocation */
        if (!dir || !system)
-               pr_warning("No memory to create event subsystem %s\n",
-                          name);
+               pr_warn("No memory to create event subsystem %s\n", name);
        return NULL;
 }
 
@@ -1550,8 +1552,7 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
        name = ftrace_event_name(call);
        file->dir = debugfs_create_dir(name, d_events);
        if (!file->dir) {
-               pr_warning("Could not create debugfs '%s' directory\n",
-                          name);
+               pr_warn("Could not create debugfs '%s' directory\n", name);
                return -1;
        }
 
@@ -1574,8 +1575,8 @@ event_create_dir(struct dentry *parent, struct ftrace_event_file *file)
        if (list_empty(head)) {
                ret = call->class->define_fields(call);
                if (ret < 0) {
-                       pr_warning("Could not initialize trace point"
-                                  " events/%s\n", name);
+                       pr_warn("Could not initialize trace point events/%s\n",
+                               name);
                        return -1;
                }
        }
@@ -1620,7 +1621,6 @@ static void event_remove(struct ftrace_event_call *call)
                if (file->event_call != call)
                        continue;
                ftrace_event_enable_disable(file, 0);
-               destroy_preds(file);
                /*
                 * The do_for_each_event_file() is
                 * a double loop. After finding the call for this
@@ -1648,8 +1648,7 @@ static int event_init(struct ftrace_event_call *call)
        if (call->class->raw_init) {
                ret = call->class->raw_init(call);
                if (ret < 0 && ret != -ENOSYS)
-                       pr_warn("Could not initialize trace events/%s\n",
-                               name);
+                       pr_warn("Could not initialize trace events/%s\n", name);
        }
 
        return ret;
@@ -1748,7 +1747,8 @@ static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
        event_remove(call);
        trace_destroy_fields(call);
-       destroy_call_preds(call);
+       free_event_filter(call->filter);
+       call->filter = NULL;
 }
 
 static int probe_remove_event_call(struct ftrace_event_call *call)
@@ -1894,8 +1894,8 @@ __trace_add_event_dirs(struct trace_array *tr)
        list_for_each_entry(call, &ftrace_events, list) {
                ret = __trace_add_new_event(call, tr);
                if (ret < 0)
-                       pr_warning("Could not create directory for event %s\n",
-                                  ftrace_event_name(call));
+                       pr_warn("Could not create directory for event %s\n",
+                               ftrace_event_name(call));
        }
 }
 
@@ -2207,8 +2207,8 @@ __trace_early_add_event_dirs(struct trace_array *tr)
        list_for_each_entry(file, &tr->events, list) {
                ret = event_create_dir(tr->event_dir, file);
                if (ret < 0)
-                       pr_warning("Could not create directory for event %s\n",
-                                  ftrace_event_name(file->event_call));
+                       pr_warn("Could not create directory for event %s\n",
+                               ftrace_event_name(file->event_call));
        }
 }
 
@@ -2231,8 +2231,8 @@ __trace_early_add_events(struct trace_array *tr)
 
                ret = __trace_early_add_new_event(call, tr);
                if (ret < 0)
-                       pr_warning("Could not create early event %s\n",
-                                  ftrace_event_name(call));
+                       pr_warn("Could not create early event %s\n",
+                               ftrace_event_name(call));
        }
 }
 
@@ -2279,13 +2279,13 @@ create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
        entry = debugfs_create_file("set_event", 0644, parent,
                                    tr, &ftrace_set_event_fops);
        if (!entry) {
-               pr_warning("Could not create debugfs 'set_event' entry\n");
+               pr_warn("Could not create debugfs 'set_event' entry\n");
                return -ENOMEM;
        }
 
        d_events = debugfs_create_dir("events", parent);
        if (!d_events) {
-               pr_warning("Could not create debugfs 'events' directory\n");
+               pr_warn("Could not create debugfs 'events' directory\n");
                return -ENOMEM;
        }
 
@@ -2461,11 +2461,10 @@ static __init int event_trace_init(void)
        entry = debugfs_create_file("available_events", 0444, d_tracer,
                                    tr, &ftrace_avail_fops);
        if (!entry)
-               pr_warning("Could not create debugfs "
-                          "'available_events' entry\n");
+               pr_warn("Could not create debugfs 'available_events' entry\n");
 
        if (trace_define_common_fields())
-               pr_warning("tracing: Failed to allocate common fields");
+               pr_warn("tracing: Failed to allocate common fields");
 
        ret = early_event_add_tracer(d_tracer, tr);
        if (ret)
@@ -2474,7 +2473,7 @@ static __init int event_trace_init(void)
 #ifdef CONFIG_MODULES
        ret = register_module_notifier(&trace_module_nb);
        if (ret)
-               pr_warning("Failed to register trace events module notifier\n");
+               pr_warn("Failed to register trace events module notifier\n");
 #endif
        return 0;
 }
@@ -2578,7 +2577,7 @@ static __init void event_trace_self_tests(void)
                 * it and the self test should not be on.
                 */
                if (file->flags & FTRACE_EVENT_FL_ENABLED) {
-                       pr_warning("Enabled event during self test!\n");
+                       pr_warn("Enabled event during self test!\n");
                        WARN_ON_ONCE(1);
                        continue;
                }
@@ -2606,8 +2605,8 @@ static __init void event_trace_self_tests(void)
 
                ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
                if (WARN_ON_ONCE(ret)) {
-                       pr_warning("error enabling system %s\n",
-                                  system->name);
+                       pr_warn("error enabling system %s\n",
+                               system->name);
                        continue;
                }
 
@@ -2615,8 +2614,8 @@ static __init void event_trace_self_tests(void)
 
                ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
                if (WARN_ON_ONCE(ret)) {
-                       pr_warning("error disabling system %s\n",
-                                  system->name);
+                       pr_warn("error disabling system %s\n",
+                               system->name);
                        continue;
                }
 
@@ -2630,7 +2629,7 @@ static __init void event_trace_self_tests(void)
 
        ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error enabling all events\n");
+               pr_warn("error enabling all events\n");
                return;
        }
 
@@ -2639,7 +2638,7 @@ static __init void event_trace_self_tests(void)
        /* reset sysname */
        ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
        if (WARN_ON_ONCE(ret)) {
-               pr_warning("error disabling all events\n");
+               pr_warn("error disabling all events\n");
                return;
        }
 
index 8a86319..7a8c152 100644 (file)
@@ -774,17 +774,12 @@ static void __free_preds(struct event_filter *filter)
        filter->n_preds = 0;
 }
 
-static void call_filter_disable(struct ftrace_event_call *call)
-{
-       call->flags &= ~TRACE_EVENT_FL_FILTERED;
-}
-
 static void filter_disable(struct ftrace_event_file *file)
 {
        struct ftrace_event_call *call = file->event_call;
 
        if (call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-               call_filter_disable(call);
+               call->flags &= ~TRACE_EVENT_FL_FILTERED;
        else
                file->flags &= ~FTRACE_EVENT_FL_FILTERED;
 }
@@ -804,32 +799,6 @@ void free_event_filter(struct event_filter *filter)
        __free_filter(filter);
 }
 
-void destroy_call_preds(struct ftrace_event_call *call)
-{
-       __free_filter(call->filter);
-       call->filter = NULL;
-}
-
-static void destroy_file_preds(struct ftrace_event_file *file)
-{
-       __free_filter(file->filter);
-       file->filter = NULL;
-}
-
-/*
- * Called when destroying the ftrace_event_file.
- * The file is being freed, so we do not need to worry about
- * the file being currently used. This is for module code removing
- * the tracepoints from within it.
- */
-void destroy_preds(struct ftrace_event_file *file)
-{
-       if (file->event_call->flags & TRACE_EVENT_FL_USE_CALL_FILTER)
-               destroy_call_preds(file->event_call);
-       else
-               destroy_file_preds(file);
-}
-
 static struct event_filter *__alloc_filter(void)
 {
        struct event_filter *filter;
@@ -873,17 +842,14 @@ static inline void __remove_filter(struct ftrace_event_file *file)
                remove_filter_string(file->filter);
 }
 
-static void filter_free_subsystem_preds(struct event_subsystem *system,
+static void filter_free_subsystem_preds(struct ftrace_subsystem_dir *dir,
                                        struct trace_array *tr)
 {
        struct ftrace_event_file *file;
-       struct ftrace_event_call *call;
 
        list_for_each_entry(file, &tr->events, list) {
-               call = file->event_call;
-               if (strcmp(call->class->system, system->name) != 0)
+               if (file->system != dir)
                        continue;
-
                __remove_filter(file);
        }
 }
@@ -901,15 +867,13 @@ static inline void __free_subsystem_filter(struct ftrace_event_file *file)
        }
 }
 
-static void filter_free_subsystem_filters(struct event_subsystem *system,
+static void filter_free_subsystem_filters(struct ftrace_subsystem_dir *dir,
                                          struct trace_array *tr)
 {
        struct ftrace_event_file *file;
-       struct ftrace_event_call *call;
 
        list_for_each_entry(file, &tr->events, list) {
-               call = file->event_call;
-               if (strcmp(call->class->system, system->name) != 0)
+               if (file->system != dir)
                        continue;
                __free_subsystem_filter(file);
        }
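
Matching file->system against the ftrace_subsystem_dir pointer, rather
than running strcmp() on the system name, is both cheaper and
unambiguous when more than one trace instance exposes a subsystem of the
same name. A small standalone contrast:

	#include <stdio.h>
	#include <string.h>

	struct subsys_dir { const char *name; };

	int main(void)
	{
		/* two instances, same subsystem name */
		struct subsys_dir a = { "sched" }, b = { "sched" };

		printf("%d\n", strcmp(a.name, b.name) == 0);	/* 1: names collide */
		printf("%d\n", &a == &b);			/* 0: dirs are distinct */
		return 0;
	}
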
@@ -1582,7 +1546,6 @@ static int fold_pred_tree(struct event_filter *filter,
 static int replace_preds(struct ftrace_event_call *call,
                         struct event_filter *filter,
                         struct filter_parse_state *ps,
-                        char *filter_string,
                         bool dry_run)
 {
        char *operand1 = NULL, *operand2 = NULL;
@@ -1755,13 +1718,12 @@ struct filter_list {
        struct event_filter     *filter;
 };
 
-static int replace_system_preds(struct event_subsystem *system,
+static int replace_system_preds(struct ftrace_subsystem_dir *dir,
                                struct trace_array *tr,
                                struct filter_parse_state *ps,
                                char *filter_string)
 {
        struct ftrace_event_file *file;
-       struct ftrace_event_call *call;
        struct filter_list *filter_item;
        struct filter_list *tmp;
        LIST_HEAD(filter_list);
@@ -1769,15 +1731,14 @@ static int replace_system_preds(struct event_subsystem *system,
        int err;
 
        list_for_each_entry(file, &tr->events, list) {
-               call = file->event_call;
-               if (strcmp(call->class->system, system->name) != 0)
+               if (file->system != dir)
                        continue;
 
                /*
                 * Try to see if the filter can be applied
                 *  (filter arg is ignored on dry_run)
                 */
-               err = replace_preds(call, NULL, ps, filter_string, true);
+               err = replace_preds(file->event_call, NULL, ps, true);
                if (err)
                        event_set_no_set_filter_flag(file);
                else
@@ -1787,9 +1748,7 @@ static int replace_system_preds(struct event_subsystem *system,
        list_for_each_entry(file, &tr->events, list) {
                struct event_filter *filter;
 
-               call = file->event_call;
-
-               if (strcmp(call->class->system, system->name) != 0)
+               if (file->system != dir)
                        continue;
 
                if (event_no_set_filter_flag(file))
@@ -1811,7 +1770,7 @@ static int replace_system_preds(struct event_subsystem *system,
                if (err)
                        goto fail_mem;
 
-               err = replace_preds(call, filter, ps, filter_string, false);
+               err = replace_preds(file->event_call, filter, ps, false);
                if (err) {
                        filter_disable(file);
                        parse_error(ps, FILT_ERR_BAD_SUBSYS_FILTER, 0);
@@ -1933,7 +1892,7 @@ static int create_filter(struct ftrace_event_call *call,
 
        err = create_filter_start(filter_str, set_str, &ps, &filter);
        if (!err) {
-               err = replace_preds(call, filter, ps, filter_str, false);
+               err = replace_preds(call, filter, ps, false);
                if (err && set_str)
                        append_filter_err(ps, filter);
        }
@@ -1959,7 +1918,7 @@ int create_event_filter(struct ftrace_event_call *call,
  * Identical to create_filter() except that it creates a subsystem filter
  * and always remembers @filter_str.
  */
-static int create_system_filter(struct event_subsystem *system,
+static int create_system_filter(struct ftrace_subsystem_dir *dir,
                                struct trace_array *tr,
                                char *filter_str, struct event_filter **filterp)
 {
@@ -1969,7 +1928,7 @@ static int create_system_filter(struct event_subsystem *system,
 
        err = create_filter_start(filter_str, true, &ps, &filter);
        if (!err) {
-               err = replace_system_preds(system, tr, ps, filter_str);
+               err = replace_system_preds(dir, tr, ps, filter_str);
                if (!err) {
                        /* System filters just show a default message */
                        kfree(filter->filter_string);
@@ -2053,18 +2012,18 @@ int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
        }
 
        if (!strcmp(strstrip(filter_string), "0")) {
-               filter_free_subsystem_preds(system, tr);
+               filter_free_subsystem_preds(dir, tr);
                remove_filter_string(system->filter);
                filter = system->filter;
                system->filter = NULL;
                /* Ensure all filters are no longer used */
                synchronize_sched();
-               filter_free_subsystem_filters(system, tr);
+               filter_free_subsystem_filters(dir, tr);
                __free_filter(filter);
                goto out_unlock;
        }
 
-       err = create_system_filter(system, tr, filter_string, &filter);
+       err = create_system_filter(dir, tr, filter_string, &filter);
        if (filter) {
                /*
                 * No event actually uses the system filter
index 4de3e57..f0a0c98 100644 (file)
 #include "trace.h"
 #include "trace_output.h"
 
+static bool kill_ftrace_graph;
+
+/**
+ * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
+ *
+ * ftrace_graph_stop() is called when a severe error is detected in
+ * function graph tracing. This function is called by the critical
+ * paths of function graph to keep those paths from doing any more harm.
+ */
+bool ftrace_graph_is_dead(void)
+{
+       return kill_ftrace_graph;
+}
+
+/**
+ * ftrace_graph_stop - set to permanently disable function graph tracing
+ *
+ * In case of an error in function graph tracing, this is called
+ * to try to keep function graph tracing from causing any more harm.
+ * Usually the error is severe enough that this is called to try to
+ * at least get a warning out to the user.
+ */
+void ftrace_graph_stop(void)
+{
+       kill_ftrace_graph = true;
+}
+
 /* When set, irq functions will be ignored */
 static int ftrace_graph_skip_irqs;
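
The two functions above implement a one-way kill switch: a single flag
that is set on a fatal error and polled at the top of every critical
path (ftrace_push_return_trace() below is one such path). A userspace
miniature of the same pattern:

	#include <stdbool.h>
	#include <stdio.h>

	static bool tracer_dead;

	static void tracer_stop(void)
	{
		tracer_dead = true;	/* one-way: never cleared */
	}

	static int trace_entry(const char *func)
	{
		if (tracer_dead)	/* refuse to do further harm */
			return -1;
		printf("enter %s\n", func);
		return 0;
	}

	int main(void)
	{
		trace_entry("vfs_read");
		tracer_stop();
		trace_entry("vfs_write");	/* refused */
		return 0;
	}
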
 
@@ -92,6 +119,9 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
        unsigned long long calltime;
        int index;
 
+       if (unlikely(ftrace_graph_is_dead()))
+               return -EBUSY;
+
        if (!current->ret_stack)
                return -EBUSY;
 
@@ -323,7 +353,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
        return ret;
 }
 
-int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
+static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
 {
        if (tracing_thresh)
                return 1;
@@ -412,7 +442,7 @@ void set_graph_array(struct trace_array *tr)
        smp_mb();
 }
 
-void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
+static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 {
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
@@ -445,6 +475,12 @@ static void graph_trace_reset(struct trace_array *tr)
        unregister_ftrace_graph();
 }
 
+static int graph_trace_update_thresh(struct trace_array *tr)
+{
+       graph_trace_reset(tr);
+       return graph_trace_init(tr);
+}
+
 static int max_bytes_for_cpu;
 
 static enum print_line_t
@@ -1399,7 +1435,7 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags)
        seq_printf(s, "               |   |   |   |\n");
 }
 
-void print_graph_headers(struct seq_file *s)
+static void print_graph_headers(struct seq_file *s)
 {
        print_graph_headers_flags(s, tracer_flags.val);
 }
@@ -1495,6 +1531,7 @@ static struct trace_event graph_trace_ret_event = {
 
 static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
+       .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
index f3dad80..c6977d5 100644 (file)
@@ -20,23 +20,6 @@ static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
 
 static int next_event_type = __TRACE_LAST_TYPE + 1;
 
-int trace_print_seq(struct seq_file *m, struct trace_seq *s)
-{
-       int len = s->len >= PAGE_SIZE ? PAGE_SIZE - 1 : s->len;
-       int ret;
-
-       ret = seq_write(m, s->buffer, len);
-
-       /*
-        * Only reset this buffer if we successfully wrote to the
-        * seq_file buffer.
-        */
-       if (!ret)
-               trace_seq_init(s);
-
-       return ret;
-}
-
 enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
 {
        struct trace_seq *s = &iter->seq;
@@ -85,257 +68,6 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter)
        return TRACE_TYPE_HANDLED;
 }
 
-/**
- * trace_seq_printf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * It returns 0 if the trace oversizes the buffer's free
- * space, 1 otherwise.
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
-{
-       int len = (PAGE_SIZE - 1) - s->len;
-       va_list ap;
-       int ret;
-
-       if (s->full || !len)
-               return 0;
-
-       va_start(ap, fmt);
-       ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
-       va_end(ap);
-
-       /* If we can't write it all, don't bother writing anything */
-       if (ret >= len) {
-               s->full = 1;
-               return 0;
-       }
-
-       s->len += ret;
-
-       return 1;
-}
-EXPORT_SYMBOL_GPL(trace_seq_printf);
-
-/**
- * trace_seq_bitmask - put a list of longs as a bitmask print output
- * @s:         trace sequence descriptor
- * @maskp:     points to an array of unsigned longs that represent a bitmask
- * @nmaskbits: The number of bits that are valid in @maskp
- *
- * It returns 0 if the trace oversizes the buffer's free
- * space, 1 otherwise.
- *
- * Writes a ASCII representation of a bitmask string into @s.
- */
-int
-trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
-                 int nmaskbits)
-{
-       int len = (PAGE_SIZE - 1) - s->len;
-       int ret;
-
-       if (s->full || !len)
-               return 0;
-
-       ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits);
-       s->len += ret;
-
-       return 1;
-}
-EXPORT_SYMBOL_GPL(trace_seq_bitmask);
-
-/**
- * trace_seq_vprintf - sequence printing of trace information
- * @s: trace sequence descriptor
- * @fmt: printf format string
- *
- * The tracer may use either sequence operations or its own
- * copy to user routines. To simplify formating of a trace
- * trace_seq_printf is used to store strings into a special
- * buffer (@s). Then the output may be either used by
- * the sequencer or pulled into another buffer.
- */
-int
-trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
-{
-       int len = (PAGE_SIZE - 1) - s->len;
-       int ret;
-
-       if (s->full || !len)
-               return 0;
-
-       ret = vsnprintf(s->buffer + s->len, len, fmt, args);
-
-       /* If we can't write it all, don't bother writing anything */
-       if (ret >= len) {
-               s->full = 1;
-               return 0;
-       }
-
-       s->len += ret;
-
-       return len;
-}
-EXPORT_SYMBOL_GPL(trace_seq_vprintf);
-
-int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
-{
-       int len = (PAGE_SIZE - 1) - s->len;
-       int ret;
-
-       if (s->full || !len)
-               return 0;
-
-       ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
-
-       /* If we can't write it all, don't bother writing anything */
-       if (ret >= len) {
-               s->full = 1;
-               return 0;
-       }
-
-       s->len += ret;
-
-       return len;
-}
-
-/**
- * trace_seq_puts - trace sequence printing of simple string
- * @s: trace sequence descriptor
- * @str: simple string to record
- *
- * The tracer may use either the sequence operations or its own
- * copy to user routines. This function records a simple string
- * into a special buffer (@s) for later retrieval by a sequencer
- * or other mechanism.
- */
-int trace_seq_puts(struct trace_seq *s, const char *str)
-{
-       int len = strlen(str);
-
-       if (s->full)
-               return 0;
-
-       if (len > ((PAGE_SIZE - 1) - s->len)) {
-               s->full = 1;
-               return 0;
-       }
-
-       memcpy(s->buffer + s->len, str, len);
-       s->len += len;
-
-       return len;
-}
-
-int trace_seq_putc(struct trace_seq *s, unsigned char c)
-{
-       if (s->full)
-               return 0;
-
-       if (s->len >= (PAGE_SIZE - 1)) {
-               s->full = 1;
-               return 0;
-       }
-
-       s->buffer[s->len++] = c;
-
-       return 1;
-}
-EXPORT_SYMBOL(trace_seq_putc);
-
-int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
-{
-       if (s->full)
-               return 0;
-
-       if (len > ((PAGE_SIZE - 1) - s->len)) {
-               s->full = 1;
-               return 0;
-       }
-
-       memcpy(s->buffer + s->len, mem, len);
-       s->len += len;
-
-       return len;
-}
-
-int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, size_t len)
-{
-       unsigned char hex[HEX_CHARS];
-       const unsigned char *data = mem;
-       int i, j;
-
-       if (s->full)
-               return 0;
-
-#ifdef __BIG_ENDIAN
-       for (i = 0, j = 0; i < len; i++) {
-#else
-       for (i = len-1, j = 0; i >= 0; i--) {
-#endif
-               hex[j++] = hex_asc_hi(data[i]);
-               hex[j++] = hex_asc_lo(data[i]);
-       }
-       hex[j++] = ' ';
-
-       return trace_seq_putmem(s, hex, j);
-}
-
-void *trace_seq_reserve(struct trace_seq *s, size_t len)
-{
-       void *ret;
-
-       if (s->full)
-               return NULL;
-
-       if (len > ((PAGE_SIZE - 1) - s->len)) {
-               s->full = 1;
-               return NULL;
-       }
-
-       ret = s->buffer + s->len;
-       s->len += len;
-
-       return ret;
-}
-
-int trace_seq_path(struct trace_seq *s, const struct path *path)
-{
-       unsigned char *p;
-
-       if (s->full)
-               return 0;
-
-       if (s->len >= (PAGE_SIZE - 1)) {
-               s->full = 1;
-               return 0;
-       }
-
-       p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
-       if (!IS_ERR(p)) {
-               p = mangle_path(s->buffer + s->len, p, "\n");
-               if (p) {
-                       s->len = p - s->buffer;
-                       return 1;
-               }
-       } else {
-               s->buffer[s->len++] = '?';
-               return 1;
-       }
-
-       s->full = 1;
-       return 0;
-}
-
 const char *
 ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
                       unsigned long flags,
@@ -343,7 +75,7 @@ ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
 {
        unsigned long mask;
        const char *str;
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
        int i, first = 1;
 
        for (i = 0;  flag_array[i].name && flags; i++) {
@@ -379,7 +111,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                         const struct trace_print_flags *symbol_array)
 {
        int i;
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
 
        for (i = 0;  symbol_array[i].name; i++) {
 
@@ -390,7 +122,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                break;
        }
 
-       if (ret == (const char *)(p->buffer + p->len))
+       if (ret == (const char *)(trace_seq_buffer_ptr(p)))
                trace_seq_printf(p, "0x%lx", val);
                
        trace_seq_putc(p, 0);
@@ -405,7 +137,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
                         const struct trace_print_flags_u64 *symbol_array)
 {
        int i;
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
 
        for (i = 0;  symbol_array[i].name; i++) {
 
@@ -416,7 +148,7 @@ ftrace_print_symbols_seq_u64(struct trace_seq *p, unsigned long long val,
                break;
        }
 
-       if (ret == (const char *)(p->buffer + p->len))
+       if (ret == (const char *)(trace_seq_buffer_ptr(p)))
                trace_seq_printf(p, "0x%llx", val);
 
        trace_seq_putc(p, 0);
@@ -430,7 +162,7 @@ const char *
 ftrace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                         unsigned int bitmask_size)
 {
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
 
        trace_seq_bitmask(p, bitmask_ptr, bitmask_size * 8);
        trace_seq_putc(p, 0);
@@ -443,7 +175,7 @@ const char *
 ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 {
        int i;
-       const char *ret = p->buffer + p->len;
+       const char *ret = trace_seq_buffer_ptr(p);
 
        for (i = 0; i < buf_len; i++)
                trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
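
The hunks above replace open-coded "p->buffer + p->len" arithmetic with an
accessor, so trace_output.c no longer depends on trace_seq internals. A
minimal sketch of what such an accessor looks like, assuming the struct still
exposes ->buffer and ->len as in the lines being removed (the real definition
lives in include/linux/trace_seq.h):

	/* Return a pointer to the next free byte in the trace_seq buffer. */
	static inline unsigned char *
	trace_seq_buffer_ptr(struct trace_seq *s)
	{
		return s->buffer + s->len;
	}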
index 127a9d8..80b25b5 100644 (file)
@@ -35,9 +35,6 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
 extern int __unregister_ftrace_event(struct trace_event *event);
 extern struct rw_semaphore trace_event_sem;
 
-#define MAX_MEMHEX_BYTES       8
-#define HEX_CHARS              (MAX_MEMHEX_BYTES*2 + 1)
-
 #define SEQ_PUT_FIELD_RET(s, x)                                \
 do {                                                   \
        if (!trace_seq_putmem(s, &(x), sizeof(x)))      \
@@ -46,7 +43,6 @@ do {                                                  \
 
 #define SEQ_PUT_HEX_FIELD_RET(s, x)                    \
 do {                                                   \
-       BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);     \
        if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
                return TRACE_TYPE_PARTIAL_LINE;         \
 } while (0)
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
new file mode 100644 (file)
index 0000000..1f24ed9
--- /dev/null
@@ -0,0 +1,428 @@
+/*
+ * trace_seq.c
+ *
+ * Copyright (C) 2008-2014 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
+ *
+ * The trace_seq is a handy tool that lets you pass around a descriptor
+ * for a buffer that other functions can write to. It is similar to the
+ * seq_file functionality but has some differences.
+ *
+ * To use it, the trace_seq must be initialized with trace_seq_init().
+ * This will set up the counters within the descriptor. You can call
+ * trace_seq_init() more than once to reset the trace_seq to start
+ * from scratch.
+ * 
+ * The buffer size is currently PAGE_SIZE, although it may become dynamic
+ * in the future.
+ *
+ * A write to the buffer will either succeed or fail. That is, unlike
+ * sprintf() there will not be a partial write (well, it may write into
+ * the buffer but it won't update the pointers). This allows users to
+ * try to write something into the trace_seq buffer and if it fails
+ * they can flush it and try again.
+ *
+ */
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+#include <linux/trace_seq.h>
+
+/* How much buffer is left on the trace_seq? */
+#define TRACE_SEQ_BUF_LEFT(s) ((PAGE_SIZE - 1) - (s)->len)
+
+/* How much of the buffer has been written? */
+#define TRACE_SEQ_BUF_USED(s) min((s)->len, (unsigned int)(PAGE_SIZE - 1))
+
+/**
+ * trace_print_seq - move the contents of trace_seq into a seq_file
+ * @m: the seq_file descriptor that is the destination
+ * @s: the trace_seq descriptor that is the source.
+ *
+ * Returns 0 on success and non-zero on error. If it succeeds in
+ * writing to the seq_file it will reset the trace_seq; otherwise
+ * it does not modify the trace_seq, so the caller can try again.
+ */
+int trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+       unsigned int len = TRACE_SEQ_BUF_USED(s);
+       int ret;
+
+       ret = seq_write(m, s->buffer, len);
+
+       /*
+        * Only reset this buffer if we successfully wrote to the
+        * seq_file buffer. This lets the caller try again or
+        * do something else with the contents.
+        */
+       if (!ret)
+               trace_seq_init(s);
+
+       return ret;
+}
+
+/**
+ * trace_seq_printf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace,
+ * trace_seq_printf() is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ *
+ * Returns 1 if we successfully wrote all the contents to
+ *   the buffer.
+ * Returns 0 if the length to write is bigger than the
+ *   reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+       unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+       va_list ap;
+       int ret;
+
+       if (s->full || !len)
+               return 0;
+
+       va_start(ap, fmt);
+       ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
+       va_end(ap);
+
+       /* If we can't write it all, don't bother writing anything */
+       if (ret >= len) {
+               s->full = 1;
+               return 0;
+       }
+
+       s->len += ret;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_printf);
+
+/**
+ * trace_seq_bitmask - write a bitmask array in its ASCII representation
+ * @s:         trace sequence descriptor
+ * @maskp:     points to an array of unsigned longs that represent a bitmask
+ * @nmaskbits: The number of bits that are valid in @maskp
+ *
+ * Writes an ASCII representation of a bitmask string into @s.
+ *
+ * Returns 1 if we successfully wrote all the contents to
+ *   the buffer.
+ * Returns 0 if the length to write is bigger than the
+ *   reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
+                     int nmaskbits)
+{
+       unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+       int ret;
+
+       if (s->full || !len)
+               return 0;
+
+       ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
+       s->len += ret;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_bitmask);
+
+/**
+ * trace_seq_vprintf - sequence printing of trace information
+ * @s: trace sequence descriptor
+ * @fmt: printf format string
+ *
+ * The tracer may use either sequence operations or its own
+ * copy to user routines. To simplify formatting of a trace,
+ * trace_seq_printf is used to store strings into a special
+ * buffer (@s). Then the output may be either used by
+ * the sequencer or pulled into another buffer.
+ *
+ * Returns a positive number on success and 0 if nothing was written.
+ */
+int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
+{
+       unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+       int ret;
+
+       if (s->full || !len)
+               return 0;
+
+       ret = vsnprintf(s->buffer + s->len, len, fmt, args);
+
+       /* If we can't write it all, don't bother writing anything */
+       if (ret >= len) {
+               s->full = 1;
+               return 0;
+       }
+
+       s->len += ret;
+
+       return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_vprintf);
+
+/**
+ * trace_seq_bprintf - Write the printf string from binary arguments
+ * @s: trace sequence descriptor
+ * @fmt: The format string for the @binary arguments
+ * @binary: The binary arguments for @fmt.
+ *
+ * When recording in a fast path, a printf may be recorded with just
+ * saving the format and the arguments as they were passed to the
+ * function, instead of wasting cycles converting the arguments into
+ * ASCII characters. Instead, the arguments are saved in a 32 bit
+ * word array that is defined by the format string constraints.
+ *
+ * This function will take the format and the binary array and finish
+ * the conversion into the ASCII string within the buffer.
+ *
+ * Returns a positive number on success and 0 if nothing was written.
+ */
+int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+       unsigned int len = TRACE_SEQ_BUF_LEFT(s);
+       int ret;
+
+       if (s->full || !len)
+               return 0;
+
+       ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
+
+       /* If we can't write it all, don't bother writing anything */
+       if (ret >= len) {
+               s->full = 1;
+               return 0;
+       }
+
+       s->len += ret;
+
+       return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_bprintf);
+
+/**
+ * trace_seq_puts - trace sequence printing of simple string
+ * @s: trace sequence descriptor
+ * @str: simple string to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple string
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_puts(struct trace_seq *s, const char *str)
+{
+       unsigned int len = strlen(str);
+
+       if (s->full)
+               return 0;
+
+       if (len > TRACE_SEQ_BUF_LEFT(s)) {
+               s->full = 1;
+               return 0;
+       }
+
+       memcpy(s->buffer + s->len, str, len);
+       s->len += len;
+
+       return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_puts);
+
+/**
+ * trace_seq_putc - trace sequence printing of simple character
+ * @s: trace sequence descriptor
+ * @c: simple character to record
+ *
+ * The tracer may use either the sequence operations or its own
+ * copy to user routines. This function records a simple character
+ * into a special buffer (@s) for later retrieval by a sequencer
+ * or other mechanism.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+       if (s->full)
+               return 0;
+
+       if (TRACE_SEQ_BUF_LEFT(s) < 1) {
+               s->full = 1;
+               return 0;
+       }
+
+       s->buffer[s->len++] = c;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putc);
+
+/**
+ * trace_seq_putmem - write raw data into the trace_seq buffer
+ * @s: trace sequence descriptor
+ * @mem: The raw memory to copy into the buffer
+ * @len: The length of the raw memory to copy (in bytes)
+ *
+ * There may be cases where raw memory needs to be written into the
+ * buffer and a strcpy() would not work. Using this function allows
+ * for such cases.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
+{
+       if (s->full)
+               return 0;
+
+       if (len > TRACE_SEQ_BUF_LEFT(s)) {
+               s->full = 1;
+               return 0;
+       }
+
+       memcpy(s->buffer + s->len, mem, len);
+       s->len += len;
+
+       return len;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putmem);
+
+#define MAX_MEMHEX_BYTES       8U
+#define HEX_CHARS              (MAX_MEMHEX_BYTES*2 + 1)
+
+/**
+ * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex
+ * @s: trace sequence descriptor
+ * @mem: The raw memory whose hex ASCII representation is to be written
+ * @len: The length of the raw memory to copy (in bytes)
+ *
+ * This is similar to trace_seq_putmem() except instead of just copying the
+ * raw memory into the buffer it writes its ASCII representation of it
+ * in hex characters.
+ *
+ * Returns how much it wrote to the buffer.
+ */
+int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+                        unsigned int len)
+{
+       unsigned char hex[HEX_CHARS];
+       const unsigned char *data = mem;
+       unsigned int start_len;
+       int i, j;
+       int cnt = 0;
+
+       if (s->full)
+               return 0;
+
+       while (len) {
+               /* each input byte emits two hex chars into hex[] */
+               start_len = min(len, MAX_MEMHEX_BYTES);
+#ifdef __BIG_ENDIAN
+               for (i = 0, j = 0; i < start_len; i++) {
+#else
+               for (i = start_len-1, j = 0; i >= 0; i--) {
+#endif
+                       hex[j++] = hex_asc_hi(data[i]);
+                       hex[j++] = hex_asc_lo(data[i]);
+               }
+               if (WARN_ON_ONCE(j == 0 || j/2 > len))
+                       break;
+
+               /* j increments twice per loop */
+               len -= j / 2;
+               hex[j++] = ' ';
+
+               cnt += trace_seq_putmem(s, hex, j);
+       }
+       return cnt;
+}
+EXPORT_SYMBOL_GPL(trace_seq_putmem_hex);
+
+/**
+ * trace_seq_path - copy a path into the sequence buffer
+ * @s: trace sequence descriptor
+ * @path: path to write into the sequence buffer.
+ *
+ * Write a path name into the sequence buffer.
+ *
+ * Returns 1 if we successfully wrote all the contents to
+ *   the buffer.
+ * Returns 0 if the length to write is bigger than the
+ *   reserved buffer space. In this case, nothing gets written.
+ */
+int trace_seq_path(struct trace_seq *s, const struct path *path)
+{
+       unsigned char *p;
+
+       if (s->full)
+               return 0;
+
+       if (TRACE_SEQ_BUF_LEFT(s) < 1) {
+               s->full = 1;
+               return 0;
+       }
+
+       p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
+       if (!IS_ERR(p)) {
+               p = mangle_path(s->buffer + s->len, p, "\n");
+               if (p) {
+                       s->len = p - s->buffer;
+                       return 1;
+               }
+       } else {
+               s->buffer[s->len++] = '?';
+               return 1;
+       }
+
+       s->full = 1;
+       return 0;
+}
+EXPORT_SYMBOL_GPL(trace_seq_path);
+
+/**
+ * trace_seq_to_user - copy the sequence buffer to user space
+ * @s: trace sequence descriptor
+ * @ubuf: The userspace memory location to copy to
+ * @cnt: The amount to copy
+ *
+ * Copies the sequence buffer into the userspace memory pointed to
+ * by @ubuf. It starts from the last read position (@s->readpos)
+ * and writes up to @cnt characters or until it reaches the end of
+ * the content in the buffer (@s->len), whichever comes first.
+ *
+ * On success, it returns the number of bytes copied (a positive
+ * number).
+ *
+ * On failure it returns -EBUSY if all of the content in the
+ * sequence has already been read, which includes the case of an
+ * empty sequence (@s->len == @s->readpos).
+ *
+ * Returns -EFAULT if the copy to userspace fails.
+ */
+int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt)
+{
+       int len;
+       int ret;
+
+       if (!cnt)
+               return 0;
+
+       if (s->len <= s->readpos)
+               return -EBUSY;
+
+       len = s->len - s->readpos;
+       if (cnt > len)
+               cnt = len;
+       ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
+       if (ret == cnt)
+               return -EFAULT;
+
+       cnt -= ret;
+
+       s->readpos += cnt;
+       return cnt;
+}
+EXPORT_SYMBOL_GPL(trace_seq_to_user);
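
Taken together, the new file exposes a small write-then-flush API. A hedged
usage sketch (the seq_file scenario and the names example_seq/example_show
are illustrative, not part of this patch; struct trace_seq embeds a
PAGE_SIZE buffer, so real users keep it in a long-lived object rather than
on the stack):

	#include <linux/seq_file.h>
	#include <linux/trace_seq.h>

	static struct trace_seq example_seq;	/* hypothetical long-lived seq */

	static int example_show(struct seq_file *m, void *v)
	{
		struct trace_seq *s = &example_seq;

		trace_seq_init(s);		/* reset len, readpos and full */
		trace_seq_puts(s, "state: ");
		if (!trace_seq_printf(s, "%d\n", 42))
			return -EBUSY;		/* illustrative: buffer was full */

		return trace_print_seq(m, s);	/* re-inits @s only on success */
	}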
index 04fdb5d..33ff6a2 100644 (file)
@@ -265,7 +265,6 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
        if (is_ret)
                tu->consumer.ret_handler = uretprobe_dispatcher;
        init_trace_uprobe_filter(&tu->filter);
-       tu->tp.call.flags |= TRACE_EVENT_FL_USE_CALL_FILTER;
        return tu;
 
 error:
@@ -893,6 +892,9 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
        int ret;
 
        if (file) {
+               if (tu->tp.flags & TP_FLAG_PROFILE)
+                       return -EINTR;
+
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link)
                        return -ENOMEM;
@@ -901,29 +903,40 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
                list_add_tail_rcu(&link->list, &tu->tp.files);
 
                tu->tp.flags |= TP_FLAG_TRACE;
-       } else
-               tu->tp.flags |= TP_FLAG_PROFILE;
+       } else {
+               if (tu->tp.flags & TP_FLAG_TRACE)
+                       return -EINTR;
 
-       ret = uprobe_buffer_enable();
-       if (ret < 0)
-               return ret;
+               tu->tp.flags |= TP_FLAG_PROFILE;
+       }
 
        WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
        if (enabled)
                return 0;
 
+       ret = uprobe_buffer_enable();
+       if (ret)
+               goto err_flags;
+
        tu->consumer.filter = filter;
        ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
-       if (ret) {
-               if (file) {
-                       list_del(&link->list);
-                       kfree(link);
-                       tu->tp.flags &= ~TP_FLAG_TRACE;
-               } else
-                       tu->tp.flags &= ~TP_FLAG_PROFILE;
-       }
+       if (ret)
+               goto err_buffer;
 
+       return 0;
+
+ err_buffer:
+       uprobe_buffer_disable();
+
+ err_flags:
+       if (file) {
+               list_del(&link->list);
+               kfree(link);
+               tu->tp.flags &= ~TP_FLAG_TRACE;
+       } else {
+               tu->tp.flags &= ~TP_FLAG_PROFILE;
+       }
        return ret;
 }
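
The rework above converts the enable path to the usual goto-unwind shape,
where each error label undoes exactly the steps that succeeded before it. A
self-contained sketch of that shape with hypothetical stand-in helpers
(acquire_buffer() and friends are illustrative, not kernel APIs):

	static int acquire_buffer(void) { return 0; }	/* stands in for uprobe_buffer_enable() */
	static void release_buffer(void) { }
	static int register_consumer(void) { return -1; }
	static void clear_flags(void) { }

	static int enable_example(void)
	{
		int ret;

		ret = acquire_buffer();
		if (ret)
			goto err_flags;

		ret = register_consumer();
		if (ret)
			goto err_buffer;

		return 0;

	 err_buffer:
		release_buffer();
	 err_flags:
		clear_flags();
		return ret;
	}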
 
@@ -1201,12 +1214,6 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 
        current->utask->vaddr = (unsigned long) &udd;
 
-#ifdef CONFIG_PERF_EVENTS
-       if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
-           !uprobe_perf_filter(&tu->consumer, 0, current->mm))
-               return UPROBE_HANDLER_REMOVE;
-#endif
-
        if (WARN_ON_ONCE(!uprobe_cpu_buffer))
                return 0;
 
@@ -1284,7 +1291,7 @@ static int register_uprobe_event(struct trace_uprobe *tu)
                kfree(call->print_fmt);
                return -ENODEV;
        }
-       call->flags = 0;
+
        call->class->reg = trace_uprobe_register;
        call->data = tu;
        ret = trace_add_event_call(call);
index 33cbd8c..3490407 100644 (file)
@@ -492,33 +492,29 @@ static int sys_tracepoint_refcount;
 
 void syscall_regfunc(void)
 {
-       unsigned long flags;
-       struct task_struct *g, *t;
+       struct task_struct *p, *t;
 
        if (!sys_tracepoint_refcount) {
-               read_lock_irqsave(&tasklist_lock, flags);
-               do_each_thread(g, t) {
-                       /* Skip kernel threads. */
-                       if (t->mm)
-                               set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
-               } while_each_thread(g, t);
-               read_unlock_irqrestore(&tasklist_lock, flags);
+               read_lock(&tasklist_lock);
+               for_each_process_thread(p, t) {
+                       set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
+               }
+               read_unlock(&tasklist_lock);
        }
        sys_tracepoint_refcount++;
 }
 
 void syscall_unregfunc(void)
 {
-       unsigned long flags;
-       struct task_struct *g, *t;
+       struct task_struct *p, *t;
 
        sys_tracepoint_refcount--;
        if (!sys_tracepoint_refcount) {
-               read_lock_irqsave(&tasklist_lock, flags);
-               do_each_thread(g, t) {
+               read_lock(&tasklist_lock);
+               for_each_process_thread(p, t) {
                        clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
-               } while_each_thread(g, t);
-               read_unlock_irqrestore(&tasklist_lock, flags);
+               }
+               read_unlock(&tasklist_lock);
        }
 }
 #endif
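
The conversion above swaps the legacy do_each_thread()/while_each_thread()
pair for for_each_process_thread() and drops the _irqsave lock variant,
since tasklist_lock does not need interrupts disabled here. A hedged sketch
of the new iteration idiom in isolation (the counting body is illustrative):

	#include <linux/sched.h>

	static int count_threads_with_mm(void)
	{
		struct task_struct *p, *t;
		int n = 0;

		read_lock(&tasklist_lock);
		for_each_process_thread(p, t) {
			if (t->mm)	/* user threads only */
				n++;
		}
		read_unlock(&tasklist_lock);
		return n;
	}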
index 516203e..c3319bd 100644 (file)
 
 int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
+#ifdef CONFIG_SMP
+int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#endif
+
 static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
@@ -47,6 +53,7 @@ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
+static unsigned long soft_lockup_nmi_warn;
 
 /* boot commands */
 /*
@@ -95,6 +102,15 @@ static int __init nosoftlockup_setup(char *str)
 }
 __setup("nosoftlockup", nosoftlockup_setup);
 /*  */
+#ifdef CONFIG_SMP
+static int __init softlockup_all_cpu_backtrace_setup(char *str)
+{
+       sysctl_softlockup_all_cpu_backtrace =
+               !!simple_strtol(str, NULL, 0);
+       return 1;
+}
+__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
+#endif
 
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
@@ -271,6 +287,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
+       int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
@@ -317,6 +334,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
 
+               if (softlockup_all_cpu_backtrace) {
+                       /* Prevent multiple soft-lockup reports if one CPU is
+                        * already engaged in dumping CPU backtraces.
+                        */
+                       if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
+                               /* Someone else will report us. Let's give up */
+                               __this_cpu_write(soft_watchdog_warn, true);
+                               return HRTIMER_RESTART;
+                       }
+               }
+
                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
@@ -327,6 +355,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                else
                        dump_stack();
 
+               if (softlockup_all_cpu_backtrace) {
+                       /* Avoid generating two backtraces for current,
+                        * given that one was already produced above.
+                        */
+                       trigger_allbutself_cpu_backtrace();
+
+                       clear_bit(0, &soft_lockup_nmi_warn);
+                       /* Barrier to sync with other cpus */
+                       smp_mb__after_atomic();
+               }
+
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
@@ -527,10 +566,8 @@ static void update_timers_all_cpus(void)
        int cpu;
 
        get_online_cpus();
-       preempt_disable();
        for_each_online_cpu(cpu)
                update_timers(cpu);
-       preempt_enable();
        put_online_cpus();
 }
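
The soft-lockup path above uses a classic single-reporter pattern: the first
CPU to win test_and_set_bit() performs the expensive all-CPU dump while the
others back off. A sketch with hypothetical names (only the bit-twiddling
idiom is taken from the hunk above):

	#include <linux/bitops.h>

	static unsigned long report_in_progress;

	static void report_once(void)
	{
		if (test_and_set_bit(0, &report_in_progress))
			return;			/* another CPU is reporting */

		/* ... expensive dump, e.g. all-CPU backtraces ... */

		clear_bit(0, &report_in_progress);
		smp_mb__after_atomic();		/* make the clear visible promptly */
	}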
 
index 6203d29..5dbe22a 100644 (file)
@@ -265,7 +265,6 @@ struct workqueue_struct {
 
 static struct kmem_cache *pwq_cache;
 
-static int wq_numa_tbl_len;            /* highest possible NUMA node id + 1 */
 static cpumask_var_t *wq_numa_possible_cpumask;
                                        /* possible CPUs of each node */
 
@@ -758,13 +757,6 @@ static bool too_many_workers(struct worker_pool *pool)
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
-       /*
-        * nr_idle and idle_list may disagree if idle rebinding is in
-        * progress.  Never return %true if idle_list is empty.
-        */
-       if (list_empty(&pool->idle_list))
-               return false;
-
        return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
 
@@ -850,7 +842,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
        pool = worker->pool;
 
        /* this can only happen on the local cpu */
-       if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+       if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
                return NULL;
 
        /*
@@ -874,35 +866,22 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
- * @wakeup: wakeup an idle worker if necessary
  *
- * Set @flags in @worker->flags and adjust nr_running accordingly.  If
- * nr_running becomes zero and @wakeup is %true, an idle worker is
- * woken up.
+ * Set @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock)
  */
-static inline void worker_set_flags(struct worker *worker, unsigned int flags,
-                                   bool wakeup)
+static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 {
        struct worker_pool *pool = worker->pool;
 
        WARN_ON_ONCE(worker->task != current);
 
-       /*
-        * If transitioning into NOT_RUNNING, adjust nr_running and
-        * wake up an idle worker as necessary if requested by
-        * @wakeup.
-        */
+       /* If transitioning into NOT_RUNNING, adjust nr_running. */
        if ((flags & WORKER_NOT_RUNNING) &&
            !(worker->flags & WORKER_NOT_RUNNING)) {
-               if (wakeup) {
-                       if (atomic_dec_and_test(&pool->nr_running) &&
-                           !list_empty(&pool->worklist))
-                               wake_up_worker(pool);
-               } else
-                       atomic_dec(&pool->nr_running);
+               atomic_dec(&pool->nr_running);
        }
 
        worker->flags |= flags;
@@ -1232,7 +1211,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
                        pwq_activate_delayed_work(work);
 
                list_del_init(&work->entry);
-               pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
+               pwq_dec_nr_in_flight(pwq, get_work_color(work));
 
                /* work->data points to pwq iff queued, point to pool */
                set_work_pool_and_keep_pending(work, pool->id);
@@ -1560,7 +1539,7 @@ static void worker_enter_idle(struct worker *worker)
                         (worker->hentry.next || worker->hentry.pprev)))
                return;
 
-       /* can't use worker_set_flags(), also called from start_worker() */
+       /* can't use worker_set_flags(), also called from create_worker() */
        worker->flags |= WORKER_IDLE;
        pool->nr_idle++;
        worker->last_active = jiffies;
@@ -1602,11 +1581,11 @@ static void worker_leave_idle(struct worker *worker)
        list_del_init(&worker->entry);
 }
 
-static struct worker *alloc_worker(void)
+static struct worker *alloc_worker(int node)
 {
        struct worker *worker;
 
-       worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+       worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
        if (worker) {
                INIT_LIST_HEAD(&worker->entry);
                INIT_LIST_HEAD(&worker->scheduled);
@@ -1670,6 +1649,9 @@ static void worker_detach_from_pool(struct worker *worker,
                detach_completion = pool->detach_completion;
        mutex_unlock(&pool->attach_mutex);
 
+       /* clear leftover flags without pool->lock after it is detached */
+       worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
+
        if (detach_completion)
                complete(detach_completion);
 }
@@ -1678,8 +1660,7 @@ static void worker_detach_from_pool(struct worker *worker,
  * create_worker - create a new workqueue worker
  * @pool: pool the new worker will belong to
  *
- * Create a new worker which is attached to @pool.  The new worker must be
- * started by start_worker().
+ * Create and start a new worker which is attached to @pool.
  *
  * CONTEXT:
  * Might sleep.  Does GFP_KERNEL allocations.
@@ -1698,7 +1679,7 @@ static struct worker *create_worker(struct worker_pool *pool)
        if (id < 0)
                goto fail;
 
-       worker = alloc_worker();
+       worker = alloc_worker(pool->node);
        if (!worker)
                goto fail;
 
@@ -1724,6 +1705,13 @@ static struct worker *create_worker(struct worker_pool *pool)
        /* successful, attach the worker to the pool */
        worker_attach_to_pool(worker, pool);
 
+       /* start the newly created worker */
+       spin_lock_irq(&pool->lock);
+       worker->pool->nr_workers++;
+       worker_enter_idle(worker);
+       wake_up_process(worker->task);
+       spin_unlock_irq(&pool->lock);
+
        return worker;
 
 fail:
@@ -1733,44 +1721,6 @@ fail:
        return NULL;
 }
 
-/**
- * start_worker - start a newly created worker
- * @worker: worker to start
- *
- * Make the pool aware of @worker and start it.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock).
- */
-static void start_worker(struct worker *worker)
-{
-       worker->pool->nr_workers++;
-       worker_enter_idle(worker);
-       wake_up_process(worker->task);
-}
-
-/**
- * create_and_start_worker - create and start a worker for a pool
- * @pool: the target pool
- *
- * Grab the managership of @pool and create and start a new worker for it.
- *
- * Return: 0 on success. A negative error code otherwise.
- */
-static int create_and_start_worker(struct worker_pool *pool)
-{
-       struct worker *worker;
-
-       worker = create_worker(pool);
-       if (worker) {
-               spin_lock_irq(&pool->lock);
-               start_worker(worker);
-               spin_unlock_irq(&pool->lock);
-       }
-
-       return worker ? 0 : -ENOMEM;
-}
-
 /**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
@@ -1909,23 +1859,10 @@ restart:
        mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 
        while (true) {
-               struct worker *worker;
-
-               worker = create_worker(pool);
-               if (worker) {
-                       del_timer_sync(&pool->mayday_timer);
-                       spin_lock_irq(&pool->lock);
-                       start_worker(worker);
-                       if (WARN_ON_ONCE(need_to_create_worker(pool)))
-                               goto restart;
-                       return true;
-               }
-
-               if (!need_to_create_worker(pool))
+               if (create_worker(pool) || !need_to_create_worker(pool))
                        break;
 
-               __set_current_state(TASK_INTERRUPTIBLE);
-               schedule_timeout(CREATE_COOLDOWN);
+               schedule_timeout_interruptible(CREATE_COOLDOWN);
 
                if (!need_to_create_worker(pool))
                        break;
@@ -1933,6 +1870,11 @@ restart:
 
        del_timer_sync(&pool->mayday_timer);
        spin_lock_irq(&pool->lock);
+       /*
+        * This is necessary even after a new worker was just successfully
+        * created as @pool->lock was dropped and the new worker might have
+        * already become busy.
+        */
        if (need_to_create_worker(pool))
                goto restart;
        return true;
@@ -2020,13 +1962,8 @@ __acquires(&pool->lock)
 
        lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 #endif
-       /*
-        * Ensure we're on the correct CPU.  DISASSOCIATED test is
-        * necessary to avoid spurious warnings from rescuers servicing the
-        * unbound or a disassociated pool.
-        */
-       WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
-                    !(pool->flags & POOL_DISASSOCIATED) &&
+       /* ensure we're on the correct CPU */
+       WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
                     raw_smp_processor_id() != pool->cpu);
 
        /*
@@ -2052,17 +1989,22 @@ __acquires(&pool->lock)
        list_del_init(&work->entry);
 
        /*
-        * CPU intensive works don't participate in concurrency
-        * management.  They're the scheduler's responsibility.
+        * CPU intensive works don't participate in concurrency management.
+        * They're the scheduler's responsibility.  This takes @worker out
+        * of concurrency management and the next code block will chain
+        * execution of the pending work items.
         */
        if (unlikely(cpu_intensive))
-               worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+               worker_set_flags(worker, WORKER_CPU_INTENSIVE);
 
        /*
-        * Unbound pool isn't concurrency managed and work items should be
-        * executed ASAP.  Wake up another worker if necessary.
+        * Wake up another worker if necessary.  The condition is always
+        * false for normal per-cpu workers since nr_running would always
+        * be >= 1 at this point.  This is used to chain execution of the
+        * pending work items for WORKER_NOT_RUNNING workers such as the
+        * UNBOUND and CPU_INTENSIVE ones.
         */
-       if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
+       if (need_more_worker(pool))
                wake_up_worker(pool);
 
        /*
@@ -2218,7 +2160,7 @@ recheck:
                }
        } while (keep_working(pool));
 
-       worker_set_flags(worker, WORKER_PREP, false);
+       worker_set_flags(worker, WORKER_PREP);
 sleep:
        /*
         * pool->lock is held and there's no work to process and no need to
@@ -2311,29 +2253,27 @@ repeat:
                                move_linked_works(work, scheduled, &n);
 
                process_scheduled_works(rescuer);
-               spin_unlock_irq(&pool->lock);
-
-               worker_detach_from_pool(rescuer, pool);
-
-               spin_lock_irq(&pool->lock);
 
                /*
                 * Put the reference grabbed by send_mayday().  @pool won't
-                * go away while we're holding its lock.
+                * go away while we're still attached to it.
                 */
                put_pwq(pwq);
 
                /*
-                * Leave this pool.  If keep_working() is %true, notify a
+                * Leave this pool.  If need_more_worker() is %true, notify a
                 * regular worker; otherwise, we end up with 0 concurrency
                 * and stalling the execution.
                 */
-               if (keep_working(pool))
+               if (need_more_worker(pool))
                        wake_up_worker(pool);
 
                rescuer->pool = NULL;
-               spin_unlock(&pool->lock);
-               spin_lock(&wq_mayday_lock);
+               spin_unlock_irq(&pool->lock);
+
+               worker_detach_from_pool(rescuer, pool);
+
+               spin_lock_irq(&wq_mayday_lock);
        }
 
        spin_unlock_irq(&wq_mayday_lock);
@@ -3284,6 +3224,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
                }
        }
 
+       dev_set_uevent_suppress(&wq_dev->dev, false);
        kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
        return 0;
 }
@@ -3457,7 +3398,7 @@ static void put_unbound_pool(struct worker_pool *pool)
                return;
 
        /* sanity checks */
-       if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
+       if (WARN_ON(!(pool->cpu < 0)) ||
            WARN_ON(!list_empty(&pool->worklist)))
                return;
 
@@ -3523,7 +3464,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
        hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
                if (wqattrs_equal(pool->attrs, attrs)) {
                        pool->refcnt++;
-                       goto out_unlock;
+                       return pool;
                }
        }
 
@@ -3556,12 +3497,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
                goto fail;
 
        /* create and start the initial worker */
-       if (create_and_start_worker(pool) < 0)
+       if (!create_worker(pool))
                goto fail;
 
        /* install */
        hash_add(unbound_pool_hash, &pool->hash_node, hash);
-out_unlock:
+
        return pool;
 fail:
        if (pool)
@@ -3590,11 +3531,6 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
        if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
                return;
 
-       /*
-        * Unlink @pwq.  Synchronization against wq->mutex isn't strictly
-        * necessary on release but do it anyway.  It's easier to verify
-        * and consistent with the linking path.
-        */
        mutex_lock(&wq->mutex);
        list_del_rcu(&pwq->pwqs_node);
        is_last = list_empty(&wq->pwqs);
@@ -3691,10 +3627,7 @@ static void link_pwq(struct pool_workqueue *pwq)
        if (!list_empty(&pwq->pwqs_node))
                return;
 
-       /*
-        * Set the matching work_color.  This is synchronized with
-        * wq->mutex to avoid confusing flush_workqueue().
-        */
+       /* set the matching work_color */
        pwq->work_color = wq->work_color;
 
        /* sync max_active to the current setting */
@@ -3831,7 +3764,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
        if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
                return -EINVAL;
 
-       pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
+       pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
        new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
        tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
        if (!pwq_tbl || !new_attrs || !tmp_attrs)
@@ -4079,7 +4012,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 
        /* allocate wq and format name */
        if (flags & WQ_UNBOUND)
-               tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
+               tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
 
        wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
        if (!wq)
@@ -4121,7 +4054,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
        if (flags & WQ_MEM_RECLAIM) {
                struct worker *rescuer;
 
-               rescuer = alloc_worker();
+               rescuer = alloc_worker(NUMA_NO_NODE);
                if (!rescuer)
                        goto err_destroy;
 
@@ -4469,8 +4402,6 @@ static void wq_unbind_fn(struct work_struct *work)
        struct worker *worker;
 
        for_each_cpu_worker_pool(pool, cpu) {
-               WARN_ON_ONCE(cpu != smp_processor_id());
-
                mutex_lock(&pool->attach_mutex);
                spin_lock_irq(&pool->lock);
 
@@ -4542,6 +4473,7 @@ static void rebind_workers(struct worker_pool *pool)
                                                  pool->attrs->cpumask) < 0);
 
        spin_lock_irq(&pool->lock);
+       pool->flags &= ~POOL_DISASSOCIATED;
 
        for_each_pool_worker(worker, pool) {
                unsigned int worker_flags = worker->flags;
@@ -4631,7 +4563,7 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
                for_each_cpu_worker_pool(pool, cpu) {
                        if (pool->nr_workers)
                                continue;
-                       if (create_and_start_worker(pool) < 0)
+                       if (!create_worker(pool))
                                return NOTIFY_BAD;
                }
                break;
@@ -4643,15 +4575,10 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
                for_each_pool(pool, pi) {
                        mutex_lock(&pool->attach_mutex);
 
-                       if (pool->cpu == cpu) {
-                               spin_lock_irq(&pool->lock);
-                               pool->flags &= ~POOL_DISASSOCIATED;
-                               spin_unlock_irq(&pool->lock);
-
+                       if (pool->cpu == cpu)
                                rebind_workers(pool);
-                       } else if (pool->cpu < 0) {
+                       else if (pool->cpu < 0)
                                restore_unbound_workers_cpumask(pool, cpu);
-                       }
 
                        mutex_unlock(&pool->attach_mutex);
                }
@@ -4855,10 +4782,6 @@ static void __init wq_numa_init(void)
        cpumask_var_t *tbl;
        int node, cpu;
 
-       /* determine NUMA pwq table len - highest node id + 1 */
-       for_each_node(node)
-               wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1);
-
        if (num_possible_nodes() <= 1)
                return;
 
@@ -4875,11 +4798,11 @@ static void __init wq_numa_init(void)
         * available.  Build one from cpu_to_node() which should have been
         * fully initialized by now.
         */
-       tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL);
+       tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
        BUG_ON(!tbl);
 
        for_each_node(node)
-               BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+               BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
                                node_online(node) ? node : NUMA_NO_NODE));
 
        for_each_possible_cpu(cpu) {
@@ -4935,7 +4858,7 @@ static int __init init_workqueues(void)
 
                for_each_cpu_worker_pool(pool, cpu) {
                        pool->flags &= ~POOL_DISASSOCIATED;
-                       BUG_ON(create_and_start_worker(pool) < 0);
+                       BUG_ON(!create_worker(pool));
                }
        }
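
With startup folded into create_worker(), the old two-step call sites
collapse to a single NULL check. The before/after shape, condensed from the
hunks above (the error policy in the "after" branch is illustrative):

	/* before: allocate, then separately start under pool->lock */
	worker = create_worker(pool);
	if (worker) {
		spin_lock_irq(&pool->lock);
		start_worker(worker);
		spin_unlock_irq(&pool->lock);
	}

	/* after: create_worker() attaches and starts the worker itself */
	if (!create_worker(pool))
		return -ENOMEM;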
 
index 7cfcc1b..901096d 100644 (file)
@@ -835,7 +835,7 @@ config DEBUG_RT_MUTEXES
 
 config RT_MUTEX_TESTER
        bool "Built-in scriptable tester for rt-mutexes"
-       depends on DEBUG_KERNEL && RT_MUTEXES
+       depends on DEBUG_KERNEL && RT_MUTEXES && BROKEN
        help
          This option enables a rt-mutex tester.
 
@@ -930,7 +930,7 @@ config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC
+       select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
        select KALLSYMS
        select KALLSYMS_ALL
 
@@ -1131,20 +1131,6 @@ config PROVE_RCU_REPEATEDLY
 
         Say N if you are unsure.
 
-config PROVE_RCU_DELAY
-       bool "RCU debugging: preemptible RCU race provocation"
-       depends on DEBUG_KERNEL && PREEMPT_RCU
-       default n
-       help
-        There is a class of races that involve an unlikely preemption
-        of __rcu_read_unlock() just after ->rcu_read_lock_nesting has
-        been set to INT_MIN.  This feature inserts a delay at that
-        point to increase the probability of these races.
-
-        Say Y to increase probability of preemption of __rcu_read_unlock().
-
-        Say N if you are unsure.
-
 config SPARSE_RCU_POINTER
        bool "RCU debugging: sparse-based checks for pointer usage"
        default n
@@ -1408,7 +1394,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
        depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
        depends on !X86_64
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
        help
          Provide stacktrace filter for fault-injection capabilities
 
index c101230..b6513a9 100644 (file)
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
 
        i %= num_online_cpus();
 
-       if (!cpumask_of_node(numa_node)) {
+       if (numa_node == -1 || !cpumask_of_node(numa_node)) {
                /* Use all online cpu's for non numa aware system */
                cpumask_copy(mask, cpu_online_mask);
        } else {
index 454baa8..7a7c2da 100644 (file)
@@ -51,3 +51,58 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
        return 0;
 }
 EXPORT_SYMBOL(memcpy_toiovec);
+
+/*
+ *     Copy kernel to iovec. Returns -EFAULT on error.
+ */
+
+int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
+                     int offset, int len)
+{
+       int copy;
+       for (; len > 0; ++iov) {
+               /* Skip over the finished iovecs */
+               if (unlikely(offset >= iov->iov_len)) {
+                       offset -= iov->iov_len;
+                       continue;
+               }
+               copy = min_t(unsigned int, iov->iov_len - offset, len);
+               if (copy_to_user(iov->iov_base + offset, kdata, copy))
+                       return -EFAULT;
+               offset = 0;
+               kdata += copy;
+               len -= copy;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(memcpy_toiovecend);
+
+/*
+ *     Copy iovec to kernel. Returns -EFAULT on error.
+ */
+
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+                       int offset, int len)
+{
+       /* Skip over the finished iovecs */
+       while (offset >= iov->iov_len) {
+               offset -= iov->iov_len;
+               iov++;
+       }
+
+       while (len > 0) {
+               u8 __user *base = iov->iov_base + offset;
+               int copy = min_t(unsigned int, len, iov->iov_len - offset);
+
+               offset = 0;
+               if (copy_from_user(kdata, base, copy))
+                       return -EFAULT;
+               len -= copy;
+               kdata += copy;
+               iov++;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovecend);
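
A hedged usage sketch for the helper added above; the header-parsing
scenario and the name read_header() are illustrative, not from this patch:

	#include <linux/socket.h>
	#include <linux/uio.h>

	static int read_header(const struct iovec *iov, int hdr_off)
	{
		unsigned char hdr[16];

		/* pull sizeof(hdr) bytes starting hdr_off bytes into the chain */
		if (memcpy_fromiovecend(hdr, iov, hdr_off, sizeof(hdr)))
			return -EFAULT;		/* a copy_from_user() failed */
		/* ... parse hdr ... */
		return 0;
	}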
index f07a40d..d2233de 100644 (file)
@@ -1,6 +1,5 @@
 #include <linux/export.h>
 #include <linux/lockref.h>
-#include <linux/mutex.h>
 
 #if USE_CMPXCHG_LOCKREF
 
@@ -29,7 +28,7 @@
                if (likely(old.lock_count == prev.lock_count)) {                \
                        SUCCESS;                                                \
                }                                                               \
-               arch_mutex_cpu_relax();                                         \
+               cpu_relax_lowlatency();                                         \
        }                                                                       \
 } while (0)
 
index df6839e..7a85967 100644 (file)
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
                        len = *ip++;
                        for (; len == 255; length += 255)
                                len = *ip++;
+                       if (unlikely(length > (size_t)(length + len)))
+                               goto _output_error;
                        length += len;
                }
 
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
                if (length == ML_MASK) {
                        for (; *ip == 255; length += 255)
                                ip++;
+                       if (unlikely(length > (size_t)(length + *ip)))
+                               goto _output_error;
                        length += *ip++;
                }
 
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 
        /* write overflow error detected */
 _output_error:
-       return (int) (-(((char *)ip) - source));
+       return -1;
 }
 
 static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
                        int s = 255;
                        while ((ip < iend) && (s == 255)) {
                                s = *ip++;
+                               if (unlikely(length > (size_t)(length + s)))
+                                       goto _output_error;
                                length += s;
                        }
                }
@@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
                if (length == ML_MASK) {
                        while (ip < iend) {
                                int s = *ip++;
+                               if (unlikely(length > (size_t)(length + s)))
+                                       goto _output_error;
                                length += s;
                                if (s == 255)
                                        continue;
@@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 
        /* write overflow error detected */
 _output_error:
-       return (int) (-(((char *) ip) - source));
+       return -1;
 }
 
 int lz4_decompress(const unsigned char *src, size_t *src_len,
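
Each added check guards the unsigned "length += ..." accumulators against
wraparound: if the addition overflows, the (mod 2^N) sum ends up smaller
than the original value. The test in isolation, as a hedged sketch:

	#include <linux/types.h>

	/* Returns nonzero if length + s would wrap around. */
	static int add_would_wrap(size_t length, size_t s)
	{
		return length > (size_t)(length + s);
	}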
index 569985d..8563081 100644 (file)
 #include <linux/lzo.h>
 #include "lzodefs.h"
 
-#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x)                                  \
+       (((size_t)(ip_end - ip) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x)                                  \
+       (((size_t)(op_end - op) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_IP(t, x))                     \
+                       goto input_overrun;             \
+       } while (0)
+
+#define NEED_OP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_OP(t, x))                     \
+                       goto output_overrun;            \
+       } while (0)
+
+#define TEST_LB(m_pos)                                 \
+       do {                                            \
+               if ((m_pos) < out)                      \
+                       goto lookbehind_overrun;        \
+       } while (0)
 
 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                          unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                                        while (unlikely(*ip == 0)) {
                                                t += 255;
                                                ip++;
-                                               NEED_IP(1);
+                                               NEED_IP(1, 0);
                                        }
                                        t += 15 + *ip++;
                                }
                                t += 3;
 copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-                               if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+                               if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
                                        const unsigned char *ie = ip + t;
                                        unsigned char *oe = op + t;
                                        do {
@@ -81,8 +101,8 @@ copy_literal_run:
                                } else
 #endif
                                {
-                                       NEED_OP(t);
-                                       NEED_IP(t + 3);
+                                       NEED_OP(t, 0);
+                                       NEED_IP(t, 3);
                                        do {
                                                *op++ = *ip++;
                                        } while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
                                m_pos -= t >> 2;
                                m_pos -= *ip++ << 2;
                                TEST_LB(m_pos);
-                               NEED_OP(2);
+                               NEED_OP(2, 0);
                                op[0] = m_pos[0];
                                op[1] = m_pos[1];
                                op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 31 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        m_pos = op - 1;
                        next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 7 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        next = get_unaligned_le16(ip);
                        ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
                if (op - m_pos >= 8) {
                        unsigned char *oe = op + t;
-                       if (likely(HAVE_OP(t + 15))) {
+                       if (likely(HAVE_OP(t, 15))) {
                                do {
                                        COPY8(op, m_pos);
                                        op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
                                        m_pos += 8;
                                } while (op < oe);
                                op = oe;
-                               if (HAVE_IP(6)) {
+                               if (HAVE_IP(6, 0)) {
                                        state = next;
                                        COPY4(op, ip);
                                        op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
                                        continue;
                                }
                        } else {
-                               NEED_OP(t);
+                               NEED_OP(t, 0);
                                do {
                                        *op++ = *m_pos++;
                                } while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
 #endif
                {
                        unsigned char *oe = op + t;
-                       NEED_OP(t);
+                       NEED_OP(t, 0);
                        op[0] = m_pos[0];
                        op[1] = m_pos[1];
                        op += 2;
@@ -194,15 +214,15 @@ match_next:
                state = next;
                t = next;
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-               if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+               if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
                        COPY4(op, ip);
                        op += t;
                        ip += t;
                } else
 #endif
                {
-                       NEED_IP(t + 3);
-                       NEED_OP(t);
+                       NEED_IP(t, 3);
+                       NEED_OP(t, 0);
                        while (t > 0) {
                                *op++ = *ip++;
                                t--;
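
A note on the two-argument bounds macros introduced above: the run length t comes from the compressed stream, so the old style of check ("remaining >= t + 3") can wrap around when t is near SIZE_MAX and then wrongly pass. The standalone sketch below shows the hazard and one overflow-proof comparison; it is illustrative only, not the kernel's exact macro.

#include <stdio.h>
#include <stddef.h>

static int have_ip_old(size_t remaining, size_t need)
{
        return remaining >= need;       /* caller computed t + 3: may have wrapped */
}

static int have_ip_new(size_t remaining, size_t t, size_t x)
{
        return remaining >= t && remaining - t >= x;    /* no overflow possible */
}

int main(void)
{
        size_t t = (size_t)-2;          /* huge run length from a corrupt stream */

        printf("old check: %d\n", have_ip_old(16, t + 3)); /* t + 3 wraps to 1: passes */
        printf("new check: %d\n", have_ip_new(16, t, 3));  /* correctly fails */
        return 0;
}
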
index 963b703..fe5a334 100644 (file)
--- a/lib/percpu-refcount.c
+++ b/lib/percpu-refcount.c
 
 #define PCPU_COUNT_BIAS                (1U << 31)
 
+static unsigned __percpu *pcpu_count_ptr(struct percpu_ref *ref)
+{
+       return (unsigned __percpu *)(ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+
 /**
  * percpu_ref_init - initialize a percpu refcount
  * @ref: percpu_ref to initialize
@@ -46,8 +51,8 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 {
        atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
 
-       ref->pcpu_count = alloc_percpu(unsigned);
-       if (!ref->pcpu_count)
+       ref->pcpu_count_ptr = (unsigned long)alloc_percpu(unsigned);
+       if (!ref->pcpu_count_ptr)
                return -ENOMEM;
 
        ref->release = release;
@@ -56,53 +61,71 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release)
 EXPORT_SYMBOL_GPL(percpu_ref_init);
 
 /**
- * percpu_ref_cancel_init - cancel percpu_ref_init()
- * @ref: percpu_ref to cancel init for
+ * percpu_ref_reinit - re-initialize a percpu refcount
+ * @ref: percpu_ref to re-initialize
  *
- * Once a percpu_ref is initialized, its destruction is initiated by
- * percpu_ref_kill() and completes asynchronously, which can be painful to
- * do when destroying a half-constructed object in init failure path.
+ * Re-initialize @ref so that it's in the same state as when it finished
+ * percpu_ref_init().  @ref must have been initialized successfully, killed
+ * and reached 0 but not exited.
  *
- * This function destroys @ref without invoking @ref->release and the
- * memory area containing it can be freed immediately on return.  To
- * prevent accidental misuse, it's required that @ref has finished
- * percpu_ref_init(), whether successful or not, but never used.
- *
- * The weird name and usage restriction are to prevent people from using
- * this function by mistake for normal shutdown instead of
- * percpu_ref_kill().
+ * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
+ * this function is in progress.
  */
-void percpu_ref_cancel_init(struct percpu_ref *ref)
+void percpu_ref_reinit(struct percpu_ref *ref)
 {
-       unsigned __percpu *pcpu_count = ref->pcpu_count;
+       unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
        int cpu;
 
-       WARN_ON_ONCE(atomic_read(&ref->count) != 1 + PCPU_COUNT_BIAS);
+       BUG_ON(!pcpu_count);
+       WARN_ON(!percpu_ref_is_zero(ref));
+
+       atomic_set(&ref->count, 1 + PCPU_COUNT_BIAS);
+
+       /*
+        * Restore per-cpu operation.  smp_store_release() is paired with
+        * smp_read_barrier_depends() in __pcpu_ref_alive() and guarantees
+        * that the zeroing is visible to all percpu accesses which can see
+        * the following PCPU_REF_DEAD clearing.
+        */
+       for_each_possible_cpu(cpu)
+               *per_cpu_ptr(pcpu_count, cpu) = 0;
+
+       smp_store_release(&ref->pcpu_count_ptr,
+                         ref->pcpu_count_ptr & ~PCPU_REF_DEAD);
+}
+EXPORT_SYMBOL_GPL(percpu_ref_reinit);
+
+/**
+ * percpu_ref_exit - undo percpu_ref_init()
+ * @ref: percpu_ref to exit
+ *
+ * This function exits @ref.  The caller is responsible for ensuring that
+ * @ref is no longer in active use.  The usual places to invoke this
+ * function from are the @ref->release() callback or the init failure path
+ * where percpu_ref_init() succeeded but other parts of the initialization
+ * of the embedding object failed.
+ */
+void percpu_ref_exit(struct percpu_ref *ref)
+{
+       unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
 
        if (pcpu_count) {
-               for_each_possible_cpu(cpu)
-                       WARN_ON_ONCE(*per_cpu_ptr(pcpu_count, cpu));
-               free_percpu(ref->pcpu_count);
+               free_percpu(pcpu_count);
+               ref->pcpu_count_ptr = PCPU_REF_DEAD;
        }
 }
-EXPORT_SYMBOL_GPL(percpu_ref_cancel_init);
+EXPORT_SYMBOL_GPL(percpu_ref_exit);
 
 static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 {
        struct percpu_ref *ref = container_of(rcu, struct percpu_ref, rcu);
-       unsigned __percpu *pcpu_count = ref->pcpu_count;
+       unsigned __percpu *pcpu_count = pcpu_count_ptr(ref);
        unsigned count = 0;
        int cpu;
 
-       /* Mask out PCPU_REF_DEAD */
-       pcpu_count = (unsigned __percpu *)
-               (((unsigned long) pcpu_count) & ~PCPU_STATUS_MASK);
-
        for_each_possible_cpu(cpu)
                count += *per_cpu_ptr(pcpu_count, cpu);
 
-       free_percpu(pcpu_count);
-
        pr_debug("global %i pcpu %i", atomic_read(&ref->count), (int) count);
 
        /*
@@ -152,11 +175,10 @@ static void percpu_ref_kill_rcu(struct rcu_head *rcu)
 void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
                                 percpu_ref_func_t *confirm_kill)
 {
-       WARN_ONCE(REF_STATUS(ref->pcpu_count) == PCPU_REF_DEAD,
+       WARN_ONCE(ref->pcpu_count_ptr & PCPU_REF_DEAD,
                  "percpu_ref_kill() called more than once!\n");
 
-       ref->pcpu_count = (unsigned __percpu *)
-               (((unsigned long) ref->pcpu_count)|PCPU_REF_DEAD);
+       ref->pcpu_count_ptr |= PCPU_REF_DEAD;
        ref->confirm_kill = confirm_kill;
 
        call_rcu_sched(&ref->rcu, percpu_ref_kill_rcu);
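
For readers tracking the pcpu_count_ptr conversion above: the DEAD flag now lives in the low bit of the percpu pointer itself, which works because the allocation is at least 2-byte aligned. A minimal userspace sketch of that low-bit tagging, with illustrative names:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define REF_DEAD 1UL    /* plays the role of PCPU_REF_DEAD */

static unsigned *count_ptr(uintptr_t tagged)
{
        return (unsigned *)(tagged & ~REF_DEAD);        /* strip the flag bit */
}

int main(void)
{
        uintptr_t ref = (uintptr_t)calloc(1, sizeof(unsigned));

        ref |= REF_DEAD;        /* mark dead without losing the pointer */
        printf("dead=%lu count=%u\n",
               (unsigned long)(ref & REF_DEAD), *count_ptr(ref));
        free(count_ptr(ref));
        return 0;
}
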
index 649d097..4abda07 100644 (file)
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 static phys_addr_t *io_tlb_orig_addr;
 
 /*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
        io_tlb_list = memblock_virt_alloc(
                                PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
                                PAGE_SIZE);
-       for (i = 0; i < io_tlb_nslabs; i++)
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       io_tlb_index = 0;
        io_tlb_orig_addr = memblock_virt_alloc(
                                PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
                                PAGE_SIZE);
+       for (i = 0; i < io_tlb_nslabs; i++) {
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
 
        if (verbose)
                swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
        if (!io_tlb_list)
                goto cleanup3;
 
-       for (i = 0; i < io_tlb_nslabs; i++)
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       io_tlb_index = 0;
-
        io_tlb_orig_addr = (phys_addr_t *)
                __get_free_pages(GFP_KERNEL,
                                 get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
        if (!io_tlb_orig_addr)
                goto cleanup4;
 
-       memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+       for (i = 0; i < io_tlb_nslabs; i++) {
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
 
        swiotlb_print_info();
 
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
        /*
         * First, sync the memory before unmapping the entry
         */
-       if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+       if (orig_addr != INVALID_PHYS_ADDR &&
+           ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
        /*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
                 * Step 1: return the slots to the free list, merging the
                 * slots with succeeding slots
                 */
-               for (i = index + nslots - 1; i >= index; i--)
+               for (i = index + nslots - 1; i >= index; i--) {
                        io_tlb_list[i] = ++count;
+                       io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+               }
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
        int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
+       if (orig_addr == INVALID_PHYS_ADDR)
+               return;
        orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
        switch (target) {
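
The INVALID_PHYS_ADDR change works because physical address 0 can be perfectly valid, so zero-filling io_tlb_orig_addr (as the old memset did) could not distinguish "never mapped" from "mapped at 0". A standalone sketch of the all-ones sentinel pattern:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)

int main(void)
{
        phys_addr_t orig[4];

        for (int i = 0; i < 4; i++)
                orig[i] = INVALID_PHYS_ADDR;    /* no slot is mapped yet */
        orig[2] = 0x1000;                       /* map one slot */

        for (int i = 0; i < 4; i++) {
                if (orig[i] == INVALID_PHYS_ADDR)
                        continue;               /* skip, as the sync path now does */
                printf("slot %d -> %#llx\n", i, (unsigned long long)orig[i]);
        }
        return 0;
}
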
index dafb06f..65d44fd 100644 (file)
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -241,18 +241,6 @@ void delete_from_page_cache(struct page *page)
 }
 EXPORT_SYMBOL(delete_from_page_cache);
 
-static int sleep_on_page(void *word)
-{
-       io_schedule();
-       return 0;
-}
-
-static int sleep_on_page_killable(void *word)
-{
-       sleep_on_page(word);
-       return fatal_signal_pending(current) ? -EINTR : 0;
-}
-
 static int filemap_check_errors(struct address_space *mapping)
 {
        int ret = 0;
@@ -692,7 +680,7 @@ void wait_on_page_bit(struct page *page, int bit_nr)
        DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
 
        if (test_bit(bit_nr, &page->flags))
-               __wait_on_bit(page_waitqueue(page), &wait, sleep_on_page,
+               __wait_on_bit(page_waitqueue(page), &wait, bit_wait_io,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
@@ -705,7 +693,7 @@ int wait_on_page_bit_killable(struct page *page, int bit_nr)
                return 0;
 
        return __wait_on_bit(page_waitqueue(page), &wait,
-                            sleep_on_page_killable, TASK_KILLABLE);
+                            bit_wait_io, TASK_KILLABLE);
 }
 
 /**
@@ -806,7 +794,7 @@ void __lock_page(struct page *page)
 {
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
-       __wait_on_bit_lock(page_waitqueue(page), &wait, sleep_on_page,
+       __wait_on_bit_lock(page_waitqueue(page), &wait, bit_wait_io,
                                                        TASK_UNINTERRUPTIBLE);
 }
 EXPORT_SYMBOL(__lock_page);
@@ -816,7 +804,7 @@ int __lock_page_killable(struct page *page)
        DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
        return __wait_on_bit_lock(page_waitqueue(page), &wait,
-                                       sleep_on_page_killable, TASK_KILLABLE);
+                                       bit_wait_io, TASK_KILLABLE);
 }
 EXPORT_SYMBOL_GPL(__lock_page_killable);
 
@@ -1031,18 +1019,21 @@ EXPORT_SYMBOL(find_lock_entry);
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @gfp_mask: gfp mask to use if a page is to be allocated
+ * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
+ * @radix_gfp_mask: gfp mask to use for radix tree node allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
- * PCG flags modify how the page is returned
+ * PCG flags modify how the page is returned.
  *
  * FGP_ACCESSED: the page will be marked accessed
  * FGP_LOCK: Page is returned locked
  * FGP_CREAT: If page is not present then a new page is allocated using
- *             @gfp_mask and added to the page cache and the VM's LRU
- *             list. The page is returned locked and with an increased
- *             refcount. Otherwise, %NULL is returned.
+ *             @cache_gfp_mask and added to the page cache and the VM's LRU
+ *             list. If radix tree nodes are allocated during page cache
+ *             insertion then @radix_gfp_mask is used. The page is returned
+ *             locked and with an increased refcount. Otherwise, %NULL is
+ *             returned.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
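
The sleep_on_page() removals earlier in this file rely on the generic bit-wait code taking an "action" callback (bit_wait_io here), so call sites no longer each supply their own sleeping helper. A rough userspace model of that callback shape, with the actual sleeping stubbed out:

#include <stdio.h>

/* Stands in for bit_wait_io(); the real one calls io_schedule(). */
static int bit_wait_io_model(void *word)
{
        (void)word;
        return 0;
}

/* Rough shape of the generic waiter: it delegates the sleep to @action. */
static int wait_on_bit_model(unsigned long *word, int bit,
                             int (*action)(void *))
{
        while (*word & (1UL << bit)) {
                int err = action(word);

                if (err)
                        return err;
                *word &= ~(1UL << bit); /* pretend a waker cleared the bit */
        }
        return 0;
}

int main(void)
{
        unsigned long flags = 1UL << 0; /* a PG_locked-style bit */

        printf("wait returned %d\n",
               wait_on_bit_model(&flags, 0, bit_wait_io_model));
        return 0;
}
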
index e60837d..33514d8 100644 (file)
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -941,6 +941,37 @@ unlock:
        spin_unlock(ptl);
 }
 
+/*
+ * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
+ * during copy_user_huge_page()'s copy_page_rep(): in the case when
+ * the source page gets split and a tail freed before copy completes.
+ * Called under pmd_lock of checked pmd, so safe from splitting itself.
+ */
+static void get_user_huge_page(struct page *page)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+               struct page *endpage = page + HPAGE_PMD_NR;
+
+               atomic_add(HPAGE_PMD_NR, &page->_count);
+               while (++page < endpage)
+                       get_huge_page_tail(page);
+       } else {
+               get_page(page);
+       }
+}
+
+static void put_user_huge_page(struct page *page)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+               struct page *endpage = page + HPAGE_PMD_NR;
+
+               while (page < endpage)
+                       put_page(page++);
+       } else {
+               put_page(page);
+       }
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
@@ -1074,7 +1105,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
-       get_page(page);
+       get_user_huge_page(page);
        spin_unlock(ptl);
 alloc:
        if (transparent_hugepage_enabled(vma) &&
@@ -1095,7 +1126,7 @@ alloc:
                                split_huge_page(page);
                                ret |= VM_FAULT_FALLBACK;
                        }
-                       put_page(page);
+                       put_user_huge_page(page);
                }
                count_vm_event(THP_FAULT_FALLBACK);
                goto out;
@@ -1105,7 +1136,7 @@ alloc:
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
-                       put_page(page);
+                       put_user_huge_page(page);
                } else
                        split_huge_page_pmd(vma, address, pmd);
                ret |= VM_FAULT_FALLBACK;
@@ -1127,7 +1158,7 @@ alloc:
 
        spin_lock(ptl);
        if (page)
-               put_page(page);
+               put_user_huge_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(ptl);
                mem_cgroup_uncharge_page(new_page);
@@ -2392,8 +2423,6 @@ static void collapse_huge_page(struct mm_struct *mm,
        pmd = mm_find_pmd(mm, address);
        if (!pmd)
                goto out;
-       if (pmd_trans_huge(*pmd))
-               goto out;
 
        anon_vma_lock_write(vma->anon_vma);
 
@@ -2492,8 +2521,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        pmd = mm_find_pmd(mm, address);
        if (!pmd)
                goto out;
-       if (pmd_trans_huge(*pmd))
-               goto out;
 
        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2846,12 +2873,22 @@ void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
 static void split_huge_page_address(struct mm_struct *mm,
                                    unsigned long address)
 {
+       pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
 
        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-       pmd = mm_find_pmd(mm, address);
-       if (!pmd)
+       pgd = pgd_offset(mm, address);
+       if (!pgd_present(*pgd))
+               return;
+
+       pud = pud_offset(pgd, address);
+       if (!pud_present(*pud))
+               return;
+
+       pmd = pmd_offset(pud, address);
+       if (!pmd_present(*pmd))
                return;
        /*
         * Caller holds the mmap_sem write mode, so a huge pmd cannot
index 226910c..7a0a73d 100644 (file)
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -856,7 +856,7 @@ struct hstate *size_to_hstate(unsigned long size)
        return NULL;
 }
 
-static void free_huge_page(struct page *page)
+void free_huge_page(struct page *page)
 {
        /*
         * Can't pass hstate in here because it is called from the
@@ -2520,6 +2520,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
                update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_migration_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+               return 1;
+       else
+               return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
@@ -2559,7 +2584,24 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                src_ptl = huge_pte_lockptr(h, src, src_pte);
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-               if (!huge_pte_none(huge_ptep_get(src_pte))) {
+               entry = huge_ptep_get(src_pte);
+               if (huge_pte_none(entry)) { /* skip none entry */
+                       ;
+               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
+                                   is_hugetlb_entry_hwpoisoned(entry))) {
+                       swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+                       if (is_write_migration_entry(swp_entry) && cow) {
+                               /*
+                                * COW mappings require pages in both
+                                * parent and child to be set to read.
+                                */
+                               make_migration_entry_read(&swp_entry);
+                               entry = swp_entry_to_pte(swp_entry);
+                               set_huge_pte_at(src, addr, src_pte, entry);
+                       }
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
                        entry = huge_ptep_get(src_pte);
@@ -2578,32 +2620,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        return ret;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page)
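
The new branch in copy_hugetlb_page_range() mirrors what fork already did for ordinary pages: a write-migration entry is demoted to read-only in the parent of a COW mapping before being copied into the child. A toy model of that write-bit demotion (the bit layout is illustrative, not the kernel's swp_entry_t encoding):

#include <stdio.h>

#define SWP_WRITE 1u    /* illustrative "write migration" bit */

static unsigned make_entry_read(unsigned e)
{
        return e & ~SWP_WRITE;
}

int main(void)
{
        unsigned src = 0x40 | SWP_WRITE;        /* write-migration entry */
        int cow = 1;

        if (cow)
                src = make_entry_read(src);     /* demote in the parent */
        unsigned dst = src;                     /* child copies the entry */

        printf("src writable=%u dst writable=%u\n",
               src & SWP_WRITE, dst & SWP_WRITE);
        return 0;
}
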
index 493f758..9aae6f4 100644 (file)
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -358,9 +358,8 @@ static void __init __hugetlb_cgroup_file_init(int idx)
        cft = &h->cgroup_files[4];
        memset(cft, 0, sizeof(*cft));
 
-       WARN_ON(cgroup_add_cftypes(&hugetlb_cgrp_subsys, h->cgroup_files));
-
-       return;
+       WARN_ON(cgroup_add_legacy_cftypes(&hugetlb_cgrp_subsys,
+                                         h->cgroup_files));
 }
 
 void __init hugetlb_cgroup_file_init(void)
index 68710e8..fb75902 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -945,7 +945,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
        pmd = mm_find_pmd(mm, addr);
        if (!pmd)
                goto out;
-       BUG_ON(pmd_trans_huge(*pmd));
 
        mmun_start = addr;
        mmun_end   = addr + PAGE_SIZE;
@@ -1979,18 +1978,12 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
 #endif /* CONFIG_MIGRATION */
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static int just_wait(void *word)
-{
-       schedule();
-       return 0;
-}
-
 static void wait_while_offlining(void)
 {
        while (ksm_run & KSM_RUN_OFFLINE) {
                mutex_unlock(&ksm_thread_mutex);
                wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE),
-                               just_wait, TASK_UNINTERRUPTIBLE);
+                           TASK_UNINTERRUPTIBLE);
                mutex_lock(&ksm_thread_mutex);
        }
 }
index a2c7bcb..f009a14 100644 (file)
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5415,8 +5415,12 @@ static int mem_cgroup_oom_notify_cb(struct mem_cgroup *memcg)
 {
        struct mem_cgroup_eventfd_list *ev;
 
+       spin_lock(&memcg_oom_lock);
+
        list_for_each_entry(ev, &memcg->oom_notify, list)
                eventfd_signal(ev->eventfd, 1);
+
+       spin_unlock(&memcg_oom_lock);
        return 0;
 }
 
@@ -6003,7 +6007,6 @@ static struct cftype mem_cgroup_files[] = {
        },
        {
                .name = "use_hierarchy",
-               .flags = CFTYPE_INSANE,
                .write_u64 = mem_cgroup_hierarchy_write,
                .read_u64 = mem_cgroup_hierarchy_read,
        },
@@ -6407,6 +6410,29 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
        __mem_cgroup_free(memcg);
 }
 
+/**
+ * mem_cgroup_css_reset - reset the states of a mem_cgroup
+ * @css: the target css
+ *
+ * Reset the states of the mem_cgroup associated with @css.  This is
+ * invoked when the userland requests disabling on the default hierarchy
+ * but the memcg is pinned through dependency.  The memcg should stop
+ * applying policies and should revert to the vanilla state as it may be
+ * made visible again.
+ *
+ * The current implementation only resets the essential configurations.
+ * This needs to be expanded to cover all the visible parts.
+ */
+static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
+{
+       struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+
+       mem_cgroup_resize_limit(memcg, ULLONG_MAX);
+       mem_cgroup_resize_memsw_limit(memcg, ULLONG_MAX);
+       memcg_update_kmem_limit(memcg, ULLONG_MAX);
+       res_counter_set_soft_limit(&memcg->res, ULLONG_MAX);
+}
+
 #ifdef CONFIG_MMU
 /* Handlers for move charge at task migration. */
 #define PRECHARGE_COUNT_AT_ONCE        256
@@ -7001,16 +7027,17 @@ static void mem_cgroup_move_task(struct cgroup_subsys_state *css,
 
 /*
  * Cgroup retains root cgroups across [un]mount cycles making it necessary
- * to verify sane_behavior flag on each mount attempt.
+ * to verify whether we're attached to the default hierarchy on each mount
+ * attempt.
  */
 static void mem_cgroup_bind(struct cgroup_subsys_state *root_css)
 {
        /*
-        * use_hierarchy is forced with sane_behavior.  cgroup core
+        * use_hierarchy is forced on the default hierarchy.  cgroup core
         * guarantees that @root doesn't have any children, so turning it
         * on for the root memcg is enough.
         */
-       if (cgroup_sane_behavior(root_css->cgroup))
+       if (cgroup_on_dfl(root_css->cgroup))
                mem_cgroup_from_css(root_css)->use_hierarchy = true;
 }
 
@@ -7019,11 +7046,12 @@ struct cgroup_subsys memory_cgrp_subsys = {
        .css_online = mem_cgroup_css_online,
        .css_offline = mem_cgroup_css_offline,
        .css_free = mem_cgroup_css_free,
+       .css_reset = mem_cgroup_css_reset,
        .can_attach = mem_cgroup_can_attach,
        .cancel_attach = mem_cgroup_cancel_attach,
        .attach = mem_cgroup_move_task,
        .bind = mem_cgroup_bind,
-       .base_cftypes = mem_cgroup_files,
+       .legacy_cftypes = mem_cgroup_files,
        .early_init = 0,
 };
 
@@ -7040,7 +7068,8 @@ __setup("swapaccount=", enable_swap_account);
 
 static void __init memsw_file_init(void)
 {
-       WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, memsw_cgroup_files));
+       WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys,
+                                         memsw_cgroup_files));
 }
 
 static void __init enable_swap_cgroup(void)
index cd8989c..a013bc9 100644 (file)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        if (av == NULL) /* Not actually mapped anymore */
                return;
 
-       pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff = page_to_pgoff(page);
        read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        mutex_lock(&mapping->i_mmap_mutex);
        read_lock(&tasklist_lock);
        for_each_process(tsk) {
-               pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+               pgoff_t pgoff = page_to_pgoff(page);
                struct task_struct *t = task_early_kill(tsk, force_early);
 
                if (!t)
@@ -895,8 +895,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        struct page *hpage = *hpagep;
        struct page *ppage;
 
+       /*
+        * Here we are interested only in user-mapped pages, so skip any
+        * other types of pages.
+        */
        if (PageReserved(p) || PageSlab(p))
                return SWAP_SUCCESS;
+       if (!(PageLRU(hpage) || PageHuge(p)))
+               return SWAP_SUCCESS;
 
        /*
         * This check implies we don't kill processes if their pages
@@ -905,8 +911,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        if (!page_mapped(hpage))
                return SWAP_SUCCESS;
 
-       if (PageKsm(p))
+       if (PageKsm(p)) {
+               pr_err("MCE %#lx: can't handle KSM pages.\n", pfn);
                return SWAP_FAIL;
+       }
 
        if (PageSwapCache(p)) {
                printk(KERN_ERR
@@ -1159,9 +1167,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                                        action_result(pfn, "free buddy, 2nd try", DELAYED);
                                return 0;
                        }
-                       action_result(pfn, "non LRU", IGNORED);
-                       put_page(p);
-                       return -EBUSY;
                }
        }
 
@@ -1194,6 +1199,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                return 0;
        }
 
+       if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
+               goto identify_page_state;
+
        /*
         * For error on the tail page, we should set PG_hwpoison
         * on the head page to show that the hugepage is hwpoisoned
@@ -1229,7 +1237,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
         */
        if (hwpoison_user_mappings(p, pfn, trapno, flags, &hpage)
            != SWAP_SUCCESS) {
-               printk(KERN_ERR "MCE %#lx: cannot unmap page, give up\n", pfn);
+               action_result(pfn, "unmapping failed", IGNORED);
                res = -EBUSY;
                goto out;
        }
@@ -1243,6 +1251,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                goto out;
        }
 
+identify_page_state:
        res = -EBUSY;
        /*
         * The first check uses the current page flags which may not have any
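
page_to_pgoff(), which the hunks above switch to, exists because the open-coded shift mishandles huge pages. Roughly, the helper scales the index by the compound order for a huge head page and uses it as-is otherwise; a userspace model (field names illustrative):

#include <stdio.h>

struct page_model {
        unsigned long index;
        int head_huge;          /* PageHeadHuge() */
        unsigned order;         /* compound_order() */
};

static unsigned long page_to_pgoff_model(const struct page_model *p)
{
        if (p->head_huge)
                return p->index << p->order;
        return p->index;        /* index << (PAGE_CACHE_SHIFT - PAGE_SHIFT) == index */
}

int main(void)
{
        struct page_model small = { 10, 0, 0 };
        struct page_model huge  = { 10, 1, 9 };  /* 2MB page: order 9 */

        printf("small pgoff=%lu huge pgoff=%lu\n",
               page_to_pgoff_model(&small), page_to_pgoff_model(&huge));
        return 0;
}
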
index d67fd9f..8b44f76 100644 (file)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2758,23 +2758,18 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
        update_mmu_cache(vma, address, pte);
 }
 
-static unsigned long fault_around_bytes = 65536;
+static unsigned long fault_around_bytes = rounddown_pow_of_two(65536);
 
-/*
- * fault_around_pages() and fault_around_mask() round down fault_around_bytes
- * to nearest page order. It's what do_fault_around() expects to see.
- */
 static inline unsigned long fault_around_pages(void)
 {
-       return rounddown_pow_of_two(fault_around_bytes) / PAGE_SIZE;
+       return fault_around_bytes >> PAGE_SHIFT;
 }
 
 static inline unsigned long fault_around_mask(void)
 {
-       return ~(rounddown_pow_of_two(fault_around_bytes) - 1) & PAGE_MASK;
+       return ~(fault_around_bytes - 1) & PAGE_MASK;
 }
 
-
 #ifdef CONFIG_DEBUG_FS
 static int fault_around_bytes_get(void *data, u64 *val)
 {
@@ -2782,11 +2777,19 @@ static int fault_around_bytes_get(void *data, u64 *val)
        return 0;
 }
 
+/*
+ * fault_around_pages() and fault_around_mask() expect fault_around_bytes
+ * rounded down to the nearest page order. It's what do_fault_around()
+ * expects to see.
+ */
 static int fault_around_bytes_set(void *data, u64 val)
 {
        if (val / PAGE_SIZE > PTRS_PER_PTE)
                return -EINVAL;
-       fault_around_bytes = val;
+       if (val > PAGE_SIZE)
+               fault_around_bytes = rounddown_pow_of_two(val);
+       else
+               fault_around_bytes = PAGE_SIZE; /* rounddown_pow_of_two(0) is undefined */
        return 0;
 }
 DEFINE_SIMPLE_ATTRIBUTE(fault_around_bytes_fops,
@@ -2882,7 +2885,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * if page by the offset is not ready to be mapped (cold cache or
         * something).
         */
-       if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+       if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+           fault_around_pages() > 1) {
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
                do_fault_around(vma, address, pte, pgoff, flags);
                if (!pte_same(*pte, orig_pte))
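
Rounding fault_around_bytes once, at write time, means the stored value is always a power of two of at least PAGE_SIZE, so the hot-path helpers reduce to a shift and a mask. A standalone check of that arithmetic (4K pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long rounddown_pow_of_two(unsigned long v)
{
        while (v & (v - 1))
                v &= v - 1;     /* clear lowest set bit until one remains */
        return v;
}

int main(void)
{
        unsigned long fault_around_bytes = rounddown_pow_of_two(65536);

        printf("pages = %lu\n", fault_around_bytes >> PAGE_SHIFT);      /* 16 */
        printf("mask  = %#lx\n", ~(fault_around_bytes - 1) & PAGE_MASK);
        return 0;
}
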
index 2849742..8f5330d 100644 (file)
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
  * passed via @private.)
  */
-static struct vm_area_struct *
+static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                const nodemask_t *nodes, unsigned long flags, void *private)
 {
-       int err;
-       struct vm_area_struct *first, *vma, *prev;
-
+       int err = 0;
+       struct vm_area_struct *vma, *prev;
 
-       first = find_vma(mm, start);
-       if (!first)
-               return ERR_PTR(-EFAULT);
+       vma = find_vma(mm, start);
+       if (!vma)
+               return -EFAULT;
        prev = NULL;
-       for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+       for (; vma && vma->vm_start < end; vma = vma->vm_next) {
                unsigned long endvma = vma->vm_end;
 
                if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
-                               return ERR_PTR(-EFAULT);
+                               return -EFAULT;
                        if (prev && prev->vm_end < vma->vm_start)
-                               return ERR_PTR(-EFAULT);
+                               return -EFAULT;
                }
 
                if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
                        err = queue_pages_pgd_range(vma, start, endvma, nodes,
                                                flags, private);
-                       if (err) {
-                               first = ERR_PTR(err);
+                       if (err)
                                break;
-                       }
                }
 next:
                prev = vma;
        }
-       return first;
+       return err;
 }
 
 /*
@@ -1156,16 +1153,17 @@ out:
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma that contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-       struct vm_area_struct *vma = (struct vm_area_struct *)private;
+       struct vm_area_struct *vma;
        unsigned long uninitialized_var(address);
 
+       vma = find_vma(current->mm, start);
        while (vma) {
                address = page_address_in_vma(page, vma);
                if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
        return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
        return NULL;
 }
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
                     unsigned short mode, unsigned short mode_flags,
                     nodemask_t *nmask, unsigned long flags)
 {
-       struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
@@ -1271,11 +1268,9 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (err)
                goto mpol_out;
 
-       vma = queue_pages_range(mm, start, end, nmask,
+       err = queue_pages_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);
-
-       err = PTR_ERR(vma);     /* maybe ... */
-       if (!IS_ERR(vma))
+       if (!err)
                err = mbind_range(mm, start, end, new);
 
        if (!err) {
@@ -1283,9 +1278,8 @@ static long do_mbind(unsigned long start, unsigned long len,
 
                if (!list_empty(&pagelist)) {
                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-                       nr_failed = migrate_pages(&pagelist, new_vma_page,
-                                       NULL, (unsigned long)vma,
-                                       MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+                       nr_failed = migrate_pages(&pagelist, new_page, NULL,
+                               start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_movable_pages(&pagelist);
                }
@@ -2145,7 +2139,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
        } else
                *new = *old;
 
-       rcu_read_lock();
        if (current_cpuset_is_being_rebound()) {
                nodemask_t mems = cpuset_mems_allowed(current);
                if (new->flags & MPOL_F_REBINDING)
@@ -2153,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
                else
                        mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
        }
-       rcu_read_unlock();
        atomic_set(&new->refcnt, 1);
        return new;
 }
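
The queue_pages_range() conversion above drops the kernel's ERR_PTR convention in favour of a plain int. For reference, that convention packs small negative errnos into the top of the pointer range so one value can carry either a pointer or an error; simplified from include/linux/err.h:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *v = ERR_PTR(-14);         /* -EFAULT */

        printf("is_err=%d err=%ld\n", IS_ERR(v), PTR_ERR(v));
        return 0;
}
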
index 63f0cd5..be6dbf9 100644 (file)
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -120,8 +120,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                pmd = mm_find_pmd(mm, addr);
                if (!pmd)
                        goto out;
-               if (pmd_trans_huge(*pmd))
-                       goto out;
 
                ptep = pte_offset_map(pmd, addr);
 
@@ -990,9 +988,10 @@ out:
         * it.  Otherwise, putback_lru_page() will drop the reference grabbed
         * during isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+       if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+               ClearPageSwapBacked(newpage);
                put_new_page(newpage, private);
-       else
+       } else
                putback_lru_page(newpage);
 
        if (result) {
index a5c6736..992a167 100644 (file)
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -78,7 +78,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
                        goto out_unlock;
                }
                file = vma->vm_file;
-               fstart = start + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+               fstart = (start - vma->vm_start) +
+                        ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
                fend = fstart + (min(end, vma->vm_end) - start) - 1;
                start = vma->vm_end;
                if ((flags & MS_SYNC) && file &&
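
The fstart fix matters when msync() starts in the middle of a vma: the file offset must be the distance into the vma plus the vma's own file offset, not the raw user address plus vm_pgoff. A worked example with illustrative values (4K pages, so the page shift is 12):

#include <stdio.h>

int main(void)
{
        unsigned long vm_start = 0x700000000000UL;  /* vma start (assumed) */
        unsigned long start    = vm_start + 0x3000; /* msync start inside the vma */
        unsigned long vm_pgoff = 5;                 /* vma maps from file page 5 */

        long long fstart = (start - vm_start) + ((long long)vm_pgoff << 12);

        printf("fstart = %#llx\n", fstart);         /* 0x3000 + 0x5000 = 0x8000 */
        return 0;
}
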
index b78e3a8..4a852f6 100644 (file)
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -786,7 +786,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        for (i = 0; i < VMACACHE_SIZE; i++) {
                /* if the vma is cached, invalidate the entire cache */
                if (curr->vmacache[i] == vma) {
-                       vmacache_invalidate(curr->mm);
+                       vmacache_invalidate(mm);
                        break;
                }
        }
index 518e2c3..e0c9430 100644 (file)
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1306,9 +1306,9 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
        *bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
        if (bdi_bg_thresh)
-               *bdi_bg_thresh = div_u64((u64)*bdi_thresh *
-                                        background_thresh,
-                                        dirty_thresh);
+               *bdi_bg_thresh = dirty_thresh ? div_u64((u64)*bdi_thresh *
+                                                       background_thresh,
+                                                       dirty_thresh) : 0;
 
        /*
         * In order to avoid the stacked BDI deadlock we need
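
The guarded form above avoids a divide-by-zero when dirty_thresh is 0; the background threshold then simply scales to 0. A standalone check of both cases (function and values are a model, not the kernel code):

#include <stdio.h>
#include <stdint.h>

static uint64_t bdi_bg_thresh(uint64_t bdi_thresh, uint64_t background_thresh,
                              uint64_t dirty_thresh)
{
        return dirty_thresh ? bdi_thresh * background_thresh / dirty_thresh : 0;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)bdi_bg_thresh(100, 50, 200)); /* 25 */
        printf("%llu\n", (unsigned long long)bdi_bg_thresh(100, 50, 0));   /* 0, not a trap */
        return 0;
}
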
index 4f59fa2..ef44ad7 100644 (file)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -69,6 +69,7 @@
 
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
+#define MIN_PERCPU_PAGELIST_FRACTION   (8)
 
 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 DEFINE_PER_CPU(int, numa_node);
@@ -815,9 +816,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
                set_page_count(p, 0);
        } while (++p, --i);
 
-       set_page_refcounted(page);
        set_pageblock_migratetype(page, MIGRATE_CMA);
-       __free_pages(page, pageblock_order);
+
+       if (pageblock_order >= MAX_ORDER) {
+               i = pageblock_nr_pages;
+               p = page;
+               do {
+                       set_page_refcounted(p);
+                       __free_pages(p, MAX_ORDER - 1);
+                       p += MAX_ORDER_NR_PAGES;
+               } while (i -= MAX_ORDER_NR_PAGES);
+       } else {
+               set_page_refcounted(page);
+               __free_pages(page, pageblock_order);
+       }
+
        adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
@@ -2434,7 +2447,7 @@ static inline int
 gfp_to_alloc_flags(gfp_t gfp_mask)
 {
        int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
-       const gfp_t wait = gfp_mask & __GFP_WAIT;
+       const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
 
        /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
        BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
@@ -2443,20 +2456,20 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
         * The caller may dip into page reserves a bit more if the caller
         * cannot run direct reclaim, or if the caller has realtime scheduling
         * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
-        * set both ALLOC_HARDER (!wait) and ALLOC_HIGH (__GFP_HIGH).
+        * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
         */
        alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
 
-       if (!wait) {
+       if (atomic) {
                /*
-                * Not worth trying to allocate harder for
-                * __GFP_NOMEMALLOC even if it can't schedule.
+                * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
+                * if it can't schedule.
                 */
-               if  (!(gfp_mask & __GFP_NOMEMALLOC))
+               if (!(gfp_mask & __GFP_NOMEMALLOC))
                        alloc_flags |= ALLOC_HARDER;
                /*
-                * Ignore cpuset if GFP_ATOMIC (!wait) rather than fail alloc.
-                * See also cpuset_zone_allowed() comment in kernel/cpuset.c.
+                * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
+                * comment for __cpuset_node_allowed_softwall().
                 */
                alloc_flags &= ~ALLOC_CPUSET;
        } else if (unlikely(rt_task(current)) && !in_interrupt())
@@ -4145,7 +4158,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __meminit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
        int batch;
@@ -4261,8 +4274,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
        pageset_update(&p->pcp, high, batch);
 }
 
-static void __meminit pageset_set_high_and_batch(struct zone *zone,
-               struct per_cpu_pageset *pcp)
+static void pageset_set_high_and_batch(struct zone *zone,
+                                      struct per_cpu_pageset *pcp)
 {
        if (percpu_pagelist_fraction)
                pageset_set_high(pcp,
@@ -5881,23 +5894,38 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
-       unsigned int cpu;
+       int old_percpu_pagelist_fraction;
        int ret;
 
+       mutex_lock(&pcp_batch_high_lock);
+       old_percpu_pagelist_fraction = percpu_pagelist_fraction;
+
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-       if (!write || (ret < 0))
-               return ret;
+       if (!write || ret < 0)
+               goto out;
+
+       /* Sanity checking to avoid pcp imbalance */
+       if (percpu_pagelist_fraction &&
+           percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
+               percpu_pagelist_fraction = old_percpu_pagelist_fraction;
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* No change? */
+       if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
+               goto out;
 
-       mutex_lock(&pcp_batch_high_lock);
        for_each_populated_zone(zone) {
-               unsigned long  high;
-               high = zone->managed_pages / percpu_pagelist_fraction;
+               unsigned int cpu;
+
                for_each_possible_cpu(cpu)
-                       pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
-                                        high);
+                       pageset_set_high_and_batch(zone,
+                                       per_cpu_ptr(zone->pageset, cpu));
        }
+out:
        mutex_unlock(&pcp_batch_high_lock);
-       return 0;
+       return ret;
 }
 
 int hashdist = HASHDIST_DEFAULT;
@@ -6034,11 +6062,13 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 }
 
 /**
- * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
+ * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest to retrieve
- * @end_bitidx: The last bit of interest
- * returns pageblock_bits flags
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest to retrieve
+ * @mask: mask of bits that the caller is interested in
+ *
+ * Return: pageblock_bits flags
  */
 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
                                        unsigned long end_bitidx,
@@ -6063,9 +6093,10 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 /**
  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest
- * @end_bitidx: The last bit of interest
  * @flags: The flags to set
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest
+ * @mask: mask of bits that the caller is interested in
  */
 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
                                        unsigned long pfn,
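
The percpu_pagelist_fraction handler above now follows a save/validate/rollback shape: the old value is captured under pcp_batch_high_lock, bogus input (a nonzero fraction below MIN_PERCPU_PAGELIST_FRACTION) restores it and returns -EINVAL, and the per-zone recomputation only runs on a real change. A userspace model of that flow:

#include <stdio.h>

#define MIN_FRACTION 8

static int fraction;    /* stands in for percpu_pagelist_fraction */

static int set_fraction(int val)
{
        int old = fraction;

        fraction = val;                 /* proc_dointvec_minmax() wrote it */
        if (fraction && fraction < MIN_FRACTION) {
                fraction = old;         /* roll back bogus input */
                return -1;              /* -EINVAL */
        }
        if (fraction == old)
                return 0;               /* no change: skip the recomputation */
        printf("recomputing pcp high/batch for all zones\n");
        return 0;
}

int main(void)
{
        printf("%d\n", set_fraction(4));    /* rejected, fraction stays 0 */
        printf("%d\n", set_fraction(16));   /* accepted, triggers recompute */
        printf("%d\n", set_fraction(16));   /* accepted no-op */
        return 0;
}
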
index 2ddf9a9..2139e30 100644 (file)
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -720,8 +720,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
        if (unlikely(align < 2))
                align = 2;
 
-       if (unlikely(size & 1))
-               size++;
+       size = ALIGN(size, 2);
 
        if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                WARN(true, "illegal size (%zu) or align (%zu) for "
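
ALIGN(size, 2) is the standard round-up idiom and matches the old "size++ when odd" behaviour exactly; a quick standalone check:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        for (unsigned long size = 1; size <= 5; size++)
                printf("%lu -> %lu\n", size, (unsigned long)ALIGN(size, 2UL));
        return 0;       /* 1->2, 2->2, 3->4, 4->4, 5->6 */
}
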
index bf05fc8..22a4a76 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-       if (unlikely(is_vm_hugetlb_page(vma)))
-               pgoff = page->index << huge_page_order(page_hstate(page));
-
+       pgoff_t pgoff = page_to_pgoff(page);
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 
@@ -569,6 +565,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;
+       pmd_t pmde;
 
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
@@ -579,7 +576,13 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
                goto out;
 
        pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
+       /*
+        * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
+        * without holding anon_vma lock for write.  So when looking for a
+        * genuine pmde (in which to find pte), test present and !THP together.
+        */
+       pmde = ACCESS_ONCE(*pmd);
+       if (!pmd_present(pmde) || pmd_trans_huge(pmde))
                pmd = NULL;
 out:
        return pmd;
@@ -615,9 +618,6 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
        if (!pmd)
                return NULL;
 
-       if (pmd_trans_huge(*pmd))
-               return NULL;
-
        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!sync && !pte_present(*pte)) {
@@ -1635,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
        struct anon_vma *anon_vma;
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff_t pgoff = page_to_pgoff(page);
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
@@ -1676,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
        struct address_space *mapping = page->mapping;
-       pgoff_t pgoff = page->index << compound_order(page);
+       pgoff_t pgoff = page_to_pgoff(page);
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
 
index f484c27..af68b15 100644 (file)
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
+       wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
@@ -467,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                return;
 
        index = start;
-       for ( ; ; ) {
+       while (index < end) {
                cond_resched();
 
                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
-                       if (index == start || unfalloc)
+                       /* If all gone or hole-punch or unfalloc, we're done */
+                       if (index == start || end != -1)
                                break;
+                       /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
-               if ((index == start || unfalloc) && indices[0] >= end) {
-                       pagevec_remove_exceptionals(&pvec);
-                       pagevec_release(&pvec);
-                       break;
-               }
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
@@ -495,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
-                               nr_swaps_freed += !shmem_free_swap(mapping,
-                                                               index, page);
+                               if (shmem_free_swap(mapping, index, page)) {
+                                       /* Swap was replaced by page: retry */
+                                       index--;
+                                       break;
+                               }
+                               nr_swaps_freed++;
                                continue;
                        }
 
@@ -505,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                if (page->mapping == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
+                               } else {
+                                       /* Page was replaced by swap: retry */
+                                       unlock_page(page);
+                                       index--;
+                                       break;
                                }
                        }
                        unlock_page(page);
@@ -759,6 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
+                           !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
@@ -1027,6 +1035,9 @@ repeat:
                goto failed;
        }
 
+       if (page && sgp == SGP_WRITE)
+               mark_page_accessed(page);
+
        /* fallocated page? */
        if (page && !PageUptodate(page)) {
                if (sgp != SGP_READ)
@@ -1108,6 +1119,9 @@ repeat:
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
 
+               if (sgp == SGP_WRITE)
+                       mark_page_accessed(page);
+
                delete_from_swap_cache(page);
                set_page_dirty(page);
                swap_free(swap);
@@ -1134,6 +1148,9 @@ repeat:
 
                __SetPageSwapBacked(page);
                __set_page_locked(page);
+               if (sgp == SGP_WRITE)
+                       init_page_accessed(page);
+
                error = mem_cgroup_charge_file(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
                if (error)
@@ -1233,6 +1250,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int error;
        int ret = VM_FAULT_LOCKED;
 
+       /*
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+        * faulting pages into the hole while it's being punched.  Although
+        * shmem_undo_range() does remove the additions, it may be unable to
+        * keep up, as each new page needs its own unmap_mapping_range() call,
+        * and the i_mmap tree grows ever slower to scan if new vmas are added.
+        *
+        * It does not matter if we sometimes reach this check just before the
+        * hole-punch begins, so that one fault then races with the punch:
+        * we just need to make racing faults a rare case.
+        *
+        * The implementation below would be much simpler if we just used a
+        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * and bloating every shmem inode for this unlikely case would be sad.
+        */
+       if (unlikely(inode->i_private)) {
+               struct shmem_falloc *shmem_falloc;
+
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+               if (shmem_falloc &&
+                   shmem_falloc->waitq &&
+                   vmf->pgoff >= shmem_falloc->start &&
+                   vmf->pgoff < shmem_falloc->next) {
+                       wait_queue_head_t *shmem_falloc_waitq;
+                       DEFINE_WAIT(shmem_fault_wait);
+
+                       ret = VM_FAULT_NOPAGE;
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* It's polite to up mmap_sem if we can */
+                               up_read(&vma->vm_mm->mmap_sem);
+                               ret = VM_FAULT_RETRY;
+                       }
+
+                       shmem_falloc_waitq = shmem_falloc->waitq;
+                       prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&inode->i_lock);
+                       schedule();
+
+                       /*
+                        * shmem_falloc_waitq points into the shmem_fallocate()
+                        * stack of the hole-punching task: shmem_falloc_waitq
+                        * is usually invalid by the time we reach here, but
+                        * finish_wait() does not dereference it in that case;
+                        * though i_lock is needed lest we race with wake_up_all().
+                        */
+                       spin_lock(&inode->i_lock);
+                       finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+                       spin_unlock(&inode->i_lock);
+                       return ret;
+               }
+               spin_unlock(&inode->i_lock);
+       }
+
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1372,13 +1447,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
 {
-       int ret;
        struct inode *inode = mapping->host;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-       ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
-       if (ret == 0 && *pagep)
-               init_page_accessed(*pagep);
-       return ret;
+       return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
 }
 
 static int
@@ -1724,18 +1795,34 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
        pgoff_t start, index, end;
        int error;
 
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+               return -EOPNOTSUPP;
+
        mutex_lock(&inode->i_mutex);
 
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
+               shmem_falloc.waitq = &shmem_falloc_waitq;
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+               inode->i_private = &shmem_falloc;
+               spin_unlock(&inode->i_lock);
 
                if ((u64)unmap_end > (u64)unmap_start)
                        unmap_mapping_range(mapping, unmap_start,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
                error = 0;
                goto out;
        }
@@ -1753,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
+       shmem_falloc.waitq = NULL;
        shmem_falloc.start = start;
        shmem_falloc.next  = start;
        shmem_falloc.nr_falloced = 0;
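
The two shmem hunks above are a waiter/waker pair: shmem_fallocate() publishes
an on-stack shmem_falloc (carrying its waitqueue) in inode->i_private under
i_lock, and shmem_fault() sleeps on that waitqueue whenever it touches the
range being punched. Below is a minimal userspace analogue of that handshake,
with pthreads standing in for i_lock and the kernel waitqueue; every name in
it is illustrative, not kernel API.

    #include <pthread.h>

    struct falloc_range {
        long start, next;           /* page range being punched */
        pthread_cond_t waitq;       /* lives on the puncher's stack */
    };

    static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct falloc_range *i_private;  /* NULL: no punch running */

    static void punch_hole(long start, long next)
    {
        struct falloc_range r = { start, next, PTHREAD_COND_INITIALIZER };

        pthread_mutex_lock(&i_lock);
        i_private = &r;                      /* publish the range */
        pthread_mutex_unlock(&i_lock);

        /* ... unmap_mapping_range() + shmem_truncate_range() here ... */

        pthread_mutex_lock(&i_lock);
        i_private = NULL;                    /* unpublish, then wake */
        pthread_cond_broadcast(&r.waitq);
        pthread_mutex_unlock(&i_lock);
    }

    static void fault_page(long pgoff)
    {
        pthread_mutex_lock(&i_lock);
        while (i_private &&
               pgoff >= i_private->start && pgoff < i_private->next)
            pthread_cond_wait(&i_private->waitq, &i_lock);
        pthread_mutex_unlock(&i_lock);
        /* safe to instantiate the page now */
    }

    int main(void)
    {
        punch_hole(0, 16);  /* no concurrent faulters in this toy run */
        fault_page(4);      /* i_private is NULL again: returns at once */
        return 0;
    }

Unpublishing and broadcasting under the same lock is what lets the stack
object go out of scope safely once the puncher returns, the same lifetime
subtlety the finish_wait() comment in the fault path spells out.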
index 9ca3b87..3070b92 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,6 +386,39 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#define OBJECT_FREE (0)
+#define OBJECT_ACTIVE (1)
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void set_obj_status(struct page *page, int idx, int val)
+{
+       int freelist_size;
+       char *status;
+       struct kmem_cache *cachep = page->slab_cache;
+
+       freelist_size = cachep->num * sizeof(freelist_idx_t);
+       status = (char *)page->freelist + freelist_size;
+       status[idx] = val;
+}
+
+static inline unsigned int get_obj_status(struct page *page, int idx)
+{
+       int freelist_size;
+       char *status;
+       struct kmem_cache *cachep = page->slab_cache;
+
+       freelist_size = cachep->num * sizeof(freelist_idx_t);
+       status = (char *)page->freelist + freelist_size;
+
+       return status[idx];
+}
+
+#else
+static inline void set_obj_status(struct page *page, int idx, int val) {}
+
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -576,12 +609,30 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
        return cachep->array[smp_processor_id()];
 }
 
+static size_t calculate_freelist_size(int nr_objs, size_t align)
+{
+       size_t freelist_size;
+
+       freelist_size = nr_objs * sizeof(freelist_idx_t);
+       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+               freelist_size += nr_objs * sizeof(char);
+
+       if (align)
+               freelist_size = ALIGN(freelist_size, align);
+
+       return freelist_size;
+}
+
 static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
                                size_t idx_size, size_t align)
 {
        int nr_objs;
+       size_t remained_size;
        size_t freelist_size;
+       int extra_space = 0;
 
+       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+               extra_space = sizeof(char);
        /*
         * Ignore padding for the initial guess. The padding
         * is at most @align-1 bytes, and @buffer_size is at
@@ -590,14 +641,15 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
         * into the memory allocation when taking the padding
         * into account.
         */
-       nr_objs = slab_size / (buffer_size + idx_size);
+       nr_objs = slab_size / (buffer_size + idx_size + extra_space);
 
        /*
         * This calculated number will be either the right
         * amount, or one greater than what we want.
         */
-       freelist_size = slab_size - nr_objs * buffer_size;
-       if (freelist_size < ALIGN(nr_objs * idx_size, align))
+       remained_size = slab_size - nr_objs * buffer_size;
+       freelist_size = calculate_freelist_size(nr_objs, align);
+       if (remained_size < freelist_size)
                nr_objs--;
 
        return nr_objs;
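
To see the new sizing concretely, here is a small standalone program that
redoes the arithmetic with assumed numbers (2-byte freelist_idx_t, a
4096-byte slab, 128-byte objects, 8-byte alignment): with
CONFIG_DEBUG_SLAB_LEAK each object costs one extra status byte in the
management area, which is exactly what calculate_freelist_size() now
accounts for.

    #include <stdio.h>
    #include <stddef.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    typedef unsigned short freelist_idx_t;   /* assumption: 2 bytes */

    static size_t calculate_freelist_size(int nr_objs, size_t align)
    {
        size_t sz = nr_objs * sizeof(freelist_idx_t);

        sz += nr_objs * sizeof(char);  /* one status byte per object */
        return align ? ALIGN(sz, align) : sz;
    }

    int main(void)
    {
        size_t slab = 4096, obj = 128, align = 8;
        size_t per_obj = sizeof(freelist_idx_t) + sizeof(char);
        int nr = slab / (obj + per_obj);              /* guess: 31 */

        if (slab - nr * obj < calculate_freelist_size(nr, align))
            nr--;                       /* guess was one too high */
        printf("objs=%d freelist=%zu left=%zu\n", nr,
               calculate_freelist_size(nr, align),
               slab - nr * obj - calculate_freelist_size(nr, align));
        return 0;                       /* objs=31 freelist=96 left=32 */
    }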
@@ -635,7 +687,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
        } else {
                nr_objs = calculate_nr_objs(slab_size, buffer_size,
                                        sizeof(freelist_idx_t), align);
-               mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
+               mgmt_size = calculate_freelist_size(nr_objs, align);
        }
        *num = nr_objs;
        *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
@@ -2041,13 +2093,16 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
                        break;
 
                if (flags & CFLGS_OFF_SLAB) {
+                       size_t freelist_size_per_obj = sizeof(freelist_idx_t);
                        /*
                         * Max number of objs-per-slab for caches which
                         * use off-slab slabs. Needed to avoid a possible
                         * looping condition in cache_grow().
                         */
+                       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+                               freelist_size_per_obj += sizeof(char);
                        offslab_limit = size;
-                       offslab_limit /= sizeof(freelist_idx_t);
+                       offslab_limit /= freelist_size_per_obj;
 
                        if (num > offslab_limit)
                                break;
@@ -2294,8 +2349,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
        if (!cachep->num)
                return -E2BIG;
 
-       freelist_size =
-               ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
+       freelist_size = calculate_freelist_size(cachep->num, cachep->align);
 
        /*
         * If the slab has been placed off-slab, and we have enough space then
@@ -2308,7 +2362,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
        if (flags & CFLGS_OFF_SLAB) {
                /* really off slab. No need for manual alignment */
-               freelist_size = cachep->num * sizeof(freelist_idx_t);
+               freelist_size = calculate_freelist_size(cachep->num, 0);
 
 #ifdef CONFIG_PAGE_POISONING
                /* If we're going to use the generic kernel_map_pages()
@@ -2612,6 +2666,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
                if (cachep->ctor)
                        cachep->ctor(objp);
 #endif
+               set_obj_status(page, i, OBJECT_FREE);
                set_free_obj(page, i, i);
        }
 }
@@ -2820,6 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
        BUG_ON(objnr >= cachep->num);
        BUG_ON(objp != index_to_obj(cachep, page, objnr));
 
+       set_obj_status(page, objnr, OBJECT_FREE);
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2953,6 +3009,8 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                                gfp_t flags, void *objp, unsigned long caller)
 {
+       struct page *page;
+
        if (!objp)
                return objp;
        if (cachep->flags & SLAB_POISON) {
@@ -2983,6 +3041,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
        }
+
+       page = virt_to_head_page(objp);
+       set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
        objp += obj_offset(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON)
                cachep->ctor(objp);
@@ -4219,21 +4280,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
                                                struct page *page)
 {
        void *p;
-       int i, j;
+       int i;
 
        if (n[0] == n[1])
                return;
        for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
-               bool active = true;
-
-               for (j = page->active; j < c->num; j++) {
-                       /* Skip freed item */
-                       if (get_free_obj(page, j) == i) {
-                               active = false;
-                               break;
-                       }
-               }
-               if (!active)
+               if (get_obj_status(page, i) != OBJECT_ACTIVE)
                        continue;
 
                if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
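
The payoff of those status bytes is in handle_slab(): deciding whether an
object is live no longer means scanning the free tail of the freelist for
every object, an O(num^2) pattern, but a single byte lookup. A toy
comparison of the old and new tests (the freelist contents here are
hand-built for illustration):

    #include <stdio.h>

    #define OBJECT_FREE   0
    #define OBJECT_ACTIVE 1
    #define NUM 8

    /* first 'active' slots hold allocated indices, the rest are free */
    static unsigned short freelist[NUM] = {3, 5, 0, 1, 2, 4, 6, 7};
    static char status[NUM];    /* appended after the freelist on-slab */
    static int active = 2;

    static int old_is_active(int idx)   /* O(NUM): scan the free tail */
    {
        for (int j = active; j < NUM; j++)
            if (freelist[j] == idx)
                return 0;
        return 1;
    }

    int main(void)
    {
        status[3] = status[5] = OBJECT_ACTIVE;  /* set_obj_status()'s job */
        for (int i = 0; i < NUM; i++)
            printf("obj %d: old=%d new=%d\n", i,
                   old_is_active(i), status[i] == OBJECT_ACTIVE);
        return 0;
    }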
index 735e01a..d31c4ba 100644 (file)
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
                        continue;
                }
 
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
                if (!strcmp(s->name, name)) {
                        pr_err("%s (%s): Cache name already exists.\n",
                               __func__, name);
index b2b0473..7300480 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,7 +1881,7 @@ redo:
 
        new.frozen = 0;
 
-       if (!new.inuse && n->nr_partial > s->min_partial)
+       if (!new.inuse && n->nr_partial >= s->min_partial)
                m = M_FREE;
        else if (new.freelist) {
                m = M_PARTIAL;
@@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s,
                                new.freelist, new.counters,
                                "unfreezing slab"));
 
-               if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
+               if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
                        page->next = discard_page;
                        discard_page = page;
                } else {
@@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 return;
         }
 
-       if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+       if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
                goto slab_empty;
 
        /*
index 6a78c81..eda2473 100644 (file)
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup_entries(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE),
-                       indices)) {
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+                       /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
+                       /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }
                if (index == start && indices[0] >= end) {
+                       /* All gone out of hole to be punched, we're done */
                        pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
-                       if (index >= end)
+                       if (index >= end) {
+                               /* Restart punch to make sure all gone */
+                               index = start - 1;
                                break;
+                       }
 
                        if (radix_tree_exceptional_entry(page)) {
                                clear_exceptional_entry(mapping, index, page);
index 9012b1c..75d4277 100644 (file)
@@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
 
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
-       if (skb_cow(skb, skb_headroom(skb)) < 0)
+       if (skb_cow(skb, skb_headroom(skb)) < 0) {
+               kfree_skb(skb);
                return NULL;
+       }
+
        memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
        skb->mac_header += VLAN_HLEN;
        return skb;
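
The added kfree_skb() changes the helper's contract: on failure the skb is
consumed, so a NULL return now means "already freed" and the caller must
not free or touch it again. A hedged standalone model of that ownership
rule (plain calloc/free standing in for skb allocation):

    #include <stdio.h>
    #include <stdlib.h>

    struct sk_buff { int cow_fails; };

    static void kfree_skb(struct sk_buff *skb) { free(skb); }

    /* models the patched helper: frees the buffer itself on failure */
    static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
    {
        if (skb->cow_fails) {
            kfree_skb(skb);       /* consumed: caller must not reuse */
            return NULL;
        }
        return skb;               /* caller keeps ownership */
    }

    int main(void)
    {
        struct sk_buff *skb = calloc(1, sizeof(*skb));

        skb->cow_fails = 1;
        skb = vlan_reorder_header(skb);
        puts(skb ? "ok" : "dropped (already freed, no double free)");
        return 0;
    }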
index ad2ac3c..dd11f61 100644 (file)
@@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        int i;
 
-       free_percpu(vlan->vlan_pcpu_stats);
-       vlan->vlan_pcpu_stats = NULL;
        for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
                while ((pm = vlan->egress_priority_map[i]) != NULL) {
                        vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
+static void vlan_dev_free(struct net_device *dev)
+{
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+       free_percpu(vlan->vlan_pcpu_stats);
+       vlan->vlan_pcpu_stats = NULL;
+       free_netdev(dev);
+}
+
 void vlan_setup(struct net_device *dev)
 {
        ether_setup(dev);
@@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev)
        dev->tx_queue_len       = 0;
 
        dev->netdev_ops         = &vlan_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->destructor         = vlan_dev_free;
        dev->ethtool_ops        = &vlan_ethtool_ops;
 
        memset(dev->broadcast, 0, ETH_ALEN);
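
Moving free_percpu() out of ndo_uninit and into a device destructor ties
the lifetime of the per-CPU stats to the net_device itself, so a late stats
reader can never see freed memory. A minimal userspace model of the
pattern, with plain calloc/free standing in for the percpu and netdev
allocators:

    #include <stdlib.h>

    struct stats { unsigned long tx; };
    struct net_device {
        struct stats *pcpu_stats;
        void (*destructor)(struct net_device *);
    };

    static void vlan_dev_free(struct net_device *dev)
    {
        free(dev->pcpu_stats);  /* last user gone: safe to free now */
        dev->pcpu_stats = NULL;
        free(dev);              /* what free_netdev() would do */
    }

    int main(void)
    {
        struct net_device *dev = calloc(1, sizeof(*dev));

        dev->pcpu_stats = calloc(1, sizeof(struct stats));
        dev->destructor = vlan_dev_free;
        dev->destructor(dev);   /* runs when the last reference drops */
        return 0;
    }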
index 01a1082..bfcf6be 100644 (file)
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
                goto drop;
 
        /* Queue packet (standard) */
-       skb->sk = sock;
-
        if (sock_queue_rcv_skb(sock, skb) < 0)
                goto drop;
 
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
        if (!skb)
                goto out;
 
-       skb->sk = sk;
        skb_reserve(skb, ddp_dl->header_length);
        skb_reserve(skb, dev->hard_header_len);
        skb->dev = dev;
index 6f0d9ec..a957c81 100644 (file)
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
        bla_dst_own = &bat_priv->bla.claim_dest;
 
-       /* check if it is a claim packet in general */
-       if (memcmp(bla_dst->magic, bla_dst_own->magic,
-                  sizeof(bla_dst->magic)) != 0)
-               return 0;
-
        /* if announcement packet, use the source,
         * otherwise assume it is in the hw_src
         */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    struct sk_buff *skb)
 {
-       struct batadv_bla_claim_dst *bla_dst;
+       struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
        uint8_t *hw_src, *hw_dst;
-       struct vlan_ethhdr *vhdr;
+       struct vlan_hdr *vhdr, vhdr_buf;
        struct ethhdr *ethhdr;
        struct arphdr *arphdr;
        unsigned short vid;
+       int vlan_depth = 0;
        __be16 proto;
        int headlen;
        int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        proto = ethhdr->h_proto;
        headlen = ETH_HLEN;
        if (vid & BATADV_VLAN_HAS_TAG) {
-               vhdr = vlan_eth_hdr(skb);
-               proto = vhdr->h_vlan_encapsulated_proto;
-               headlen += VLAN_HLEN;
+               /* Traverse the VLAN/Ethertypes.
+                *
+                * At this point it is known that the first protocol is a VLAN
+                * header, so start checking at the encapsulated protocol.
+                *
+                * The depth of the VLAN headers is recorded to drop BLA claim
+                * frames encapsulated into multiple VLAN headers (QinQ).
+                */
+               do {
+                       vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
+                                                 &vhdr_buf);
+                       if (!vhdr)
+                               return 0;
+
+                       proto = vhdr->h_vlan_encapsulated_proto;
+                       headlen += VLAN_HLEN;
+                       vlan_depth++;
+               } while (proto == htons(ETH_P_8021Q));
        }
 
        if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
        hw_dst = hw_src + ETH_ALEN + 4;
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
+       bla_dst_own = &bat_priv->bla.claim_dest;
+
+       /* check if it is a claim frame in general */
+       if (memcmp(bla_dst->magic, bla_dst_own->magic,
+                  sizeof(bla_dst->magic)) != 0)
+               return 0;
+
+       /* check if there is a claim frame encapsulated deeper in (QinQ) and
+        * drop that, as this is not supported by BLA but should also not be
+        * sent via the mesh.
+        */
+       if (vlan_depth > 1)
+               return 1;
 
        /* check if it is a claim frame. */
        ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
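
The do/while above walks nested 802.1Q headers one skb_header_pointer()
probe at a time, counting depth so QinQ-encapsulated claims can be
rejected. A self-contained userspace version of the same walk over a raw
frame buffer (the frame bytes are hand-built for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_HLEN    14
    #define VLAN_HLEN   4
    #define ETH_P_8021Q 0x8100

    /* caller already knows the outermost ethertype is 802.1Q */
    static int vlan_depth(const uint8_t *frame, size_t len, unsigned proto)
    {
        size_t off = ETH_HLEN;
        int depth = 0;

        while (proto == ETH_P_8021Q) {
            if (off + VLAN_HLEN > len)
                return -1;                 /* truncated header */
            /* bytes 2..3 of a VLAN header: encapsulated ethertype */
            proto = (frame[off + 2] << 8) | frame[off + 3];
            off += VLAN_HLEN;
            depth++;
        }
        return depth;
    }

    int main(void)
    {
        uint8_t qinq[ETH_HLEN + 2 * VLAN_HLEN + 28] = {0};

        /* outer tag encapsulates another 802.1Q, inner one ARP */
        qinq[ETH_HLEN + 2] = 0x81; qinq[ETH_HLEN + 3] = 0x00;
        qinq[ETH_HLEN + 6] = 0x08; qinq[ETH_HLEN + 7] = 0x06;

        int depth = vlan_depth(qinq, sizeof(qinq), ETH_P_8021Q);
        printf("depth=%d -> %s\n", depth,
               depth > 1 ? "drop claim (QinQ)" : "process");
        return 0;
    }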
index e7ee65d..cbd677f 100644 (file)
@@ -448,10 +448,15 @@ out:
  *  possibly free it
  * @softif_vlan: the vlan object to release
  */
-void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
-       if (atomic_dec_and_test(&softif_vlan->refcount))
-               kfree_rcu(softif_vlan, rcu);
+       if (atomic_dec_and_test(&vlan->refcount)) {
+               spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+               hlist_del_rcu(&vlan->list);
+               spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+
+               kfree_rcu(vlan, rcu);
+       }
 }
 
 /**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
        if (!vlan)
                return -ENOMEM;
 
+       vlan->bat_priv = bat_priv;
        vlan->vid = vid;
        atomic_set(&vlan->refcount, 1);
 
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
                return err;
        }
 
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
        /* add a new TT local entry. This one will be marked with the NOPURGE
         * flag
         */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
                            bat_priv->soft_iface->dev_addr, vid,
                            BATADV_NULL_IFINDEX, BATADV_NO_MARK);
 
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
        return 0;
 }
 
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
                                       struct batadv_softif_vlan *vlan)
 {
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       hlist_del_rcu(&vlan->list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
-       batadv_sysfs_del_vlan(bat_priv, vlan);
-
        /* explicitly remove the associated TT local entry because it is marked
         * with the NOPURGE flag
         */
        batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
                               vlan->vid, "vlan interface destroyed", false);
 
+       batadv_sysfs_del_vlan(bat_priv, vlan);
        batadv_softif_vlan_free_ref(vlan);
 }
 
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
                                    unsigned short vid)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
+       struct batadv_softif_vlan *vlan;
+       int ret;
 
        /* only 802.1Q vlans are supported.
         * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 
        vid |= BATADV_VLAN_HAS_TAG;
 
-       return batadv_softif_create_vlan(bat_priv, vid);
+       /* if a new vlan is getting created and it already exists, it means that
+        * it was not deleted yet. batadv_softif_vlan_get() increases the
+        * refcount in order to revive the object.
+        *
+        * if it does not exist then create it.
+        */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               return batadv_softif_create_vlan(bat_priv, vid);
+
+       /* recreate the sysfs object if it was already destroyed (and it should
+        * be, since we received a kill_vid() for this vlan)
+        */
+       if (!vlan->kobj) {
+               ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+               if (ret) {
+                       batadv_softif_vlan_free_ref(vlan);
+                       return ret;
+               }
+       }
+
+       /* add a new TT local entry. This one will be marked with the NOPURGE
+        * flag. This must be added again, even if the vlan object already
+        * exists, because the entry was deleted by kill_vid()
+        */
+       batadv_tt_local_add(bat_priv->soft_iface,
+                           bat_priv->soft_iface->dev_addr, vid,
+                           BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+       return 0;
 }
 
 /**
index d636bde..5f59e7f 100644 (file)
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_tt_local_entry *tt_local;
        struct batadv_tt_global_entry *tt_global = NULL;
+       struct batadv_softif_vlan *vlan;
        struct net_device *in_dev = NULL;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (!tt_local)
                goto out;
 
+       /* increase the refcounter of the related vlan */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
                   addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (unlikely(hash_added != 0)) {
                /* remove the reference for the hash */
                batadv_tt_local_entry_free_ref(tt_local);
+               batadv_softif_vlan_free_ref(vlan);
                goto out;
        }
 
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+       struct batadv_softif_vlan *vlan;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
        hlist_del_rcu(&tt_local_entry->common.hash_entry);
        batadv_tt_local_entry_free_ref(tt_local_entry);
 
+       /* decrease the reference held for this vlan */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       batadv_softif_vlan_free_ref(vlan);
+       batadv_softif_vlan_free_ref(vlan);
+
 out:
        if (tt_local_entry)
                batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
+       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                        tt_local = container_of(tt_common_entry,
                                                struct batadv_tt_local_entry,
                                                common);
+
+                       /* decrease the reference held for this vlan */
+                       vlan = batadv_softif_vlan_get(bat_priv,
+                                                     tt_common_entry->vid);
+                       batadv_softif_vlan_free_ref(vlan);
+                       batadv_softif_vlan_free_ref(vlan);
+
                        batadv_tt_local_entry_free_ref(tt_local);
                }
                spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
+       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
                                                common);
+
+                       /* decrease the reference held for this vlan */
+                       vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+                       batadv_softif_vlan_free_ref(vlan);
+                       batadv_softif_vlan_free_ref(vlan);
+
                        batadv_tt_local_entry_free_ref(tt_local);
                }
                spin_unlock_bh(list_lock);
index 34891a5..8854c05 100644 (file)
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
+ * @bat_priv: pointer to the mesh object
  * @vid: VLAN identifier
  * @kobj: kobject for sysfs vlan subdirectory
  * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
  * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+       struct batadv_priv *bat_priv;
        unsigned short vid;
        struct kobject *kobj;
        atomic_t ap_isolation;          /* boolean */
index 8671bc7..a7a27bc 100644 (file)
@@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)
 {
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             disc_work.work);
+       int refcnt = atomic_read(&conn->refcnt);
 
        BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
 
-       if (atomic_read(&conn->refcnt))
+       WARN_ON(refcnt < 0);
+
+       /* FIXME: It was observed that in a failed pairing scenario, refcnt
+        * drops below 0. Probably this is because l2cap_conn_del calls
+        * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
+        * dropped. After that loop hci_chan_del is called which also drops
+        * conn. For now make sure that ACL is alive if refcnt is higher than 0,
+        * otherwise drop it.
+        */
+       if (refcnt > 0)
                return;
 
        switch (conn->state) {
@@ -610,11 +620,6 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
        if (hci_update_random_address(req, false, &own_addr_type))
                return;
 
-       /* Save the address type used for this connnection attempt so we able
-        * to retrieve this information if we need it.
-        */
-       conn->src_type = own_addr_type;
-
        cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
        cp.scan_window = cpu_to_le16(hdev->le_scan_window);
        bacpy(&cp.peer_addr, &conn->dst);
@@ -894,7 +899,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
                /* If we're already encrypted set the REAUTH_PEND flag,
                 * otherwise set the ENCRYPT_PEND.
                 */
-               if (conn->key_type != 0xff)
+               if (conn->link_mode & HCI_LM_ENCRYPT)
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
                else
                        set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
index 0a43cce..e090bff 100644 (file)
@@ -2186,12 +2186,6 @@ static void hci_inq_req(struct hci_request *req, unsigned long opt)
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
 }
 
-static int wait_inquiry(void *word)
-{
-       schedule();
-       return signal_pending(current);
-}
-
 int hci_inquiry(void __user *arg)
 {
        __u8 __user *ptr = arg;
@@ -2242,7 +2236,7 @@ int hci_inquiry(void __user *arg)
                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
-               if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
+               if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }
index 21e5913..640c54e 100644 (file)
@@ -48,6 +48,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
        smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
        wake_up_bit(&hdev->flags, HCI_INQUIRY);
 
+       hci_dev_lock(hdev);
+       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+       hci_dev_unlock(hdev);
+
        hci_conn_check_pending(hdev);
 }
 
@@ -3537,7 +3541,11 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
                        cp.authentication = conn->auth_type;
 
                        /* Request MITM protection if our IO caps allow it
-                        * except for the no-bonding case
+                        * except for the no-bonding case.
+                        * conn->auth_type is not updated here since
+                        * that might cause the user confirmation to be
+                        * rejected in case the remote doesn't have the
+                        * IO capabilities for MITM.
                         */
                        if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
                            cp.authentication != HCI_AT_NO_BONDING)
@@ -3628,8 +3636,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
 
                /* If we're not the initiators request authorization to
                 * proceed from user space (mgmt_user_confirm with
-                * confirm_hint set to 1). */
-               if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
+                * confirm_hint set to 1). The exception is if neither
+                * side had MITM in which case we do auto-accept.
+                */
+               if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
+                   (loc_mitm || rem_mitm)) {
                        BT_DBG("Confirming auto-accept as acceptor");
                        confirm_hint = 1;
                        goto confirm;
index 6eabbe0..323f23c 100644 (file)
@@ -1663,7 +1663,13 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
        kfree_skb(conn->rx_skb);
 
        skb_queue_purge(&conn->pending_rx);
-       flush_work(&conn->pending_rx_work);
+
+       /* We cannot call flush_work(&conn->pending_rx_work) here since we
+        * might block if we are running on a worker from the same workqueue
+        * pending_rx_work is waiting on.
+        */
+       if (work_pending(&conn->pending_rx_work))
+               cancel_work_sync(&conn->pending_rx_work);
 
        l2cap_unregister_all_users(conn);
 
index ade3fb4..e137869 100644 (file)
@@ -787,11 +787,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
 
                /*change security for LE channels */
                if (chan->scid == L2CAP_CID_ATT) {
-                       if (!conn->hcon->out) {
-                               err = -EINVAL;
-                               break;
-                       }
-
                        if (smp_conn_security(conn->hcon, sec.level))
                                break;
                        sk->sk_state = BT_CONFIG;
index 0fce544..af8e0a6 100644 (file)
@@ -1047,6 +1047,43 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
        }
 }
 
+static void hci_stop_discovery(struct hci_request *req)
+{
+       struct hci_dev *hdev = req->hdev;
+       struct hci_cp_remote_name_req_cancel cp;
+       struct inquiry_entry *e;
+
+       switch (hdev->discovery.state) {
+       case DISCOVERY_FINDING:
+               if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+                       hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+               } else {
+                       cancel_delayed_work(&hdev->le_scan_disable);
+                       hci_req_add_le_scan_disable(req);
+               }
+
+               break;
+
+       case DISCOVERY_RESOLVING:
+               e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
+                                                    NAME_PENDING);
+               if (!e)
+                       return;
+
+               bacpy(&cp.bdaddr, &e->data.bdaddr);
+               hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
+                           &cp);
+
+               break;
+
+       default:
+               /* Passive scanning */
+               if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
+                       hci_req_add_le_scan_disable(req);
+               break;
+       }
+}
+
 static int clean_up_hci_state(struct hci_dev *hdev)
 {
        struct hci_request req;
@@ -1063,9 +1100,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
        if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
                disable_advertising(&req);
 
-       if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
-               hci_req_add_le_scan_disable(&req);
-       }
+       hci_stop_discovery(&req);
 
        list_for_each_entry(conn, &hdev->conn_hash.list, list) {
                struct hci_cp_disconnect dc;
@@ -2996,8 +3031,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
        }
 
        if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
-               /* Continue with pairing via SMP */
+               /* Continue with pairing via SMP. The hdev lock must be
+                * released as SMP may try to reacquire it for crypto
+                * purposes.
+                */
+               hci_dev_unlock(hdev);
                err = smp_user_confirm_reply(conn, mgmt_op, passkey);
+               hci_dev_lock(hdev);
 
                if (!err)
                        err = cmd_complete(sk, hdev->id, mgmt_op,
@@ -3574,8 +3614,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 {
        struct mgmt_cp_stop_discovery *mgmt_cp = data;
        struct pending_cmd *cmd;
-       struct hci_cp_remote_name_req_cancel cp;
-       struct inquiry_entry *e;
        struct hci_request req;
        int err;
 
@@ -3605,52 +3643,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
 
        hci_req_init(&req, hdev);
 
-       switch (hdev->discovery.state) {
-       case DISCOVERY_FINDING:
-               if (test_bit(HCI_INQUIRY, &hdev->flags)) {
-                       hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
-               } else {
-                       cancel_delayed_work(&hdev->le_scan_disable);
-
-                       hci_req_add_le_scan_disable(&req);
-               }
-
-               break;
+       hci_stop_discovery(&req);
 
-       case DISCOVERY_RESOLVING:
-               e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
-                                                    NAME_PENDING);
-               if (!e) {
-                       mgmt_pending_remove(cmd);
-                       err = cmd_complete(sk, hdev->id,
-                                          MGMT_OP_STOP_DISCOVERY, 0,
-                                          &mgmt_cp->type,
-                                          sizeof(mgmt_cp->type));
-                       hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
-                       goto unlock;
-               }
-
-               bacpy(&cp.bdaddr, &e->data.bdaddr);
-               hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
-                           &cp);
-
-               break;
-
-       default:
-               BT_DBG("unknown discovery state %u", hdev->discovery.state);
-
-               mgmt_pending_remove(cmd);
-               err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
-                                  MGMT_STATUS_FAILED, &mgmt_cp->type,
-                                  sizeof(mgmt_cp->type));
+       err = hci_req_run(&req, stop_discovery_complete);
+       if (!err) {
+               hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
                goto unlock;
        }
 
-       err = hci_req_run(&req, stop_discovery_complete);
-       if (err < 0)
-               mgmt_pending_remove(cmd);
-       else
-               hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+       mgmt_pending_remove(cmd);
+
+       /* If no HCI commands were sent we're done */
+       if (err == -ENODATA) {
+               err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
+                                  &mgmt_cp->type, sizeof(mgmt_cp->type));
+               hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+       }
 
 unlock:
        hci_dev_unlock(hdev);
index 3d1cc16..e33a982 100644 (file)
@@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = {
        { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP     },
 };
 
+static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
+{
+       /* If either side has unknown io_caps, use JUST WORKS */
+       if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
+           remote_io > SMP_IO_KEYBOARD_DISPLAY)
+               return JUST_WORKS;
+
+       return gen_method[remote_io][local_io];
+}
+
 static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
                                                u8 local_io, u8 remote_io)
 {
@@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
        BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
 
        /* If neither side wants MITM, use JUST WORKS */
-       /* If either side has unknown io_caps, use JUST WORKS */
        /* Otherwise, look up method from the table */
-       if (!(auth & SMP_AUTH_MITM) ||
-           local_io > SMP_IO_KEYBOARD_DISPLAY ||
-           remote_io > SMP_IO_KEYBOARD_DISPLAY)
+       if (!(auth & SMP_AUTH_MITM))
                method = JUST_WORKS;
        else
-               method = gen_method[remote_io][local_io];
+               method = get_auth_method(smp, local_io, remote_io);
 
        /* If not bonding, don't ask user to confirm a Zero TK */
        if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -544,7 +551,7 @@ static u8 smp_random(struct smp_chan *smp)
                hci_le_start_enc(hcon, ediv, rand, stk);
                hcon->enc_key_size = smp->enc_key_size;
        } else {
-               u8 stk[16];
+               u8 stk[16], auth;
                __le64 rand = 0;
                __le16 ediv = 0;
 
@@ -556,8 +563,13 @@ static u8 smp_random(struct smp_chan *smp)
                memset(stk + smp->enc_key_size, 0,
                       SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
 
+               if (hcon->pending_sec_level == BT_SECURITY_HIGH)
+                       auth = 1;
+               else
+                       auth = 0;
+
                hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
-                           HCI_SMP_STK_SLAVE, 0, stk, smp->enc_key_size,
+                           HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
                            ediv, rand);
        }
 
@@ -664,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
 {
        struct smp_cmd_pairing rsp, *req = (void *) skb->data;
        struct smp_chan *smp;
-       u8 key_size, auth;
+       u8 key_size, auth, sec_level;
        int ret;
 
        BT_DBG("conn %p", conn);
@@ -690,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
        /* We didn't start the pairing, so match remote */
        auth = req->auth_req;
 
-       conn->hcon->pending_sec_level = authreq_to_seclevel(auth);
+       sec_level = authreq_to_seclevel(auth);
+       if (sec_level > conn->hcon->pending_sec_level)
+               conn->hcon->pending_sec_level = sec_level;
+
+       /* If we need MITM, check that it can be achieved */
+       if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+               u8 method;
+
+               method = get_auth_method(smp, conn->hcon->io_capability,
+                                        req->io_capability);
+               if (method == JUST_WORKS || method == JUST_CFM)
+                       return SMP_AUTH_REQUIREMENTS;
+       }
 
        build_pairing_cmd(conn, req, &rsp, auth);
 
@@ -738,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
        if (check_enc_key_size(conn, key_size))
                return SMP_ENC_KEY_SIZE;
 
+       /* If we need MITM, check that it can be achieved */
+       if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
+               u8 method;
+
+               method = get_auth_method(smp, req->io_capability,
+                                        rsp->io_capability);
+               if (method == JUST_WORKS || method == JUST_CFM)
+                       return SMP_AUTH_REQUIREMENTS;
+       }
+
        get_random_bytes(smp->prnd, sizeof(smp->prnd));
 
        smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -833,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
        struct smp_cmd_pairing cp;
        struct hci_conn *hcon = conn->hcon;
        struct smp_chan *smp;
+       u8 sec_level;
 
        BT_DBG("conn %p", conn);
 
@@ -842,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
        if (!(conn->hcon->link_mode & HCI_LM_MASTER))
                return SMP_CMD_NOTSUPP;
 
-       hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);
+       sec_level = authreq_to_seclevel(rp->auth_req);
+       if (sec_level > hcon->pending_sec_level)
+               hcon->pending_sec_level = sec_level;
 
        if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
                return 0;
@@ -896,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
        if (smp_sufficient_security(hcon, sec_level))
                return 1;
 
+       if (sec_level > hcon->pending_sec_level)
+               hcon->pending_sec_level = sec_level;
+
        if (hcon->link_mode & HCI_LM_MASTER)
-               if (smp_ltk_encrypt(conn, sec_level))
-                       goto done;
+               if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+                       return 0;
 
        if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
                return 0;
@@ -913,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
         * requires it.
         */
        if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
-           sec_level > BT_SECURITY_MEDIUM)
+           hcon->pending_sec_level > BT_SECURITY_MEDIUM)
                authreq |= SMP_AUTH_MITM;
 
        if (hcon->link_mode & HCI_LM_MASTER) {
@@ -932,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
 
        set_bit(SMP_FLAG_INITIATOR, &smp->flags);
 
-done:
-       hcon->pending_sec_level = sec_level;
-
        return 0;
 }
 
index 9a76eaf..bc8aeef 100644 (file)
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
 {
        int tot_len;
 
-       if (kern_msg->msg_namelen) {
+       if (kern_msg->msg_name && kern_msg->msg_namelen) {
                if (mode == VERIFY_READ) {
                        int err = move_addr_to_kernel(kern_msg->msg_name,
                                                      kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
                        if (err < 0)
                                return err;
                }
-               if (kern_msg->msg_name)
-                       kern_msg->msg_name = kern_address;
-       } else
+               kern_msg->msg_name = kern_address;
+       } else {
                kern_msg->msg_name = NULL;
+               kern_msg->msg_namelen = 0;
+       }
 
        tot_len = iov_from_user_compat_to_kern(kern_iov,
                                          (struct compat_iovec __user *)kern_msg->msg_iov,
index 30eedf6..367a586 100644 (file)
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly;   /* Taps */
 static struct list_head offload_base __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+                                        struct net_device *dev,
+                                        struct netdev_notifier_info *info);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
        if (dev->flags & IFF_UP) {
-               call_netdevice_notifiers(NETDEV_CHANGE, dev);
+               struct netdev_notifier_change_info change_info;
+
+               change_info.flags_changed = 0;
+               call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+                                             &change_info.info);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
 }
@@ -4089,6 +4096,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb->vlan_tci = 0;
        skb->dev = napi->dev;
        skb->skb_iif = 0;
+       skb->encapsulation = 0;
+       skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 
        napi->skb = skb;
@@ -4227,9 +4236,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
        napi->weight = weight_p;
        local_irq_disable();
-       while (work < quota) {
+       while (1) {
                struct sk_buff *skb;
-               unsigned int qlen;
 
                while ((skb = __skb_dequeue(&sd->process_queue))) {
                        local_irq_enable();
@@ -4243,24 +4251,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
                }
 
                rps_lock(sd);
-               qlen = skb_queue_len(&sd->input_pkt_queue);
-               if (qlen)
-                       skb_queue_splice_tail_init(&sd->input_pkt_queue,
-                                                  &sd->process_queue);
-
-               if (qlen < quota - work) {
+               if (skb_queue_empty(&sd->input_pkt_queue)) {
                        /*
                         * Inline a custom version of __napi_complete().
                         * only current cpu owns and manipulates this napi,
-                        * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-                        * we can use a plain write instead of clear_bit(),
+                        * and NAPI_STATE_SCHED is the only possible flag set
+                        * on backlog.
+                        * We can use a plain write instead of clear_bit(),
                         * and we dont need an smp_mb() memory barrier.
                         */
                        list_del(&napi->poll_list);
                        napi->state = 0;
+                       rps_unlock(sd);
 
-                       quota = work + qlen;
+                       break;
                }
+
+               skb_queue_splice_tail_init(&sd->input_pkt_queue,
+                                          &sd->process_queue);
                rps_unlock(sd);
        }
        local_irq_enable();
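
Stripped of quota accounting and interrupt plumbing, the reworked backlog
loop has this shape: drain the private process queue, then, under the lock,
either complete because the input queue is observed empty or splice
everything across and go around again. A single-threaded sketch of that
control flow (arrays stand in for the skb queues):

    #include <stdio.h>

    #define QLEN 16
    static int input[QLEN], in_n;    /* filled by other CPUs via RPS */
    static int process[QLEN], pr_n;  /* private to this backlog NAPI */

    static void backlog_poll(void)
    {
        for (;;) {
            for (int i = 0; i < pr_n; i++)
                printf("deliver pkt %d\n", process[i]);
            pr_n = 0;

            /* rps_lock(sd) would be taken here */
            if (in_n == 0) {
                /* complete under the lock (list_del + state = 0), so
                 * no packet can slip in between check and completion */
                break;
            }
            for (int i = 0; i < in_n; i++)   /* splice input->process */
                process[pr_n++] = input[i];
            in_n = 0;
            /* rps_unlock(sd) */
        }
    }

    int main(void)
    {
        for (in_n = 0; in_n < 5; in_n++)
            input[in_n] = in_n;
        backlog_poll();
        return 0;
    }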
index 80d6286..a028409 100644 (file)
@@ -269,6 +269,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_destroy_rcu(struct rcu_head *head)
+{
+       struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+       dst = dst_destroy(dst);
+       if (dst)
+               __dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
        if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
 
                newrefcnt = atomic_dec_return(&dst->__refcnt);
                WARN_ON(newrefcnt < 0);
-               if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-                       dst = dst_destroy(dst);
-                       if (dst)
-                               __dst_free(dst);
-               }
+               if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+                       call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
 }
 EXPORT_SYMBOL(dst_release);
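
The switch from synchronous dst_destroy() to call_rcu() is needed because a
reader inside an RCU read-side critical section may still dereference the
dst after its refcount reaches zero; the final teardown must therefore wait
out a grace period. A toy model of the deferral, with a fake
single-threaded call_rcu()/grace period standing in for the real machinery:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head {
        void (*func)(struct rcu_head *);
        struct rcu_head *next;
    };
    struct dst_entry { int refcnt; struct rcu_head rcu_head; };

    static struct rcu_head *rcu_queue;

    static void call_rcu(struct rcu_head *h, void (*func)(struct rcu_head *))
    {
        h->func = func;           /* just queue the callback for later */
        h->next = rcu_queue;
        rcu_queue = h;
    }

    static void run_grace_period(void)   /* "all old readers finished" */
    {
        while (rcu_queue) {
            struct rcu_head *h = rcu_queue;
            rcu_queue = h->next;
            h->func(h);
        }
    }

    static void dst_destroy_rcu(struct rcu_head *head)
    {
        struct dst_entry *dst =
            container_of(head, struct dst_entry, rcu_head);
        printf("destroying dst after grace period\n");
        free(dst);
    }

    int main(void)
    {
        struct dst_entry *dst = calloc(1, sizeof(*dst));

        dst->refcnt = 1;
        if (--dst->refcnt == 0)   /* last reference dropped... */
            call_rcu(&dst->rcu_head, dst_destroy_rcu); /* ...free later */
        run_grace_period();       /* now safe: readers are gone */
        return 0;
    }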
index 735fad8..1dbf646 100644 (file)
@@ -840,11 +840,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
-       if (len <= 0 || len >= BPF_MAXINSNS)
+       if (len <= 0 || len > BPF_MAXINSNS)
                return -EINVAL;
 
        if (new_prog) {
-               addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
+               addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
                if (!addrs)
                        return -ENOMEM;
        }
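
Besides the off-by-one on BPF_MAXINSNS, the hunk swaps
kzalloc(len * sizeof(*addrs)) for kcalloc(): the point is that kcalloc
checks the multiplication for overflow, while an open-coded product
silently wraps. The same behaviour can be reproduced in userspace with
malloc vs calloc:

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    int main(void)
    {
        size_t len = SIZE_MAX / 4 + 1, size = 8;

        void *a = malloc(len * size);  /* product wraps to 0: "succeeds" */
        void *b = calloc(len, size);   /* overflow detected: NULL */

        printf("malloc(len*size)=%p (undersized)  calloc=%p\n", a, b);
        free(a);
        return 0;
    }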
@@ -1101,7 +1101,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
 
        BUILD_BUG_ON(BPF_MEMWORDS > 16);
 
-       masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+       masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;
 
@@ -1382,7 +1382,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
        fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
        if (fp_new) {
                *fp_new = *fp;
-               /* As we're kepping orig_prog in fp_new along,
+               /* As we're keeping orig_prog in fp_new along,
                 * we need to make sure we're not evicting it
                 * from the old fp.
                 */
@@ -1524,8 +1524,8 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
 
 /**
  *     sk_unattached_filter_create - create an unattached filter
- *     @fprog: the filter program
  *     @pfp: the unattached filter that is created
+ *     @fprog: the filter program
  *
  * Create a filter independent of any socket. We first run some
  * sanity checks on it to make sure it does not explode on us later.
index b618694..e1ec45a 100644 (file)
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 {
        int size, ct, err;
 
-       if (m->msg_namelen) {
+       if (m->msg_name && m->msg_namelen) {
                if (mode == VERIFY_READ) {
                        void __user *namep;
                        namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
                        if (err < 0)
                                return err;
                }
-               if (m->msg_name)
-                       m->msg_name = address;
+               m->msg_name = address;
        } else {
                m->msg_name = NULL;
+               m->msg_namelen = 0;
        }
 
        size = m->msg_iovlen * sizeof(struct iovec);
@@ -74,61 +74,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
        return err;
 }
 
-/*
- *     Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-                     int offset, int len)
-{
-       int copy;
-       for (; len > 0; ++iov) {
-               /* Skip over the finished iovecs */
-               if (unlikely(offset >= iov->iov_len)) {
-                       offset -= iov->iov_len;
-                       continue;
-               }
-               copy = min_t(unsigned int, iov->iov_len - offset, len);
-               if (copy_to_user(iov->iov_base + offset, kdata, copy))
-                       return -EFAULT;
-               offset = 0;
-               kdata += copy;
-               len -= copy;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- *     Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                       int offset, int len)
-{
-       /* Skip over the finished iovecs */
-       while (offset >= iov->iov_len) {
-               offset -= iov->iov_len;
-               iov++;
-       }
-
-       while (len > 0) {
-               u8 __user *base = iov->iov_base + offset;
-               int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-               offset = 0;
-               if (copy_from_user(kdata, base, copy))
-                       return -EFAULT;
-               len -= copy;
-               kdata += copy;
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
-
 /*
  *     And now for the all-in-one: copy and checksum from a user iovec
  *     directly to a datagram
index 32d872e..ef31fef 100644 (file)
@@ -2249,7 +2249,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = pn->flags | NTF_PROXY;
-       ndm->ndm_type    = NDA_DST;
+       ndm->ndm_type    = RTN_UNICAST;
        ndm->ndm_ifindex = pn->dev->ifindex;
        ndm->ndm_state   = NUD_NONE;
 
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
                       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
        } else {
+               struct neigh_table *tbl = p->tbl;
                dev_name_source = "default";
-               t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
-               t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
-               t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
-               t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+               t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
        }
 
        if (handler) {
index 30d903b..1f2a126 100644 (file)
@@ -107,5 +107,5 @@ struct cgroup_subsys net_cls_cgrp_subsys = {
        .css_online             = cgrp_css_online,
        .css_free               = cgrp_css_free,
        .attach                 = cgrp_attach,
-       .base_cftypes           = ss_files,
+       .legacy_cftypes         = ss_files,
 };
index 2f385b9..cbd0a19 100644 (file)
@@ -249,7 +249,7 @@ struct cgroup_subsys net_prio_cgrp_subsys = {
        .css_online     = cgrp_css_online,
        .css_free       = cgrp_css_free,
        .attach         = net_prio_attach,
-       .base_cftypes   = ss_files,
+       .legacy_cftypes = ss_files,
 };
 
 static int netprio_device_event(struct notifier_block *unused,
index 9cd5344..c1a3303 100644 (file)
@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                                                            skb_put(nskb, len),
                                                            len, 0);
                        SKB_GSO_CB(nskb)->csum_start =
-                           skb_headroom(nskb) + offset;
+                           skb_headroom(nskb) + doffset;
                        continue;
                }
 
index 9acec61..dd8696a 100644 (file)
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
                goto put;
 
        memcpy(*_result, upayload->data, len);
-       *_result[len] = '\0';
+       (*_result)[len] = '\0';
 
        if (_expiry)
                *_expiry = rkey->expiry;
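
The one-character fix above corrects an operator-precedence bug: [] binds tighter than unary *, so *_result[len] parses as *(_result[len]) and indexes the pointer-to-pointer itself. A standalone demonstration (hypothetical buffer names):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char buf[8] = "payload";        /* 7 chars + room for NUL */
            char *result = buf;
            char **_result = &result;
            size_t len = strlen(buf);

            (*_result)[len] = '\0';         /* correct: writes buf[7] */
            /* *_result[len] would mean *(_result[len]): it reads the
             * non-existent _result[7] and dereferences garbage. */
            printf("%s\n", *_result);
            return 0;
    }
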
index d5e6836..d156b3c 100644 (file)
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
        int proto = iph->protocol;
        int err = -ENOSYS;
 
+       if (skb->encapsulation)
+               skb_set_inner_network_header(skb, nhoff);
+
        csum_replace2(&iph->check, iph->tot_len, newlen);
        iph->tot_len = newlen;
 
index 4e9619b..0485bf7 100644 (file)
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 
        skb_push(skb, hdr_len);
 
+       skb_reset_transport_header(skb);
        greh = (struct gre_base_hdr *)skb->data;
        greh->flags = tnl_flags_to_gre_flags(tpi->flags);
        greh->protocol = tpi->proto;
index eb92deb..f0bdd47 100644 (file)
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
        int err = -ENOENT;
        __be16 type;
 
+       skb->encapsulation = 1;
+       skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
+
        type = greh->protocol;
        if (greh->flags & GRE_KEY)
                grehlen += GRE_HEADER_SECTION;
index 79c3d94..42b7bcf 100644 (file)
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
                                /* fall through */
                        case 0:
                                info = ntohs(icmph->un.frag.mtu);
-                               if (!info)
-                                       goto out;
                        }
                        break;
                case ICMP_SR_FAILED:
index 6748d42..db710b0 100644 (file)
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
        rtnl_lock();
        in_dev = ip_mc_find_dev(net, imr);
+       if (!in_dev) {
+               ret = -ENODEV;
+               goto out;
+       }
        ifindex = imr->imr_ifindex;
        for (imlp = &inet->mc_list;
             (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
                *imlp = iml->next_rcu;
 
-               if (in_dev)
-                       ip_mc_dec_group(in_dev, group);
+               ip_mc_dec_group(in_dev, group);
                rtnl_unlock();
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
                kfree_rcu(iml, rcu);
                return 0;
        }
-       if (!in_dev)
-               ret = -ENODEV;
+out:
        rtnl_unlock();
        return ret;
 }
index 5e7aece..ad38249 100644 (file)
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
                        optptr++;
                        continue;
                }
+               if (unlikely(l < 2)) {
+                       pp_ptr = optptr;
+                       goto error;
+               }
                optlen = optptr[1];
                if (optlen < 2 || optlen > l) {
                        pp_ptr = optptr;
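
The added l < 2 guard rejects a truncated final option before optptr[1] (the length byte) is read. A minimal standalone model of the option walk, assuming the usual single-byte END/NOOP encoding:

    #include <stddef.h>

    static int options_ok(const unsigned char *opt, size_t l)
    {
            size_t optlen;

            while (l > 0) {
                    if (opt[0] == 0)                /* IPOPT_END */
                            return 1;
                    if (opt[0] == 1) {              /* IPOPT_NOOP: 1 byte */
                            opt++;
                            l--;
                            continue;
                    }
                    if (l < 2)          /* no room for the length byte */
                            return 0;
                    optlen = opt[1];
                    if (optlen < 2 || optlen > l)   /* malformed length */
                            return 0;
                    opt += optlen;
                    l -= optlen;
            }
            return 1;
    }
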
index 097b3e7..6f9de61 100644 (file)
@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
 {
        struct dst_entry *old_dst;
 
-       if (dst) {
-               if (dst->flags & DST_NOCACHE)
-                       dst = NULL;
-               else
-                       dst_clone(dst);
-       }
+       dst_clone(dst);
        old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
        dst_release(old_dst);
 }
@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 
        rcu_read_lock();
        dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+       if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+               dst = NULL;
        if (dst) {
                if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-                       rcu_read_unlock();
                        tunnel_dst_reset(t);
-                       return NULL;
+                       dst_release(dst);
+                       dst = NULL;
                }
-               dst_hold(dst);
        }
        rcu_read_unlock();
        return (struct rtable *)dst;
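
tunnel_rtable_get() now takes its reference with atomic_inc_not_zero(), so a cached dst whose refcount already hit zero is treated as a cache miss instead of being resurrected. A simplified userspace model of that primitive using C11 atomics:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct obj { atomic_int refcnt; };

    /* Take a reference only if the object is still live (refcnt > 0). */
    static bool get_ref_if_live(struct obj *o)
    {
            int old = atomic_load(&o->refcnt);

            while (old != 0) {
                    /* on failure the CAS reloads old; re-check for zero */
                    if (atomic_compare_exchange_weak(&o->refcnt, &old,
                                                     old + 1))
                            return true;    /* reference taken */
            }
            return false;                   /* already dying: miss */
    }
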
@@ -173,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
+                   t->parms.iph.saddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
@@ -189,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
        head = &itn->tunnels[hash];
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
-               if ((local != t->parms.iph.saddr &&
-                    (local != t->parms.iph.daddr ||
-                     !ipv4_is_multicast(local))) ||
-                   !(t->dev->flags & IFF_UP))
+               if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
+                   (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
+                       continue;
+
+               if (!(t->dev->flags & IFF_UP))
                        continue;
 
                if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -209,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
+                   t->parms.iph.saddr != 0 ||
+                   t->parms.iph.daddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
index 082239f..1901998 100644 (file)
@@ -457,8 +457,31 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
        return neigh_create(&arp_tbl, pkey, dev);
 }
 
-atomic_t *ip_idents __read_mostly;
-EXPORT_SYMBOL(ip_idents);
+#define IP_IDENTS_SZ 2048u
+struct ip_ident_bucket {
+       atomic_t        id;
+       u32             stamp32;
+};
+
+static struct ip_ident_bucket *ip_idents __read_mostly;
+
+/* In order to protect privacy, we add a perturbation to identifiers
+ * if one generator is seldom used. This makes hard for an attacker
+ * to infer how many packets were sent between two points in time.
+ */
+u32 ip_idents_reserve(u32 hash, int segs)
+{
+       struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
+       u32 old = ACCESS_ONCE(bucket->stamp32);
+       u32 now = (u32)jiffies;
+       u32 delta = 0;
+
+       if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+               delta = prandom_u32_max(now - old);
+
+       return atomic_add_return(segs + delta, &bucket->id) - segs;
+}
+EXPORT_SYMBOL(ip_idents_reserve);
 
 void __ip_select_ident(struct iphdr *iph, int segs)
 {
@@ -467,7 +490,10 @@ void __ip_select_ident(struct iphdr *iph, int segs)
 
        net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
 
-       hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+       hash = jhash_3words((__force u32)iph->daddr,
+                           (__force u32)iph->saddr,
+                           iph->protocol,
+                           ip_idents_hashrnd);
        id = ip_idents_reserve(hash, segs);
        iph->id = htons(id);
 }
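
A userspace model of the reservation above (assumptions: rand() stands in for prandom_u32_max(), and the jiffies clock is passed in as now). An idle bucket gets a random bump bounded by its idle time, so consecutive observed IDs no longer reveal how many packets were sent in between:

    #include <stdint.h>
    #include <stdlib.h>

    struct bucket { uint32_t id; uint32_t stamp32; };

    static uint32_t reserve(struct bucket *b, uint32_t now, uint32_t segs)
    {
            uint32_t delta = 0;

            if (b->stamp32 != now) {        /* bucket was idle */
                    delta = (uint32_t)rand() % (now - b->stamp32);
                    b->stamp32 = now;
            }
            b->id += segs + delta;
            return b->id - segs;            /* first ID of the reserved run */
    }
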
@@ -1010,7 +1036,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;
-       struct dst_entry *dst;
+       struct dst_entry *odst = NULL;
        bool new = false;
 
        bh_lock_sock(sk);
@@ -1018,16 +1044,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        if (!ip_sk_accept_pmtu(sk))
                goto out;
 
-       rt = (struct rtable *) __sk_dst_get(sk);
+       odst = sk_dst_get(sk);
 
-       if (sock_owned_by_user(sk) || !rt) {
+       if (sock_owned_by_user(sk) || !odst) {
                __ipv4_sk_update_pmtu(skb, sk, mtu);
                goto out;
        }
 
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
-       if (!__sk_dst_check(sk, 0)) {
+       rt = (struct rtable *)odst;
+       if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                if (IS_ERR(rt))
                        goto out;
@@ -1037,8 +1064,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
        __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
 
-       dst = dst_check(&rt->dst, 0);
-       if (!dst) {
+       if (!dst_check(&rt->dst, 0)) {
                if (new)
                        dst_release(&rt->dst);
 
@@ -1050,10 +1076,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        }
 
        if (new)
-               __sk_dst_set(sk, &rt->dst);
+               sk_dst_set(sk, &rt->dst);
 
 out:
        bh_unlock_sock(sk);
+       dst_release(odst);
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
index eb1dde3..9d2118e 100644 (file)
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (unlikely(tp->repair)) {
                if (tp->repair_queue == TCP_RECV_QUEUE) {
                        copied = tcp_send_rcvq(sk, msg, size);
-                       goto out;
+                       goto out_nopush;
                }
 
                err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
 out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+out_nopush:
        release_sock(sk);
        return copied + copied_syn;
 
index 62e48cf..9771563 100644 (file)
@@ -131,7 +131,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
                                      struct dst_entry *dst,
                                      struct request_sock *req)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
 
index 40661fc..40639c2 100644 (file)
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
        }
 
        /* D-SACK for already forgotten data... Do dumb counting. */
-       if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+       if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
                tp->undo_retrans--;
@@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
                        unsigned int new_len = (pkt_len / mss) * mss;
                        if (!in_sack && new_len < pkt_len) {
                                new_len += mss;
-                               if (new_len > skb->len)
+                               if (new_len >= skb->len)
                                        return 0;
                        }
                        pkt_len = new_len;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
-               if (tp->undo_marker && tp->undo_retrans &&
+               if (tp->undo_marker && tp->undo_retrans > 0 &&
                    after(end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
        tp->lost_out = 0;
 
        tp->undo_marker = 0;
-       tp->undo_retrans = 0;
+       tp->undo_retrans = -1;
 }
 
 void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
        tp->prior_ssthresh = 0;
        tp->undo_marker = tp->snd_una;
-       tp->undo_retrans = tp->retrans_out;
+       tp->undo_retrans = tp->retrans_out ? : -1;
 
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                if (!ece_ack)
index f7a2ec3..3af5226 100644 (file)
@@ -222,7 +222,7 @@ static struct cftype tcp_files[] = {
 
 static int __init tcp_memcontrol_init(void)
 {
-       WARN_ON(cgroup_add_cftypes(&memory_cgrp_subsys, tcp_files));
+       WARN_ON(cgroup_add_legacy_cftypes(&memory_cgrp_subsys, tcp_files));
        return 0;
 }
 __initcall(tcp_memcontrol_init);
index 4e86c59..55046ec 100644 (file)
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 
        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
        return tcp_gro_complete(skb);
 }
index d92bce0..179b51e 100644 (file)
@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-               tp->undo_retrans += tcp_skb_pcount(skb);
-
                /* snd_nxt is stored to detect loss of retransmitted segment,
                 * see tcp_input.c tcp_sacktag_write_queue().
                 */
@@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        } else if (err != -EBUSY) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
+
+       if (tp->undo_retrans < 0)
+               tp->undo_retrans = 0;
+       tp->undo_retrans += tcp_skb_pcount(skb);
        return err;
 }
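
undo_retrans now starts at -1 rather than 0: the D-SACK paths above only decrement while undo_retrans > 0, and the retransmit path clears the sentinel before counting, so a spoofed D-SACK can no longer fake a completed undo. A compact standalone sketch of the invariant:

    /* -1: no retransmission recorded yet; >= 0: outstanding count. */
    static void on_retransmit(int *undo_retrans, int pcount)
    {
            if (*undo_retrans < 0)
                    *undo_retrans = 0;      /* clear the sentinel first */
            *undo_retrans += pcount;
    }

    static void on_dsack(int *undo_retrans)
    {
            if (*undo_retrans > 0)          /* never driven below zero */
                    (*undo_retrans)--;
    }
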
 
index d92f94b..7d5a866 100644 (file)
@@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                goto csum_error;
 
 
-       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+               UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                is_udplite);
                goto drop;
+       }
 
        rc = 0;
 
index cb9df0e..45702b8 100644 (file)
@@ -545,6 +545,8 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
 
        hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+       hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
+
        id = ip_idents_reserve(hash, 1);
        fhdr->identification = htonl(id);
 }
index 08b367c..617f095 100644 (file)
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
        len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
        len -= skb_network_header_len(skb);
 
-       /* Drop queries with not link local source */
-       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+       /* RFC3810 6.2
+        * Upon reception of an MLD message that contains a Query, the node
+        * checks if the source address of the message is a valid link-local
+        * address, if the Hop Limit is set to 1, and if the Router Alert
+        * option is present in the Hop-By-Hop Options header of the IPv6
+        * packet.  If any of these checks fails, the packet is dropped.
+        */
+       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
+           ipv6_hdr(skb)->hop_limit != 1 ||
+           !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
+           IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
                return -EINVAL;
 
        idev = __in6_dev_get(skb->dev);
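
All three RFC 3810 6.2 conditions above must hold for the query to be accepted; expressed as a single predicate (hypothetical helper, userspace sketch):

    #include <stdbool.h>

    static bool mld_query_acceptable(bool src_link_local, int hop_limit,
                                     bool router_alert_mld)
    {
            /* drop unless the source is link-local, the hop limit is 1,
             * and the Router Alert (MLD) hop-by-hop option is present */
            return src_link_local && hop_limit == 1 && router_alert_mld;
    }
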
index 8517d3c..01b0ff9 100644 (file)
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 
        th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
                                  &iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
 
        return tcp_gro_complete(skb);
 }
index 95c8347..7092ff7 100644 (file)
@@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                        goto csum_error;
        }
 
-       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+               UDP6_INC_STATS_BH(sock_net(sk),
+                                 UDP_MIB_RCVBUFERRORS, is_udplite);
                goto drop;
+       }
 
        skb_dst_drop(skb);
 
@@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        bh_unlock_sock(sk);
 
        return rc;
+
 csum_error:
        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
index 950909f..13752d9 100644 (file)
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
        int err;
 
        if (level != SOL_PPPOL2TP)
-               return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+               return -EINVAL;
 
        if (optlen < sizeof(int))
                return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
        struct pppol2tp_session *ps;
 
        if (level != SOL_PPPOL2TP)
-               return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+               return -EINVAL;
 
        if (get_user(len, optlen))
                return -EFAULT;
index d7513a5..592f4b1 100644 (file)
@@ -472,12 +472,15 @@ static void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
 {
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        struct ieee80211_local *local = sdata->local;
-       struct rate_control_ref *ref = local->rate_ctrl;
+       struct rate_control_ref *ref = NULL;
        struct timespec uptime;
        u64 packets = 0;
        u32 thr = 0;
        int i, ac;
 
+       if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
+               ref = local->rate_ctrl;
+
        sinfo->generation = sdata->local->sta_generation;
 
        sinfo->filled = STATION_INFO_INACTIVE_TIME |
index 5214686..1a252c6 100644 (file)
@@ -414,6 +414,9 @@ ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx)
        if (ieee80211_has_order(hdr->frame_control))
                return TX_CONTINUE;
 
+       if (ieee80211_is_probe_req(hdr->frame_control))
+               return TX_CONTINUE;
+
        if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
                info->hw_queue = tx->sdata->vif.cab_queue;
 
@@ -463,6 +466,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 {
        struct sta_info *sta = tx->sta;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
+       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
        struct ieee80211_local *local = tx->local;
 
        if (unlikely(!sta))
@@ -473,6 +477,12 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
                     !(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER))) {
                int ac = skb_get_queue_mapping(tx->skb);
 
+               if (ieee80211_is_mgmt(hdr->frame_control) &&
+                   !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
+                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
+                       return TX_CONTINUE;
+               }
+
                ps_dbg(sta->sdata, "STA %pM aid %d: PS buffer for AC %d\n",
                       sta->sta.addr, sta->sta.aid, ac);
                if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
@@ -531,19 +541,9 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-       struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data;
-
        if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
                return TX_CONTINUE;
 
-       if (ieee80211_is_mgmt(hdr->frame_control) &&
-           !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
-               if (tx->flags & IEEE80211_TX_UNICAST)
-                       info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
-               return TX_CONTINUE;
-       }
-
        if (tx->flags & IEEE80211_TX_UNICAST)
                return ieee80211_tx_h_unicast_ps_buf(tx);
        else
index 6886601..a6cda52 100644 (file)
@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
        int err;
 
        /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len);
+       skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
+                           24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
        if (!skb)
                return;
 
-       skb_reserve(skb, local->hw.extra_tx_headroom);
+       skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
 
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
        memset(mgmt, 0, 24 + 6);
index a8eb0a8..610e19c 100644 (file)
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
                        ip_vs_control_del(cp);
 
                if (cp->flags & IP_VS_CONN_F_NFCT) {
-                       ip_vs_conn_drop_conntrack(cp);
                        /* Do not access conntracks during subsys cleanup
                         * because nf_conntrack_find_get cannot be used after
                         * conntrack cleanup for the net.
index c42e83d..581a658 100644 (file)
@@ -3778,6 +3778,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
        cancel_delayed_work_sync(&ipvs->defense_work);
        cancel_work_sync(&ipvs->defense_work.work);
        unregister_net_sysctl_table(ipvs->sysctl_hdr);
+       ip_vs_stop_estimator(net, &ipvs->tot_stats);
 }
 
 #else
@@ -3840,7 +3841,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
        struct netns_ipvs *ipvs = net_ipvs(net);
 
        ip_vs_trash_cleanup(net);
-       ip_vs_stop_estimator(net, &ipvs->tot_stats);
        ip_vs_control_net_cleanup_sysctl(net);
        remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
        remove_proc_entry("ip_vs_stats", net->proc_net);
index 5857963..300ed1e 100644 (file)
@@ -596,6 +596,9 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
 #endif
 #ifdef CONFIG_NF_CONNTRACK_MARK
               + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
 #endif
               + ctnetlink_proto_size(ct)
               + ctnetlink_label_size(ct)
@@ -1150,7 +1153,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
 static int
 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
 {
-       struct nf_conn *ct, *last = NULL;
+       struct nf_conn *ct, *last;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,8 +1166,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
        if (cb->args[2])
                return 0;
 
-       if (cb->args[0] == nr_cpu_ids)
-               return 0;
+       last = (struct nf_conn *)cb->args[1];
 
        for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
                struct ct_pcpu *pcpu;
@@ -1174,7 +1176,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 
                pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
                spin_lock_bh(&pcpu->lock);
-               last = (struct nf_conn *)cb->args[1];
                list = dying ? &pcpu->dying : &pcpu->unconfirmed;
 restart:
                hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1193,7 +1194,9 @@ restart:
                                                  ct);
                        rcu_read_unlock();
                        if (res < 0) {
-                               nf_conntrack_get(&ct->ct_general);
+                               if (!atomic_inc_not_zero(&ct->ct_general.use))
+                                       continue;
+                               cb->args[0] = cpu;
                                cb->args[1] = (unsigned long)ct;
                                spin_unlock_bh(&pcpu->lock);
                                goto out;
@@ -1202,10 +1205,10 @@ restart:
                if (cb->args[1]) {
                        cb->args[1] = 0;
                        goto restart;
-               } else
-                       cb->args[2] = 1;
+               }
                spin_unlock_bh(&pcpu->lock);
        }
+       cb->args[2] = 1;
 out:
        if (last)
                nf_ct_put(last);
@@ -2039,6 +2042,9 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
 #endif
 #ifdef CONFIG_NF_CONNTRACK_MARK
               + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
 #endif
               + ctnetlink_proto_size(ct)
               ;
index 09096a6..a49907b 100644 (file)
@@ -525,6 +525,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
        return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+{
+       struct nf_conn_nat *nat = nfct_nat(ct);
+
+       if (nf_nat_proto_remove(ct, data))
+               return 1;
+
+       if (!nat || !nat->ct)
+               return 0;
+
+       /* This netns is being destroyed, and this conntrack has a NAT null
+        * binding. Remove it from the bysource hash, as the table will be
+        * freed soon.
+        *
+        * Otherwise, when the conntrack is destroyed,
+        * nf_nat_cleanup_conntrack() would delete the entry from the
+        * already-freed table.
+        */
+       if (!del_timer(&ct->timeout))
+               return 1;
+
+       spin_lock_bh(&nf_nat_lock);
+       hlist_del_rcu(&nat->bysource);
+       ct->status &= ~IPS_NAT_DONE_MASK;
+       nat->ct = NULL;
+       spin_unlock_bh(&nf_nat_lock);
+
+       add_timer(&ct->timeout);
+
+       /* Don't delete the conntrack. Although that would make things a lot
+        * simpler, we'd end up flushing all conntracks on nat rmmod.
+        */
+       return 0;
+}
+
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 {
        struct nf_nat_proto_clean clean = {
@@ -795,7 +828,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
        struct nf_nat_proto_clean clean = {};
 
-       nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
+       nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
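
nf_nat_proto_clean() uses del_timer() as a liveness test: if deletion fails, the timeout already fired and the conntrack is being destroyed elsewhere, so the iterator is asked to handle it; only a successful deletion grants the right to unhash and re-arm. A simplified standalone model of that pattern:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct entry { atomic_bool timer_pending; };

    static bool try_unhash(struct entry *e)
    {
            bool expected = true;

            /* models del_timer(): succeeds only if still armed */
            if (!atomic_compare_exchange_strong(&e->timer_pending,
                                                &expected, false))
                    return false;   /* already dying: leave it alone */

            /* ... unhash from the bysource hash under the lock ... */

            atomic_store(&e->timer_pending, true);  /* models add_timer() */
            return true;
    }
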
index 624e083..8746ff9 100644 (file)
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
 {
        INIT_LIST_HEAD(&afi->tables);
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&afi->list, &net->nft.af_info);
+       list_add_tail_rcu(&afi->list, &net->nft.af_info);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
 void nft_unregister_afinfo(struct nft_af_info *afi)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&afi->list);
+       list_del_rcu(&afi->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
                        if (idx < s_idx)
                                goto cont;
                        if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
                                                      NLM_F_MULTI,
                                                      afi->family, table) < 0)
                                goto done;
+
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                        idx++;
                }
        }
 done:
+       rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
 }
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
        if (flags & ~NFT_TABLE_F_DORMANT)
                return -EINVAL;
 
+       if (flags == ctx->table->flags)
+               return 0;
+
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
                                sizeof(struct nft_trans_table));
        if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
                module_put(afi->owner);
                return err;
        }
-       list_add_tail(&table->list, &afi->tables);
+       list_add_tail_rcu(&table->list, &afi->tables);
        return 0;
 }
 
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       list_del(&table->list);
+       list_del_rcu(&table->list);
        return 0;
 }
 
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
 {
        struct nft_stats *cpu_stats, total;
        struct nlattr *nest;
+       unsigned int seq;
+       u64 pkts, bytes;
        int cpu;
 
        memset(&total, 0, sizeof(total));
        for_each_possible_cpu(cpu) {
                cpu_stats = per_cpu_ptr(stats, cpu);
-               total.pkts += cpu_stats->pkts;
-               total.bytes += cpu_stats->bytes;
+               do {
+                       seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       pkts = cpu_stats->pkts;
+                       bytes = cpu_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+               total.pkts += pkts;
+               total.bytes += bytes;
        }
        nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
        if (nest == NULL)
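
The fetch/retry loop above is the reader half of the u64_stats seqcount scheme: 64-bit counters can tear on 32-bit hosts, so the reader snapshots both values and retries while a writer is active. A userspace model (assumption: C11 atomics stand in for u64_stats_sync; an odd sequence means a writer is in progress):

    #include <stdatomic.h>
    #include <stdint.h>

    struct pcpu_stats {
            atomic_uint seq;        /* models u64_stats_sync */
            uint64_t pkts, bytes;
    };

    static void stats_fetch(struct pcpu_stats *s,
                            uint64_t *pkts, uint64_t *bytes)
    {
            unsigned int start;

            do {
                    start = atomic_load(&s->seq);
                    *pkts = s->pkts;
                    *bytes = s->bytes;
            } while ((start & 1) || atomic_load(&s->seq) != start);
    }
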
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
-                       list_for_each_entry(chain, &table->chains, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       list_for_each_entry_rcu(chain, &table->chains, list) {
                                if (idx < s_idx)
                                        goto cont;
                                if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
                                                              NLM_F_MULTI,
                                                              afi->family, table, chain) < 0)
                                        goto done;
+
+                               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                idx++;
                        }
                }
        }
 done:
+       rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
 }
 
-
 static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
                return ERR_PTR(-EINVAL);
 
-       newstats = alloc_percpu(struct nft_stats);
+       newstats = netdev_alloc_pcpu_stats(struct nft_stats);
        if (newstats == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        }
                        basechain->stats = stats;
                } else {
-                       stats = alloc_percpu(struct nft_stats);
+                       stats = netdev_alloc_pcpu_stats(struct nft_stats);
                        if (IS_ERR(stats)) {
                                module_put(type->owner);
                                kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                goto err2;
 
        table->use++;
-       list_add_tail(&chain->list, &table->chains);
+       list_add_tail_rcu(&chain->list, &table->chains);
        return 0;
 err2:
        if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
                return err;
 
        table->use--;
-       list_del(&chain->list);
+       list_del_rcu(&chain->list);
        return 0;
 }
 
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        if (type->family == NFPROTO_UNSPEC)
-               list_add_tail(&type->list, &nf_tables_expressions);
+               list_add_tail_rcu(&type->list, &nf_tables_expressions);
        else
-               list_add(&type->list, &nf_tables_expressions);
+               list_add_rcu(&type->list, &nf_tables_expressions);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
 void nft_unregister_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&type->list);
+       list_del_rcu(&type->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
        unsigned int idx = 0, s_idx = cb->args[0];
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
-       u8 genctr = ACCESS_ONCE(net->nft.genctr);
-       u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
-                       list_for_each_entry(chain, &table->chains, list) {
-                               list_for_each_entry(rule, &chain->rules, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       list_for_each_entry_rcu(chain, &table->chains, list) {
+                               list_for_each_entry_rcu(rule, &chain->rules, list) {
                                        if (!nft_rule_is_active(net, rule))
                                                goto cont;
                                        if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
                                                                      NLM_F_MULTI | NLM_F_APPEND,
                                                                      afi->family, table, chain, rule) < 0)
                                                goto done;
+
+                                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                        idx++;
                                }
@@ -1579,9 +1603,7 @@ cont:
                }
        }
 done:
-       /* Invalidate this dump, a transition to the new generation happened */
-       if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
-               return -EBUSY;
+       rcu_read_unlock();
 
        cb->args[0] = idx;
        return skb->len;
@@ -1730,6 +1752,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EINVAL;
                handle = nf_tables_alloc_handle(table);
+
+               if (chain->use == UINT_MAX)
+                       return -EOVERFLOW;
        }
 
        if (nla[NFTA_RULE_POSITION]) {
@@ -1789,14 +1814,15 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
        if (nlh->nlmsg_flags & NLM_F_REPLACE) {
                if (nft_rule_is_active_next(net, old_rule)) {
-                       trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
+                       trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
                                                   old_rule);
                        if (trans == NULL) {
                                err = -ENOMEM;
                                goto err2;
                        }
                        nft_rule_disactivate_next(net, old_rule);
-                       list_add_tail(&rule->list, &old_rule->list);
+                       chain->use--;
+                       list_add_tail_rcu(&rule->list, &old_rule->list);
                } else {
                        err = -ENOENT;
                        goto err2;
@@ -1826,6 +1852,7 @@ err3:
                list_del_rcu(&nft_trans_rule(trans)->list);
                nft_rule_clear(net, nft_trans_rule(trans));
                nft_trans_destroy(trans);
+               chain->use++;
        }
 err2:
        nf_tables_rule_destroy(&ctx, rule);
@@ -1927,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
 int nft_register_set(struct nft_set_ops *ops)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&ops->list, &nf_tables_set_ops);
+       list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1936,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
 void nft_unregister_set(struct nft_set_ops *ops)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&ops->list);
+       list_del_rcu(&ops->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2229,7 +2256,10 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(set, &ctx->table->sets, list) {
+       rcu_read_lock();
+       cb->seq = ctx->net->nft.base_seq;
+
+       list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                if (idx < s_idx)
                        goto cont;
                if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2237,11 +2267,13 @@ static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
                        cb->args[0] = idx;
                        goto done;
                }
+               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                idx++;
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2255,7 +2287,10 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(table, &ctx->afi->tables, list) {
+       rcu_read_lock();
+       cb->seq = ctx->net->nft.base_seq;
+
+       list_for_each_entry_rcu(table, &ctx->afi->tables, list) {
                if (cur_table) {
                        if (cur_table != table)
                                continue;
@@ -2264,7 +2299,7 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
                }
                ctx->table = table;
                idx = 0;
-               list_for_each_entry(set, &ctx->table->sets, list) {
+               list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                        if (idx < s_idx)
                                goto cont;
                        if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
@@ -2273,12 +2308,14 @@ static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
                                cb->args[2] = (unsigned long) table;
                                goto done;
                        }
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                        idx++;
                }
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2295,7 +2332,10 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (cur_family) {
                        if (afi->family != cur_family)
                                continue;
@@ -2303,7 +2343,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                        cur_family = 0;
                }
 
-               list_for_each_entry(table, &afi->tables, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
                        if (cur_table) {
                                if (cur_table != table)
                                        continue;
@@ -2314,7 +2354,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                        ctx->table = table;
                        ctx->afi = afi;
                        idx = 0;
-                       list_for_each_entry(set, &ctx->table->sets, list) {
+                       list_for_each_entry_rcu(set, &ctx->table->sets, list) {
                                if (idx < s_idx)
                                        goto cont;
                                if (nf_tables_fill_set(skb, ctx, set,
@@ -2325,6 +2365,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                                        cb->args[3] = afi->family;
                                        goto done;
                                }
+                               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                idx++;
                        }
@@ -2334,6 +2375,7 @@ cont:
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
@@ -2592,7 +2634,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                goto err2;
 
-       list_add_tail(&set->list, &table->sets);
+       list_add_tail_rcu(&set->list, &table->sets);
        table->use++;
        return 0;
 
@@ -2612,7 +2654,7 @@ static void nft_set_destroy(struct nft_set *set)
 
 static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
-       list_del(&set->list);
+       list_del_rcu(&set->list);
        nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
        nft_set_destroy(set);
 }
@@ -2647,7 +2689,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       list_del(&set->list);
+       list_del_rcu(&set->list);
        ctx.table->use--;
        return 0;
 }
@@ -2699,14 +2741,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
        }
 bind:
        binding->chain = ctx->chain;
-       list_add_tail(&binding->list, &set->bindings);
+       list_add_tail_rcu(&binding->list, &set->bindings);
        return 0;
 }
 
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_binding *binding)
 {
-       list_del(&binding->list);
+       list_del_rcu(&binding->list);
 
        if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
            !(set->flags & NFT_SET_INACTIVE))
@@ -2845,7 +2887,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
                goto nla_put_failure;
 
        nfmsg = nlmsg_data(nlh);
-       nfmsg->nfgen_family = NFPROTO_UNSPEC;
+       nfmsg->nfgen_family = ctx.afi->family;
        nfmsg->version      = NFNETLINK_V0;
        nfmsg->res_id       = 0;
 
@@ -3341,7 +3383,7 @@ static int nf_tables_commit(struct sk_buff *skb)
        struct nft_set *set;
 
        /* Bump generation counter, invalidate any dump in progress */
-       net->nft.genctr++;
+       while (++net->nft.base_seq == 0);
 
        /* A new generation has just started */
        net->nft.gencursor = gencursor_next(net);
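
base_seq replaces the old genctr/gencursor snapshot test: dumpers record it under rcu_read_lock(), nl_dump_check_consistent() flags the dump if it has moved, and zero is skipped so an unset snapshot can never match. A standalone sketch:

    static unsigned int base_seq = 1;       /* models net->nft.base_seq */

    static void commit_bump(void)
    {
            while (++base_seq == 0)         /* skip 0: reserved value */
                    ;
    }

    static int dump_still_consistent(unsigned int snapshot)
    {
            return snapshot == base_seq;
    }
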
@@ -3486,12 +3528,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                                }
                                nft_trans_destroy(trans);
                        } else {
-                               list_del(&trans->ctx.table->list);
+                               list_del_rcu(&trans->ctx.table->list);
                        }
                        break;
                case NFT_MSG_DELTABLE:
-                       list_add_tail(&trans->ctx.table->list,
-                                     &trans->ctx.afi->tables);
+                       list_add_tail_rcu(&trans->ctx.table->list,
+                                         &trans->ctx.afi->tables);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWCHAIN:
@@ -3502,7 +3544,7 @@ static int nf_tables_abort(struct sk_buff *skb)
                                nft_trans_destroy(trans);
                        } else {
                                trans->ctx.table->use--;
-                               list_del(&trans->ctx.chain->list);
+                               list_del_rcu(&trans->ctx.chain->list);
                                if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
                                    trans->ctx.chain->flags & NFT_BASE_CHAIN) {
                                        nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3512,8 +3554,8 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_DELCHAIN:
                        trans->ctx.table->use++;
-                       list_add_tail(&trans->ctx.chain->list,
-                                     &trans->ctx.table->chains);
+                       list_add_tail_rcu(&trans->ctx.chain->list,
+                                         &trans->ctx.table->chains);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWRULE:
@@ -3527,12 +3569,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_NEWSET:
                        trans->ctx.table->use--;
-                       list_del(&nft_trans_set(trans)->list);
+                       list_del_rcu(&nft_trans_set(trans)->list);
                        break;
                case NFT_MSG_DELSET:
                        trans->ctx.table->use++;
-                       list_add_tail(&nft_trans_set(trans)->list,
-                                     &trans->ctx.table->sets);
+                       list_add_tail_rcu(&nft_trans_set(trans)->list,
+                                         &trans->ctx.table->sets);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWSETELEM:
@@ -3946,6 +3988,7 @@ static int nf_tables_init_net(struct net *net)
 {
        INIT_LIST_HEAD(&net->nft.af_info);
        INIT_LIST_HEAD(&net->nft.commit_list);
+       net->nft.base_seq = 1;
        return 0;
 }
 
index 345acfb..3b90eb2 100644 (file)
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       struct nft_stats __percpu *stats;
+       struct nft_stats *stats;
        int rulenum;
        /*
         * Cache cursor to avoid problems in case the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
                nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
 
        rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(basechain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
+       stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
+       u64_stats_update_begin(&stats->syncp);
+       stats->pkts++;
+       stats->bytes += pkt->skb->len;
+       u64_stats_update_end(&stats->syncp);
        rcu_read_unlock_bh();
 
        return nft_base_chain(basechain)->policy;
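
And the matching writer half of the same scheme: the sequence goes odd on entry and even on exit, bracketing the non-atomic 64-bit updates. Same userspace model as the reader sketch above:

    #include <stdatomic.h>
    #include <stdint.h>

    struct pcpu_stats {
            atomic_uint seq;
            uint64_t pkts, bytes;
    };

    static void stats_update(struct pcpu_stats *s, uint64_t len)
    {
            atomic_fetch_add(&s->seq, 1);   /* begin: seq becomes odd */
            s->pkts++;
            s->bytes += len;
            atomic_fetch_add(&s->seq, 1);   /* end: seq becomes even */
    }
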
index 8a779be..1840989 100644 (file)
@@ -195,6 +195,15 @@ static void
 nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
        struct xt_target *target = expr->ops->data;
+       void *info = nft_expr_priv(expr);
+       struct xt_tgdtor_param par;
+
+       par.net = ctx->net;
+       par.target = target;
+       par.targinfo = info;
+       par.family = ctx->afi->family;
+       if (par.target->destroy != NULL)
+               par.target->destroy(&par);
 
        module_put(target->me);
 }
@@ -382,6 +391,15 @@ static void
 nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
        struct xt_match *match = expr->ops->data;
+       void *info = nft_expr_priv(expr);
+       struct xt_mtdtor_param par;
+
+       par.net = ctx->net;
+       par.match = match;
+       par.matchinfo = info;
+       par.family = ctx->afi->family;
+       if (par.match->destroy != NULL)
+               par.match->destroy(&par);
 
        module_put(match->me);
 }
index a0195d2..79ff58c 100644 (file)
@@ -175,12 +175,14 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
        if (nla_put_be32(skb,
                         NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
                goto nla_put_failure;
-       if (nla_put_be32(skb,
-                        NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
-               goto nla_put_failure;
-       if (nla_put_be32(skb,
-                        NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
-               goto nla_put_failure;
+       if (priv->sreg_proto_min) {
+               if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
+                                htonl(priv->sreg_proto_min)))
+                       goto nla_put_failure;
+               if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
+                                htonl(priv->sreg_proto_max)))
+                       goto nla_put_failure;
+       }
        return 0;
 
 nla_put_failure:
index 15c731f..e6fac7e 100644 (file)
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
-                               sk->sk_err = err;
+                               sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
            atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
                ret = netlink_dump(sk);
                if (ret) {
-                       sk->sk_err = ret;
+                       sk->sk_err = -ret;
                        sk->sk_error_report(sk);
                }
        }
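
sk_err holds a positive errno (what userspace later sees, e.g. via SO_ERROR), while netlink_dump() returns a negative errno, hence the negation in both hunks. A minimal illustration (hypothetical names):

    #include <errno.h>

    static int sk_err;                      /* positive errno, as sk->sk_err */

    static void report_dump_error(int err)  /* err is negative, e.g. -ENOBUFS */
    {
            if (err < 0)
                    sk_err = -err;          /* store ENOBUFS, not -ENOBUFS */
    }
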
index c36856a..e70d8b1 100644 (file)
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, a);
+                       if (unlikely(err)) /* skb already freed. */
+                               return err;
                        break;
                }
 
index 0d407bc..9db4bf6 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
        OVS_CB(skb)->flow = flow;
        OVS_CB(skb)->pkt_key = &key;
 
-       ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
+       ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
        ovs_execute_actions(dp, skb);
        stats_counter = &stats->n_hit;
 
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                }
                /* The unmasked key has to be the same for flow updates. */
                if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-                       error = -EEXIST;
-                       goto err_unlock_ovs;
+                       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+                       if (!flow) {
+                               error = -ENOENT;
+                               goto err_unlock_ovs;
+                       }
                }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
        }
-       /* The unmasked key has to be the same for flow updates. */
-       if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-               error = -EEXIST;
-               goto err_unlock_ovs;
-       }
+
        /* Update actions, if present. */
        if (likely(acts)) {
                old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (!flow) {
                err = -ENOENT;
                goto unlock;
        }
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (unlikely(!flow)) {
                err = -ENOENT;
                goto unlock;
        }
index 334751c..d07ab53 100644 (file)
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
+                          struct sk_buff *skb)
 {
        struct flow_stats *stats;
-       __be16 tcp_flags = flow->key.tp.flags;
        int node = numa_node_id();
 
        stats = rcu_dereference(flow->stats[node]);
index ac395d2..5e5aaed 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
        unsigned char       ar_tip[4];          /* target IP address        */
 } __packed;
 
-void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
+                          struct sk_buff *);
 void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
                        unsigned long *used, __be16 *tcp_flags);
 void ovs_flow_stats_clear(struct sw_flow *);
index 574c3ab..cf2d853 100644 (file)
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
 }
 
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+                                         struct sw_flow_match *match)
+{
+       struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+       struct sw_flow_mask *mask;
+       struct sw_flow *flow;
+
+       /* Always called under ovs-mutex. */
+       list_for_each_entry(mask, &tbl->mask_list, list) {
+               flow = masked_flow_lookup(ti, match->key, mask);
+               if (flow && ovs_flow_cmp_unmasked_key(flow, match))  /* Found */
+                       return flow;
+       }
+       return NULL;
+}
+
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
        struct sw_flow_mask *mask;
index ca8a582..5918bff 100644 (file)
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
                                    u32 *n_mask_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
                                    const struct sw_flow_key *);
-
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+                                         struct sw_flow_match *match);
 bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match);
 
index 35ec4fe..f49148a 100644 (file)
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
        return PACKET_RCVD;
 }
 
+/* Called with rcu_read_lock and BH disabled. */
+static int gre_err(struct sk_buff *skb, u32 info,
+                  const struct tnl_ptk_info *tpi)
+{
+       struct ovs_net *ovs_net;
+       struct vport *vport;
+
+       ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+       vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+
+       if (unlikely(!vport))
+               return PACKET_REJECT;
+       else
+               return PACKET_RCVD;
+}
+
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
 
 static struct gre_cisco_protocol gre_protocol = {
        .handler        = gre_rcv,
+       .err_handler    = gre_err,
        .priority       = 1,
 };
 
index c39b583..70c0be8 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/errno.h>
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
+#include <linux/bitmap.h>
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
        return 0;
 }
 
+#define NR_U32_NODE (1<<12)
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
        struct tc_u_knode *n;
-       unsigned int i = 0x7FF;
+       unsigned long i;
+       unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
+                                       GFP_KERNEL);
+       if (!bitmap)
+               return handle | 0xFFF;
 
        for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
-               if (i < TC_U32_NODE(n->handle))
-                       i = TC_U32_NODE(n->handle);
-       i++;
+               set_bit(TC_U32_NODE(n->handle), bitmap);
 
-       return handle | (i > 0xFFF ? 0xFFF : i);
+       i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
+       if (i >= NR_U32_NODE)
+               i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
+
+       kfree(bitmap);
+       return handle | (i >= NR_U32_NODE ? 0xFFF : i);
 }
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
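The rewrite above replaces a max+1 scan, which never reuses freed handles and saturates at 0xFFF, with a bitmap of in-use node IDs, so the lowest free ID is found and freed IDs are recycled. The same pattern in isolation (a sketch; NR_ID and the helper name are illustrative):

#include <linux/bitmap.h>

#define NR_ID (1 << 12)

/* Sketch: lowest unused ID >= lo, wrapping back to 1; NR_ID means full. */
static unsigned long pick_free_id(const unsigned long *used, unsigned long lo)
{
	unsigned long i = find_next_zero_bit(used, NR_ID, lo);

	if (i >= NR_ID)
		i = find_next_zero_bit(used, NR_ID, 1);
	return i;
}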
index 9de23a2..06a9ee6 100644 (file)
@@ -1097,6 +1097,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
        asoc->c = new->c;
        asoc->peer.rwnd = new->peer.rwnd;
        asoc->peer.sack_needed = new->peer.sack_needed;
+       asoc->peer.auth_capable = new->peer.auth_capable;
        asoc->peer.i = new->peer.i;
        sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
                         asoc->peer.i.initial_tsn, GFP_ATOMIC);
index dcb1959..12c7e01 100644 (file)
@@ -321,41 +321,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
                                loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
-       char tmp[8];
        struct ctl_table tbl;
-       int ret;
-       int changed = 0;
+       bool changed = false;
        char *none = "none";
+       char tmp[8];
+       int ret;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
 
        if (write) {
                tbl.data = tmp;
-               tbl.maxlen = 8;
+               tbl.maxlen = sizeof(tmp);
        } else {
                tbl.data = net->sctp.sctp_hmac_alg ? : none;
                tbl.maxlen = strlen(tbl.data);
        }
-               ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
 
-       if (write) {
+       ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+       if (write && ret == 0) {
 #ifdef CONFIG_CRYPTO_MD5
                if (!strncmp(tmp, "md5", 3)) {
                        net->sctp.sctp_hmac_alg = "md5";
-                       changed = 1;
+                       changed = true;
                }
 #endif
 #ifdef CONFIG_CRYPTO_SHA1
                if (!strncmp(tmp, "sha1", 4)) {
                        net->sctp.sctp_hmac_alg = "sha1";
-                       changed = 1;
+                       changed = true;
                }
 #endif
                if (!strncmp(tmp, "none", 4)) {
                        net->sctp.sctp_hmac_alg = NULL;
-                       changed = 1;
+                       changed = true;
                }
-
                if (!changed)
                        ret = -EINVAL;
        }
@@ -368,11 +367,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
                                loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
-       int new_value;
-       struct ctl_table tbl;
        unsigned int min = *(unsigned int *) ctl->extra1;
        unsigned int max = *(unsigned int *) ctl->extra2;
-       int ret;
+       struct ctl_table tbl;
+       int ret, new_value;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
        tbl.maxlen = sizeof(unsigned int);
@@ -381,12 +379,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
                tbl.data = &new_value;
        else
                tbl.data = &net->sctp.rto_min;
+
        ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-       if (write) {
-               if (ret || new_value > max || new_value < min)
+       if (write && ret == 0) {
+               if (new_value > max || new_value < min)
                        return -EINVAL;
+
                net->sctp.rto_min = new_value;
        }
+
        return ret;
 }
 
@@ -395,11 +396,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
                                loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
-       int new_value;
-       struct ctl_table tbl;
        unsigned int min = *(unsigned int *) ctl->extra1;
        unsigned int max = *(unsigned int *) ctl->extra2;
-       int ret;
+       struct ctl_table tbl;
+       int ret, new_value;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
        tbl.maxlen = sizeof(unsigned int);
@@ -408,12 +408,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
                tbl.data = &new_value;
        else
                tbl.data = &net->sctp.rto_max;
+
        ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-       if (write) {
-               if (ret || new_value > max || new_value < min)
+       if (write && ret == 0) {
+               if (new_value > max || new_value < min)
                        return -EINVAL;
+
                net->sctp.rto_max = new_value;
        }
+
        return ret;
 }
 
@@ -444,8 +447,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
                tbl.data = &net->sctp.auth_enable;
 
        ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-
-       if (write) {
+       if (write && ret == 0) {
                struct sock *sk = net->sctp.ctl_sock;
 
                net->sctp.auth_enable = new_value;
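The sysctl handlers above now converge on one shape: run the generic parser first, and touch net->sctp state only when a write succeeded (write && ret == 0), range-checking before the store. As a generic sketch (the tunable and its bounds are illustrative):

static int proc_do_bounded_uint(struct ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int min = *(unsigned int *) ctl->extra1;
	unsigned int max = *(unsigned int *) ctl->extra2;
	struct ctl_table tbl = { .maxlen = sizeof(unsigned int) };
	int ret, new_value;

	tbl.data = write ? &new_value : ctl->data;
	ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
	if (write && ret == 0) {	/* commit only after a clean parse */
		if (new_value > max || new_value < min)
			return -EINVAL;
		*(unsigned int *) ctl->data = new_value;
	}
	return ret;
}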
index 85c6465..b6842fd 100644 (file)
@@ -366,9 +366,10 @@ fail:
  * specification [SCTP] and any extensions for a list of possible
  * error formats.
  */
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
-       const struct sctp_association *asoc, struct sctp_chunk *chunk,
-       __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+                               struct sctp_chunk *chunk, __u16 flags,
+                               gfp_t gfp)
 {
        struct sctp_ulpevent *event;
        struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        /* Copy the skb to a new skb with room for us to prepend
         * notification with.
         */
-       skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
-                             0, gfp);
+       skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
 
        /* Pull off the rest of the cause TLV from the chunk.  */
        skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        event = sctp_skb2event(skb);
        sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
 
-       sre = (struct sctp_remote_error *)
-               skb_push(skb, sizeof(struct sctp_remote_error));
+       sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
 
        /* Trim the buffer to the right length.  */
-       skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+       skb_trim(skb, sizeof(*sre) + elen);
 
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_type:
-        *   It should be SCTP_REMOTE_ERROR.
-        */
+       /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+       memset(sre, 0, sizeof(*sre));
        sre->sre_type = SCTP_REMOTE_ERROR;
-
-       /*
-        * Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_flags: 16 bits (unsigned integer)
-        *   Currently unused.
-        */
        sre->sre_flags = 0;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_length: sizeof (__u32)
-        *
-        * This field is the total length of the notification data,
-        * including the notification header.
-        */
        sre->sre_length = skb->len;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_error: 16 bits (unsigned integer)
-        * This value represents one of the Operational Error causes defined in
-        * the SCTP specification, in network byte order.
-        */
        sre->sre_error = cause;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_assoc_id: sizeof (sctp_assoc_t)
-        *
-        * The association id field, holds the identifier for the association.
-        * All notifications for a given association have the same association
-        * identifier.  For TCP style socket, this field is ignored.
-        */
        sctp_ulpevent_set_owner(event, asoc);
        sre->sre_assoc_id = sctp_assoc2id(asoc);
 
        return event;
-
 fail:
        return NULL;
 }
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
        return notification->sn_header.sn_type;
 }
 
-/* Copy out the sndrcvinfo into a msghdr.  */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
                                   struct msghdr *msghdr)
 {
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
        if (sctp_ulpevent_is_notification(event))
                return;
 
-       /* Sockets API Extensions for SCTP
-        * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
-        *
-        * sinfo_stream: 16 bits (unsigned integer)
-        *
-        * For recvmsg() the SCTP stack places the message's stream number in
-        * this value.
-       */
+       memset(&sinfo, 0, sizeof(sinfo));
        sinfo.sinfo_stream = event->stream;
-       /* sinfo_ssn: 16 bits (unsigned integer)
-        *
-        * For recvmsg() this value contains the stream sequence number that
-        * the remote endpoint placed in the DATA chunk.  For fragmented
-        * messages this is the same number for all deliveries of the message
-        * (if more than one recvmsg() is needed to read the message).
-        */
        sinfo.sinfo_ssn = event->ssn;
-       /* sinfo_ppid: 32 bits (unsigned integer)
-        *
-        * In recvmsg() this value is
-        * the same information that was passed by the upper layer in the peer
-        * application.  Please note that byte order issues are NOT accounted
-        * for and this information is passed opaquely by the SCTP stack from
-        * one end to the other.
-        */
        sinfo.sinfo_ppid = event->ppid;
-       /* sinfo_flags: 16 bits (unsigned integer)
-        *
-        * This field may contain any of the following flags and is composed of
-        * a bitwise OR of these values.
-        *
-        * recvmsg() flags:
-        *
-        * SCTP_UNORDERED - This flag is present when the message was sent
-        *                 non-ordered.
-        */
        sinfo.sinfo_flags = event->flags;
-       /* sinfo_tsn: 32 bit (unsigned integer)
-        *
-        * For the receiving side, this field holds a TSN that was
-        * assigned to one of the SCTP Data Chunks.
-        */
        sinfo.sinfo_tsn = event->tsn;
-       /* sinfo_cumtsn: 32 bit (unsigned integer)
-        *
-        * This field will hold the current cumulative TSN as
-        * known by the underlying SCTP layer.  Note this field is
-        * ignored when sending and only valid for a receive
-        * operation when sinfo_flags are set to SCTP_UNORDERED.
-        */
        sinfo.sinfo_cumtsn = event->cumtsn;
-       /* sinfo_assoc_id: sizeof (sctp_assoc_t)
-        *
-        * The association handle field, sinfo_assoc_id, holds the identifier
-        * for the association announced in the COMMUNICATION_UP notification.
-        * All notifications for a given association have the same identifier.
-        * Ignored for one-to-one style sockets.
-        */
        sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
-       /* context value that is set via SCTP_CONTEXT socket option. */
+       /* Context value that is set via SCTP_CONTEXT socket option. */
        sinfo.sinfo_context = event->asoc->default_rcv_context;
-
        /* These fields are not used while receiving. */
        sinfo.sinfo_timetolive = 0;
 
        put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
-                sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+                sizeof(sinfo), &sinfo);
 }
 
 /* Do accounting for bytes received and hold a reference to the association
index 247e973..f773667 100644 (file)
@@ -592,6 +592,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
        put_group_info(acred.group_info);
        return ret;
 }
+EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
 
 void
 rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
index c0365c1..9358c79 100644 (file)
@@ -250,7 +250,7 @@ void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
 }
 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
 
-static int rpc_wait_bit_killable(void *word)
+static int rpc_wait_bit_killable(struct wait_bit_key *key)
 {
        if (fatal_signal_pending(current))
                return -ERESTARTSYS;
@@ -309,7 +309,7 @@ static int rpc_complete_task(struct rpc_task *task)
  * to enforce taking of the wq->lock and hence avoid races with
  * rpc_complete_task().
  */
-int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
+int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
 {
        if (action == NULL)
                action = rpc_wait_bit_killable;
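This hunk tracks the tree-wide wait_on_bit() rework in this cycle: bit-wait actions are now typed wait_bit_action_f and receive a struct wait_bit_key * instead of an opaque void *word, and wait_on_bit() itself no longer takes an action for the plain-schedule() case. The shape of the conversion (callback bodies are illustrative):

/* before: int (*action)(void *word); */
static int my_wait_killable_old(void *word)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}

/* after: wait_bit_action_f, i.e. int (*)(struct wait_bit_key *); */
static int my_wait_killable(struct wait_bit_key *key)
{
	if (fatal_signal_pending(current))
		return -ERESTARTSYS;
	schedule();
	return 0;
}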
index 2663167..55c6c9d 100644 (file)
@@ -559,6 +559,7 @@ receive:
 
                buf = node->bclink.deferred_head;
                node->bclink.deferred_head = buf->next;
+               buf->next = NULL;
                node->bclink.deferred_size--;
                goto receive;
        }
index 8be6e94..0a37a47 100644 (file)
@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
 }
 
 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
- * Let first buffer become head buffer
- * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
- * Leaves headbuf pointer at NULL if failure
+ * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
+ *            out: set when successful non-complete reassembly, otherwise NULL
+ * @*buf:     in:  the buffer to append. Always defined
+ *            out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly complete, otherwise 0
  */
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 {
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
                        goto out_free;
                head = *headbuf = frag;
                skb_frag_list_init(head);
+               *buf = NULL;
                return 0;
        }
        if (!head)
@@ -150,5 +153,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 out_free:
        pr_warn_ratelimited("Unable to build fragment list\n");
        kfree_skb(*buf);
+       kfree_skb(*headbuf);
+       *buf = *headbuf = NULL;
        return 0;
 }
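Given the contract spelled out above, a caller drives reassembly roughly as below (a sketch: next_fragment() and deliver() are placeholders, not TIPC API):

struct sk_buff *head = NULL;	/* NULL before the first fragment */
struct sk_buff *frag;

while ((frag = next_fragment()) != NULL) {
	if (tipc_buf_append(&head, &frag)) {
		deliver(frag);	/* returned 1: frag is the complete message */
		head = NULL;
	}
	/* returned 0: head holds the partial chain, or both are NULL on error */
}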
index e9afbf1..7e3a3ce 100644 (file)
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
        if (end >= start)
                return jiffies_to_msecs(end - start);
 
-       return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1);
+       return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
 }
 
 void
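MAX_JIFFY_OFFSET is a clamp constant (roughly LONG_MAX/2) and understates the distance to the wrap point; jiffies wraps at ULONG_MAX, so the corrected expression counts end + (ULONG_MAX - start) + 1 ticks. Worked example: start = ULONG_MAX - 9 and a post-wrap end = 5 gives 5 + 9 + 1 = 15. A standalone check of the arithmetic:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned long start = ULONG_MAX - 9;	/* just before the wrap */
	unsigned long end = 5;			/* just after the wrap */

	assert(end + (ULONG_MAX - start) + 1 == 15);
	assert(end - start == 15);	/* unsigned wrap gives the same answer */
	return 0;
}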
index ba4f172..6668daf 100644 (file)
@@ -1497,18 +1497,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
                }
                CMD(start_p2p_device, START_P2P_DEVICE);
                CMD(set_mcast_rate, SET_MCAST_RATE);
+#ifdef CONFIG_NL80211_TESTMODE
+               CMD(testmode_cmd, TESTMODE);
+#endif
                if (state->split) {
                        CMD(crit_proto_start, CRIT_PROTOCOL_START);
                        CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
                        if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
                                CMD(channel_switch, CHANNEL_SWITCH);
+                       CMD(set_qos_map, SET_QOS_MAP);
                }
-               CMD(set_qos_map, SET_QOS_MAP);
-
-#ifdef CONFIG_NL80211_TESTMODE
-               CMD(testmode_cmd, TESTMODE);
-#endif
-
+               /* add into the if now */
 #undef CMD
 
                if (rdev->ops->connect || rdev->ops->auth) {
index 558b0e3..1afdf45 100644 (file)
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
                if (!band_rule_found)
                        band_rule_found = freq_in_rule_band(fr, center_freq);
 
-               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5));
+               bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
 
                if (band_rule_found && bw_fits)
                        return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
 }
 #endif
 
-/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency
- * chan->center_freq fits there.
- * If there is no such reg_rule, disable the channel, otherwise set the
- * flags corresponding to the bandwidths allowed in the particular reg_rule
+/*
+ * Note that right now we assume the desired channel bandwidth
+ * is always 20 MHz for each individual channel (HT40 uses 20 MHz
+ * per channel, the primary and the extension channel).
  */
 static void handle_channel(struct wiphy *wiphy,
                           enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
-       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
-               bw_flags = IEEE80211_CHAN_NO_10MHZ;
-       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
-               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags |= IEEE80211_CHAN_NO_HT40;
+               bw_flags = IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
        if (reg_rule->flags & NL80211_RRF_AUTO_BW)
                max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
 
-       if (max_bandwidth_khz < MHZ_TO_KHZ(10))
-               bw_flags = IEEE80211_CHAN_NO_10MHZ;
-       if (max_bandwidth_khz < MHZ_TO_KHZ(20))
-               bw_flags |= IEEE80211_CHAN_NO_20MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(40))
-               bw_flags |= IEEE80211_CHAN_NO_HT40;
+               bw_flags = IEEE80211_CHAN_NO_HT40;
        if (max_bandwidth_khz < MHZ_TO_KHZ(80))
                bw_flags |= IEEE80211_CHAN_NO_80MHZ;
        if (max_bandwidth_khz < MHZ_TO_KHZ(160))
index 560ed77..7cc887f 100644 (file)
@@ -2094,7 +2094,8 @@ TRACE_EVENT(cfg80211_michael_mic_failure,
                MAC_ASSIGN(addr, addr);
                __entry->key_type = key_type;
                __entry->key_id = key_id;
-               memcpy(__entry->tsc, tsc, 6);
+               if (tsc)
+                       memcpy(__entry->tsc, tsc, 6);
        ),
        TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT ", key type: %d, key id: %d, tsc: %pm",
                  NETDEV_PR_ARG, MAC_PR_ARG(addr), __entry->key_type,
index a8ef510..0525d78 100644 (file)
@@ -2097,6 +2097,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                                goto no_transform;
                        }
 
+                       dst_hold(&xdst->u.dst);
+                       xdst->u.dst.flags |= DST_NOCACHE;
                        route = xdst->route;
                }
        }
index 412d9dc..d4db6eb 100644 (file)
@@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                    attrs[XFRMA_ALG_AEAD]       ||
                    attrs[XFRMA_ALG_CRYPT]      ||
                    attrs[XFRMA_ALG_COMP]       ||
-                   attrs[XFRMA_TFCPAD]         ||
-                   (ntohl(p->id.spi) >= 0x10000))
-
+                   attrs[XFRMA_TFCPAD])
                        goto out;
                break;
 
@@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                    attrs[XFRMA_ALG_AUTH]       ||
                    attrs[XFRMA_ALG_AUTH_TRUNC] ||
                    attrs[XFRMA_ALG_CRYPT]      ||
-                   attrs[XFRMA_TFCPAD])
+                   attrs[XFRMA_TFCPAD]         ||
+                   (ntohl(p->id.spi) >= 0x10000))
                        goto out;
                break;
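The relocated bound is IPComp-specific: an IPComp CPI is a 16-bit value (RFC 3173), so only SPIs below 0x10000 (1 << 16 = 65536) are representable there, while AH/ESP SPIs legitimately use all 32 bits; leaving the check in the earlier branch wrongly rejected valid AH SAs with large SPIs. The test in isolation (helper name illustrative):

#include <stdint.h>

/* Sketch: an IPComp CPI must fit in 16 bits, hence spi < 0x10000. */
static inline int ipcomp_cpi_representable(uint32_t spi_host_order)
{
	return spi_host_order < 0x10000;
}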
 
index 6af3732..4764292 100644 (file)
@@ -56,7 +56,8 @@
  * struct:  This defines the way the data will be stored in the ring buffer.
  *    There are currently two types of elements. __field and __array.
  *    a __field is broken up into (type, name). Where type can be any
- *    type but an array.
+ *    primitive type (integer, long or pointer). __field_struct() can
+ *    be any static complex data value (struct, union, but not an array).
  *    For an array. there are three fields. (type, name, size). The
  *    type of elements in the array, the name of the field and the size
  *    of the array.
@@ -86,7 +87,7 @@ TRACE_EVENT(foo_bar,
        ),
 
        TP_fast_assign(
-               strncpy(__entry->foo, foo, 10);
+               strlcpy(__entry->foo, foo, 10);
                __entry->bar    = bar;
        ),
 
index 010b18e..182be0f 100755 (executable)
@@ -3476,12 +3476,17 @@ sub process {
                        }
                }
 
-# unnecessary return in a void function? (a single leading tab, then return;)
-               if ($sline =~ /^\+\treturn\s*;\s*$/ &&
-                   $prevline =~ /^\+/) {
+# unnecessary return in a void function
+# at end-of-function, with the previous line a single leading tab, then return;
+# and the line before that not a goto label target like "out:"
+               if ($sline =~ /^[ \+]}\s*$/ &&
+                   $prevline =~ /^\+\treturn\s*;\s*$/ &&
+                   $linenr >= 3 &&
+                   $lines[$linenr - 3] =~ /^[ +]/ &&
+                   $lines[$linenr - 3] !~ /^[ +]\s*$Ident\s*:/) {
                        WARN("RETURN_VOID",
-                            "void function return statements are not generally useful\n" . $herecurr);
-               }
+                            "void function return statements are not generally useful\n" . $hereprev);
+               }
 
 # if statements using unnecessary parentheses - ie: if ((foo == bar))
                if ($^V && $^V ge 5.10.0 &&
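The extra context checks mean the warning now fires only for a trailing return; that ends the function and is not sitting after a goto-label line, so these two shapes diverge (illustrative C):

void ok_to_warn(void)
{
	do_thing();
	return;		/* flagged: useless trailing return in a void function */
}

void not_warned(void)
{
	if (setup() < 0)
		goto out;
	do_thing();
out:
	return;		/* kept: the "out:" label needs a statement after it */
}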
index 4198788..d701627 100755 (executable)
@@ -21,6 +21,7 @@ my $lk_path = "./";
 my $email = 1;
 my $email_usename = 1;
 my $email_maintainer = 1;
+my $email_reviewer = 1;
 my $email_list = 1;
 my $email_subscriber_list = 0;
 my $email_git_penguin_chiefs = 0;
@@ -202,6 +203,7 @@ if (!GetOptions(
                'remove-duplicates!' => \$email_remove_duplicates,
                'mailmap!' => \$email_use_mailmap,
                'm!' => \$email_maintainer,
+               'r!' => \$email_reviewer,
                'n!' => \$email_usename,
                'l!' => \$email_list,
                's!' => \$email_subscriber_list,
@@ -260,7 +262,8 @@ if ($sections) {
 }
 
 if ($email &&
-    ($email_maintainer + $email_list + $email_subscriber_list +
+    ($email_maintainer + $email_reviewer +
+     $email_list + $email_subscriber_list +
      $email_git + $email_git_penguin_chiefs + $email_git_blame) == 0) {
     die "$P: Please select at least 1 email option\n";
 }
@@ -750,6 +753,7 @@ MAINTAINER field selection options:
     --hg-since => hg history to use (default: $email_hg_since)
     --interactive => display a menu (mostly useful if used with the --git option)
     --m => include maintainer(s) if any
+    --r => include reviewer(s) if any
     --n => include name 'Full Name <addr\@domain.tld>'
     --l => include list(s) if any
     --s => include subscriber only list(s) if any
@@ -1064,6 +1068,22 @@ sub add_categories {
                    my $role = get_maintainer_role($i);
                    push_email_addresses($pvalue, $role);
                }
+           } elsif ($ptype eq "R") {
+               my ($name, $address) = parse_email($pvalue);
+               if ($name eq "") {
+                   if ($i > 0) {
+                       my $tv = $typevalue[$i - 1];
+                       if ($tv =~ m/^(\C):\s*(.*)/) {
+                           if ($1 eq "P") {
+                               $name = $2;
+                               $pvalue = format_email($name, $address, $email_usename);
+                           }
+                       }
+                   }
+               }
+               if ($email_reviewer) {
+                   push_email_addresses($pvalue, 'reviewer');
+               }
            } elsif ($ptype eq "T") {
                push(@scm, $pvalue);
            } elsif ($ptype eq "W") {
index da058da..16a07cf 100755 (executable)
@@ -2073,6 +2073,7 @@ sub check_return_section {
 sub dump_function($$) {
     my $prototype = shift;
     my $file = shift;
+    my $noret = 0;
 
     $prototype =~ s/^static +//;
     $prototype =~ s/^extern +//;
@@ -2086,7 +2087,7 @@ sub dump_function($$) {
     $prototype =~ s/__init_or_module +//;
     $prototype =~ s/__must_check +//;
     $prototype =~ s/__weak +//;
-    $prototype =~ s/^#\s*define\s+//; #ak added
+    my $define = $prototype =~ s/^#\s*define\s+//; #ak added
     $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
 
     # Yes, this truly is vile.  We are looking for:
@@ -2105,7 +2106,15 @@ sub dump_function($$) {
     # - atomic_set (macro)
     # - pci_match_device, __copy_to_user (long return type)
 
-    if ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
+    if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) {
+        # This is an object-like macro, it has no return type and no parameter
+        # list.
+        # Function-like macros are not allowed to have spaces between
+        # declaration_name and opening parenthesis (notice the \s+).
+        $return_type = $1;
+        $declaration_name = $2;
+        $noret = 1;
+    } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
        $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
        $prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
        $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
@@ -2140,7 +2149,7 @@ sub dump_function($$) {
         # of warnings goes sufficiently down, the check is only performed in
         # verbose mode.
         # TODO: always perform the check.
-        if ($verbose) {
+        if ($verbose && !$noret) {
                 check_return_section($file, $declaration_name, $return_type);
         }
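The new branch keys on what follows the macro name: a function-like macro has '(' immediately after the name, an object-like macro has whitespace (the \s+ in the added regex), and the latter has neither a return type nor a parameter list, so the Return-section check is skipped for it. Illustrative inputs (hypothetical macros):

/* function-like: name immediately followed by '(' -> parsed as before */
#define clamp_u8(x) ((x) > 255 ? 255 : (x))

/* object-like: whitespace after the name -> takes the new $noret path */
#define MAX_RETRIES 5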
 
index b5f08f7..35d5a58 100644 (file)
@@ -289,14 +289,16 @@ EOF
 
 fi
 
-# Build header package
-(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
-(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
-(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+# Build kernel header package
+(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
+(cd $srctree; find arch/$SRCARCH/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
+(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
+(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
+(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
-(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
-(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
+(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
+(cd $objtree; tar -c -f - -T -) < "$objtree/debian/hdrobjfiles" | (cd $destdir; tar -xf -)
 (cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
 ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
 rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
index 995c1ea..e046bff 100644 (file)
@@ -125,12 +125,11 @@ esac
 # Create the tarball
 #
 (
-       cd "${tmpdir}"
        opts=
        if tar --owner=root --group=root --help >/dev/null 2>&1; then
                opts="--owner=root --group=root"
        fi
-       tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}"
+       tar cf - -C "$tmpdir" boot/ lib/ $opts | ${compress} > "${tarball}${file_ext}"
 )
 
 echo "Tarball successfully created in ${tarball}${file_ext}"
index 9d1421e..49b582a 100644 (file)
@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
 
 static int MIPS_is_fake_mcount(Elf_Rel const *rp)
 {
-       static Elf_Addr old_r_offset;
+       static Elf_Addr old_r_offset = ~(Elf_Addr)0;
        Elf_Addr current_r_offset = _w(rp->r_offset);
        int is_fake;
 
-       is_fake = old_r_offset &&
+       is_fake = (old_r_offset != ~(Elf_Addr)0) &&
                (current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
        old_r_offset = current_r_offset;
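Zero was doing double duty as both a valid r_offset and the "no previous relocation seen" marker, so a genuine relocation at offset 0 was mistaken for the warm-up call; all-ones is out of band for real offsets. The sentinel idiom in isolation (names illustrative):

#include <stdint.h>

#define NO_PREV_OFFSET (~(uint64_t)0)

static uint64_t prev_offset = NO_PREV_OFFSET;

static int have_prev_offset(void)
{
	return prev_offset != NO_PREV_OFFSET;	/* 0 is a legal stored value */
}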
 
index d9d69e6..188c1d2 100644 (file)
@@ -796,7 +796,7 @@ struct cgroup_subsys devices_cgrp_subsys = {
        .css_free = devcgroup_css_free,
        .css_online = devcgroup_online,
        .css_offline = devcgroup_offline,
-       .base_cftypes = dev_cgroup_files,
+       .legacy_cftypes = dev_cgroup_files,
 };
 
 /**
index d3222b6..9609a7f 100644 (file)
@@ -91,15 +91,6 @@ static void key_gc_timer_func(unsigned long data)
        key_schedule_gc_links();
 }
 
-/*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-static int key_gc_wait_bit(void *flags)
-{
-       schedule();
-       return 0;
-}
-
 /*
  * Reap keys of dead type.
  *
@@ -123,7 +114,7 @@ void key_gc_keytype(struct key_type *ktype)
        schedule_work(&key_gc_work);
 
        kdebug("sleep");
-       wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
+       wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE,
                    TASK_UNINTERRUPTIBLE);
 
        key_gc_dead_keytype = NULL;
index 3814119..26a94f1 100644 (file)
 
 #define key_negative_timeout   60      /* default timeout on a negative key's existence */
 
-/*
- * wait_on_bit() sleep function for uninterruptible waiting
- */
-static int key_wait_bit(void *flags)
-{
-       schedule();
-       return 0;
-}
-
-/*
- * wait_on_bit() sleep function for interruptible waiting
- */
-static int key_wait_bit_intr(void *flags)
-{
-       schedule();
-       return signal_pending(current) ? -ERESTARTSYS : 0;
-}
-
 /**
  * complete_request_key - Complete the construction of a key.
  * @cons: The key construction record.
@@ -592,10 +574,9 @@ int wait_for_key_construction(struct key *key, bool intr)
        int ret;
 
        ret = wait_on_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT,
-                         intr ? key_wait_bit_intr : key_wait_bit,
                          intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-       if (ret < 0)
-               return ret;
+       if (ret)
+               return -ERESTARTSYS;
        if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
                smp_rmb();
                return key->type_data.reject_error;
index f038f5a..f0b0e14 100644 (file)
@@ -288,6 +288,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
 {
        struct snd_kcontrol *kctl;
 
+       /* Make sure that the ids assigned to the control do not wrap around */
+       if (card->last_numid >= UINT_MAX - count)
+               card->last_numid = 0;
+
        list_for_each_entry(kctl, &card->controls, list) {
                if (kctl->id.numid < card->last_numid + 1 + count &&
                    kctl->id.numid + kctl->count > card->last_numid + 1) {
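The new guard heads off unsigned wrap in the comparisons just below: the scan probes IDs up to last_numid + 1 + count, and once last_numid >= UINT_MAX - count that sum wraps to a small number and defeats the conflict check; resetting last_numid to 0 keeps the arithmetic in range. The wrap it prevents, checked standalone:

#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned int last_numid = UINT_MAX - 2, count = 4;

	assert(last_numid >= UINT_MAX - count);	/* guard fires... */
	assert(last_numid + 1 + count == 2);	/* ...before this wrap */
	return 0;
}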
@@ -330,6 +334,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
 {
        struct snd_ctl_elem_id id;
        unsigned int idx;
+       unsigned int count;
        int err = -EINVAL;
 
        if (! kcontrol)
@@ -337,6 +342,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
        if (snd_BUG_ON(!card || !kcontrol->info))
                goto error;
        id = kcontrol->id;
+       if (id.index > UINT_MAX - kcontrol->count)
+               goto error;
+
        down_write(&card->controls_rwsem);
        if (snd_ctl_find_id(card, &id)) {
                up_write(&card->controls_rwsem);
@@ -358,8 +366,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
        card->controls_count += kcontrol->count;
        kcontrol->id.numid = card->last_numid + 1;
        card->last_numid += kcontrol->count;
+       count = kcontrol->count;
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
                snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
        return 0;
 
@@ -388,6 +397,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
                    bool add_on_replace)
 {
        struct snd_ctl_elem_id id;
+       unsigned int count;
        unsigned int idx;
        struct snd_kcontrol *old;
        int ret;
@@ -423,8 +433,9 @@ add:
        card->controls_count += kcontrol->count;
        kcontrol->id.numid = card->last_numid + 1;
        card->last_numid += kcontrol->count;
+       count = kcontrol->count;
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
                snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
        return 0;
 
@@ -897,9 +908,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
                        result = kctl->put(kctl, control);
                }
                if (result > 0) {
+                       struct snd_ctl_elem_id id = control->id;
                        up_read(&card->controls_rwsem);
-                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
-                                      &control->id);
+                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
                        return 0;
                }
        }
@@ -991,6 +1002,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
 
 struct user_element {
        struct snd_ctl_elem_info info;
+       struct snd_card *card;
        void *elem_data;                /* element data */
        unsigned long elem_data_size;   /* size of element data in bytes */
        void *tlv_data;                 /* TLV data */
@@ -1034,7 +1046,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
 {
        struct user_element *ue = kcontrol->private_data;
 
+       mutex_lock(&ue->card->user_ctl_lock);
        memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
+       mutex_unlock(&ue->card->user_ctl_lock);
        return 0;
 }
 
@@ -1043,10 +1057,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
 {
        int change;
        struct user_element *ue = kcontrol->private_data;
-       
+
+       mutex_lock(&ue->card->user_ctl_lock);
        change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
        if (change)
                memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
+       mutex_unlock(&ue->card->user_ctl_lock);
        return change;
 }
 
@@ -1066,19 +1082,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
                new_data = memdup_user(tlv, size);
                if (IS_ERR(new_data))
                        return PTR_ERR(new_data);
+               mutex_lock(&ue->card->user_ctl_lock);
                change = ue->tlv_data_size != size;
                if (!change)
                        change = memcmp(ue->tlv_data, new_data, size);
                kfree(ue->tlv_data);
                ue->tlv_data = new_data;
                ue->tlv_data_size = size;
+               mutex_unlock(&ue->card->user_ctl_lock);
        } else {
-               if (! ue->tlv_data_size || ! ue->tlv_data)
-                       return -ENXIO;
-               if (size < ue->tlv_data_size)
-                       return -ENOSPC;
+               int ret = 0;
+
+               mutex_lock(&ue->card->user_ctl_lock);
+               if (!ue->tlv_data_size || !ue->tlv_data) {
+                       ret = -ENXIO;
+                       goto err_unlock;
+               }
+               if (size < ue->tlv_data_size) {
+                       ret = -ENOSPC;
+                       goto err_unlock;
+               }
                if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
-                       return -EFAULT;
+                       ret = -EFAULT;
+err_unlock:
+               mutex_unlock(&ue->card->user_ctl_lock);
+               if (ret)
+                       return ret;
        }
        return change;
 }
@@ -1136,8 +1165,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
        struct user_element *ue;
        int idx, err;
 
-       if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
-               return -ENOMEM;
        if (info->count < 1)
                return -EINVAL;
        access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
@@ -1146,21 +1173,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
                                 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
        info->id.numid = 0;
        memset(&kctl, 0, sizeof(kctl));
-       down_write(&card->controls_rwsem);
-       _kctl = snd_ctl_find_id(card, &info->id);
-       err = 0;
-       if (_kctl) {
-               if (replace)
-                       err = snd_ctl_remove(card, _kctl);
-               else
-                       err = -EBUSY;
-       } else {
-               if (replace)
-                       err = -ENOENT;
+
+       if (replace) {
+               err = snd_ctl_remove_user_ctl(file, &info->id);
+               if (err)
+                       return err;
        }
-       up_write(&card->controls_rwsem);
-       if (err < 0)
-               return err;
+
+       if (card->user_ctl_count >= MAX_USER_CONTROLS)
+               return -ENOMEM;
+
        memcpy(&kctl.id, &info->id, sizeof(info->id));
        kctl.count = info->owner ? info->owner : 1;
        access |= SNDRV_CTL_ELEM_ACCESS_USER;
@@ -1210,6 +1232,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
        ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
        if (ue == NULL)
                return -ENOMEM;
+       ue->card = card;
        ue->info = *info;
        ue->info.access = 0;
        ue->elem_data = (char *)ue + sizeof(*ue);
@@ -1321,8 +1344,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
                }
                err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
                if (err > 0) {
+                       struct snd_ctl_elem_id id = kctl->id;
                        up_read(&card->controls_rwsem);
-                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
+                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
                        return 0;
                }
        } else {
index 5ee8384..7bdfd19 100644 (file)
@@ -232,6 +232,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
        INIT_LIST_HEAD(&card->devices);
        init_rwsem(&card->controls_rwsem);
        rwlock_init(&card->ctl_files_rwlock);
+       mutex_init(&card->user_ctl_lock);
        INIT_LIST_HEAD(&card->controls);
        INIT_LIST_HEAD(&card->ctl_files);
        spin_lock_init(&card->files_lock);
index 6af50eb..70faa3a 100644 (file)
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
        struct special_params *params = bebob->maudio_special_quirk;
        int err, id;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
        if (id >= ARRAY_SIZE(special_clk_labels))
-               return 0;
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob, id,
                                         params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
                                         params->clk_lock);
        mutex_unlock(&bebob->mutex);
 
-       return err >= 0;
+       if (err >= 0)
+               err = 1;
+
+       return err;
 }
 static struct snd_kcontrol_new special_clk_ctl = {
        .name   = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
        .get    = special_sync_ctl_get,
 };
 
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
        "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
 };
 static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item]);
+              special_dig_in_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id, dig_in_fmt, dig_in_iface;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+               return -EINVAL;
 
        /* decode user value */
        dig_in_fmt = (id >> 1) & 0x01;
        dig_in_iface = id & 0x01;
 
+       mutex_lock(&bebob->mutex);
+
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         dig_in_fmt,
                                         params->dig_out_fmt,
                                         params->clk_lock);
-       if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+       if (err < 0)
+               goto end;
+
+       /* For ADAT, only the optical interface is available. */
+       if (params->dig_in_fmt > 0) {
+               err = 1;
                goto end;
+       }
 
+       /* For S/PDIF, optical/coaxial interfaces are selectable. */
        err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
        if (err < 0)
                dev_err(&bebob->unit->device,
                        "fail to set digital input interface: %d\n", err);
+       err = 1;
 end:
        special_stream_formation_set(bebob);
        mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
        .put    = special_dig_in_iface_ctl_set
 };
 
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+       "S/PDIF Optical and Coaxial", "ADAT Optical"
+};
 static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
                                          struct snd_ctl_elem_info *einf)
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item + 1]);
+              special_dig_out_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         params->dig_in_fmt,
                                         id, params->clk_lock);
-       if (err >= 0)
+       if (err >= 0) {
                special_stream_formation_set(bebob);
+               err = 1;
+       }
 
        mutex_unlock(&bebob->mutex);
        return err;
index b684c6e..dabe419 100644 (file)
@@ -898,6 +898,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
                        if (!strcmp(codec->modelname, models->name)) {
                                codec->fixup_id = models->id;
                                codec->fixup_name = models->name;
+                               codec->fixup_list = fixlist;
                                codec->fixup_forced = 1;
                                return;
                        }
index 480bbdd..6df04d9 100644 (file)
@@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
                                dsp_unlock(azx_dev);
                                return azx_dev;
                        }
-                       if (!res)
+                       if (!res ||
+                           (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
                                res = azx_dev;
                }
                dsp_unlock(azx_dev);
index 9d07e4e..8b4940b 100644 (file)
 #include <linux/module.h>
 #include <sound/core.h>
 #include <drm/i915_powerwell.h>
+#include "hda_priv.h"
 #include "hda_i915.h"
 
-static void (*get_power)(void);
-static void (*put_power)(void);
+/* Intel HSW/BDW display HDA controller Extended Mode registers.
+ * EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display
+ * Clock) to 24MHz BCLK: BCLK = CDCLK * M / N
+ * The values will be lost when the display power well is disabled.
+ */
+#define ICH6_REG_EM4                   0x100c
+#define ICH6_REG_EM5                   0x1010
+
+static int (*get_power)(void);
+static int (*put_power)(void);
+static int (*get_cdclk)(void);
 
-void hda_display_power(bool enable)
+int hda_display_power(bool enable)
 {
        if (!get_power || !put_power)
-               return;
+               return -ENODEV;
 
        pr_debug("HDA display power %s \n",
                        enable ? "Enable" : "Disable");
        if (enable)
-               get_power();
+               return get_power();
        else
-               put_power();
+               return put_power();
+}
+
+void haswell_set_bclk(struct azx *chip)
+{
+       int cdclk_freq;
+       unsigned int bclk_m, bclk_n;
+
+       if (!get_cdclk)
+               return;
+
+       cdclk_freq = get_cdclk();
+       switch (cdclk_freq) {
+       case 337500:
+               bclk_m = 16;
+               bclk_n = 225;
+               break;
+
+       case 450000:
+       default: /* default CDCLK 450MHz */
+               bclk_m = 4;
+               bclk_n = 75;
+               break;
+
+       case 540000:
+               bclk_m = 4;
+               bclk_n = 90;
+               break;
+
+       case 675000:
+               bclk_m = 8;
+               bclk_n = 225;
+               break;
+       }
+
+       azx_writew(chip, EM4, bclk_m);
+       azx_writew(chip, EM5, bclk_n);
 }
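Every (M, N) pair in the switch encodes the same target: BCLK = CDCLK * M / N = 24 MHz, with CDCLK in kHz. Checking all four rows: 337500*16/225 = 450000*4/75 = 540000*4/90 = 675000*8/225 = 24000 kHz. A quick standalone verification of the table:

#include <assert.h>

int main(void)
{
	/* { cdclk_khz, m, n } rows from haswell_set_bclk() above */
	static const long t[][3] = {
		{ 337500, 16, 225 }, { 450000, 4, 75 },
		{ 540000, 4, 90 },   { 675000, 8, 225 },
	};
	for (int i = 0; i < 4; i++)
		assert(t[i][0] * t[i][1] / t[i][2] == 24000);	/* 24 MHz BCLK */
	return 0;
}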
 
+
 int hda_i915_init(void)
 {
        int err = 0;
@@ -55,6 +102,10 @@ int hda_i915_init(void)
                return -ENODEV;
        }
 
+       get_cdclk = symbol_request(i915_get_cdclk_freq);
+       if (!get_cdclk) /* may have abnormal BCLK and audio playback rate */
+               pr_warn("hda-i915: get_cdclk symbol get fail\n");
+
        pr_debug("HDA driver get symbol successfully from i915 module\n");
 
        return err;
@@ -70,6 +121,10 @@ int hda_i915_exit(void)
                symbol_put(i915_release_power_well);
                put_power = NULL;
        }
+       if (get_cdclk) {
+               symbol_put(i915_get_cdclk_freq);
+               get_cdclk = NULL;
+       }
 
        return 0;
 }
index 5a63da2..e6072c6 100644 (file)
 #define __SOUND_HDA_I915_H
 
 #ifdef CONFIG_SND_HDA_I915
-void hda_display_power(bool enable);
+int hda_display_power(bool enable);
+void haswell_set_bclk(struct azx *chip);
 int hda_i915_init(void);
 int hda_i915_exit(void);
 #else
-static inline void hda_display_power(bool enable) {}
+static inline int hda_display_power(bool enable) { return 0; }
+static inline void haswell_set_bclk(struct azx *chip) { return; }
 static inline int hda_i915_init(void)
 {
        return -ENODEV;
index bb65a12..83cd190 100644 (file)
@@ -62,9 +62,9 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/firmware.h>
 #include "hda_codec.h"
-#include "hda_i915.h"
 #include "hda_controller.h"
 #include "hda_priv.h"
+#include "hda_i915.h"
 
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@@ -227,7 +227,7 @@ enum {
 /* quirks for Intel PCH */
 #define AZX_DCAPS_INTEL_PCH_NOPM \
        (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
-        AZX_DCAPS_COUNT_LPIB_DELAY)
+        AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
 
 #define AZX_DCAPS_INTEL_PCH \
        (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -288,6 +288,11 @@ static char *driver_short_names[] = {
        [AZX_DRIVER_GENERIC] = "HD-Audio Generic",
 };
 
+struct hda_intel {
+       struct azx chip;
+};
+
+
 #ifdef CONFIG_X86
 static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
 {
@@ -591,7 +596,7 @@ static int azx_suspend(struct device *dev)
        struct azx *chip = card->private_data;
        struct azx_pcm *p;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -606,6 +611,7 @@ static int azx_suspend(struct device *dev)
                free_irq(chip->irq, chip);
                chip->irq = -1;
        }
+
        if (chip->msi)
                pci_disable_msi(chip->pci);
        pci_disable_device(pci);
@@ -622,11 +628,13 @@ static int azx_resume(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
                hda_display_power(true);
+               haswell_set_bclk(chip);
+       }
        pci_set_power_state(pci, PCI_D0);
        pci_restore_state(pci);
        if (pci_enable_device(pci) < 0) {
@@ -657,7 +665,7 @@ static int azx_runtime_suspend(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -672,6 +680,7 @@ static int azx_runtime_suspend(struct device *dev)
        azx_clear_irq_pending(chip);
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                hda_display_power(false);
+
        return 0;
 }
 
@@ -683,14 +692,16 @@ static int azx_runtime_resume(struct device *dev)
        struct hda_codec *codec;
        int status;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
                hda_display_power(true);
+               haswell_set_bclk(chip);
+       }
 
        /* Read STATESTS before controller reset */
        status = azx_readw(chip, STATESTS);
@@ -718,7 +729,7 @@ static int azx_runtime_idle(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!power_save_controller ||
@@ -883,6 +894,8 @@ static int register_vga_switcheroo(struct azx *chip)
 static int azx_free(struct azx *chip)
 {
        struct pci_dev *pci = chip->pci;
+       struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+
        int i;
 
        if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
@@ -930,7 +943,7 @@ static int azx_free(struct azx *chip)
                hda_display_power(false);
                hda_i915_exit();
        }
-       kfree(chip);
+       kfree(hda);
 
        return 0;
 }
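
kfree(hda) instead of kfree(chip) matters because the allocation unit is now the enclosing struct hda_intel; container_of() recovers the outer pointer and keeps working even if chip later stops being the first member. A reduced sketch of the embedding pattern (struct names hypothetical):

#include <linux/kernel.h>
#include <linux/slab.h>

struct inner {
        int x;
};

struct outer {
        int extra_state;        /* driver-private fields live out here */
        struct inner in;        /* embedded, not separately allocated */
};

static void free_from_inner(struct inner *ip)
{
        /* recover the enclosing object so kfree() gets the pointer
         * that kzalloc() originally returned */
        struct outer *o = container_of(ip, struct outer, in);

        kfree(o);
}
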
@@ -1174,6 +1187,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
        static struct snd_device_ops ops = {
                .dev_free = azx_dev_free,
        };
+       struct hda_intel *hda;
        struct azx *chip;
        int err;
 
@@ -1183,13 +1197,14 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
        if (err < 0)
                return err;
 
-       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
-       if (!chip) {
-               dev_err(card->dev, "Cannot allocate chip\n");
+       hda = kzalloc(sizeof(*hda), GFP_KERNEL);
+       if (!hda) {
+               dev_err(card->dev, "Cannot allocate hda\n");
                pci_disable_device(pci);
                return -ENOMEM;
        }
 
+       chip = &hda->chip;
        spin_lock_init(&chip->reg_lock);
        mutex_init(&chip->open_mutex);
        chip->card = card;
@@ -1375,6 +1390,10 @@ static int azx_first_init(struct azx *chip)
 
        /* initialize chip */
        azx_init_pci(chip);
+
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+               haswell_set_bclk(chip);
+
        azx_init_chip(chip, (probe_only[dev] & 2) == 0);
 
        /* codec detection */
@@ -1656,8 +1675,13 @@ static int azx_probe_continue(struct azx *chip)
                                "Error request power-well from i915\n");
                        goto out_free;
                }
+               err = hda_display_power(true);
+               if (err < 0) {
+                       dev_err(chip->card->dev,
+                               "Cannot turn on display power on i915\n");
+                       goto out_free;
+               }
 #endif
-               hda_display_power(true);
        }
 
        err = azx_first_init(chip);
index ebd1fa6..4e2d486 100644 (file)
@@ -417,6 +417,27 @@ struct snd_hda_pin_quirk {
        int value;                      /* quirk value */
 };
 
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+
+#define SND_HDA_PIN_QUIRK(_codec, _subvendor, _name, _value, _pins...) \
+       { .codec = _codec,\
+         .subvendor = _subvendor,\
+         .name = _name,\
+         .value = _value,\
+         .pins = (const struct hda_pintbl[]) { _pins } \
+       }
+#else
+
+#define SND_HDA_PIN_QUIRK(_codec, _subvendor, _name, _value, _pins...) \
+       { .codec = _codec,\
+         .subvendor = _subvendor,\
+         .value = _value,\
+         .pins = (const struct hda_pintbl[]) { _pins } \
+       }
+
+#endif
+
+
 /* fixup types */
 enum {
        HDA_FIXUP_INVALID,
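
The SND_HDA_PIN_QUIRK macro defined above takes the pin pairs as variadic arguments and wraps them in a compound-literal array, compiling out the .name initializer unless CONFIG_SND_DEBUG_VERBOSE is set; this is what collapses the quirk tables later in this series. A self-contained model of the same trick (the types and the {0, 0} terminator are illustrative additions for the demo loop):

#include <stdio.h>

struct pin { unsigned nid, val; };

struct quirk {
        unsigned codec;
        const struct pin *pins;
};

/* pack trailing {nid, val} pairs into an anonymous array */
#define QUIRK(_codec, _pins...) \
        { .codec = _codec, \
          .pins = (const struct pin[]) { _pins, { 0, 0 } } }

static const struct quirk tbl[] = {
        QUIRK(0x10ec0255, {0x12, 0x90a60140}, {0x14, 0x90170110}),
        {}
};

int main(void)
{
        const struct pin *p;

        for (p = tbl[0].pins; p->nid; p++)
                printf("nid 0x%02x -> cfg 0x%08x\n", p->nid, p->val);
        return 0;
}
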
index 4a7cb01..e9d1a57 100644 (file)
@@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_BUFSIZE      (1 << 21)       /* no buffer size alignment */
 #define AZX_DCAPS_ALIGN_BUFSIZE        (1 << 22)       /* buffer size alignment */
 #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23)   /* BDLE in 4k boundary */
+#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24)     /* Assign devices in reverse order */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
index a366ba9..358414d 100644 (file)
@@ -236,6 +236,7 @@ disable_hda:
        return rc;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void hda_tegra_disable_clocks(struct hda_tegra *data)
 {
        clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
        clk_disable_unprepare(data->hda_clk);
 }
 
-#ifdef CONFIG_PM_SLEEP
 /*
  * power management
  */
index 3e4417b..ba4ca52 100644 (file)
@@ -2204,7 +2204,7 @@ static int generic_hdmi_resume(struct hda_codec *codec)
        struct hdmi_spec *spec = codec->spec;
        int pin_idx;
 
-       generic_hdmi_init(codec);
+       codec->patch_ops.init(codec);
        snd_hda_codec_resume_amp(codec);
        snd_hda_codec_resume_cache(codec);
 
@@ -3337,6 +3337,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0060, .name = "GPU 60 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0051");
 MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
index af76995..b60824e 100644 (file)
@@ -4880,6 +4880,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -4962,228 +4963,141 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 };
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60140},
-                       {0x14, 0x90170110},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211020},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170120},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211030},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170120},
-                       {0x17, 0x90170140},
-                       {0x18, 0x40000000},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41163b05},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x0321102f},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170130},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211040},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170140},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211050},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60170},
-                       {0x14, 0x90170120},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211030},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60170},
-                       {0x14, 0x90170130},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211040},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0283,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60130},
-                       {0x14, 0x90170110},
-                       {0x17, 0x40020008},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40e00001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x0321101f},
-               },
-               .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0283,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170120},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211030},
-               },
-               .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0292,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60140},
-                       {0x13, 0x411111f0},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0221401f},
-                       {0x16, 0x411111f0},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-               },
-               .value = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0293,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x40000000},
-                       {0x13, 0x90a60140},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0221401f},
-                       {0x16, 0x21014020},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x21a19030},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-               },
-               .value = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60140},
+               {0x14, 0x90170110},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+               {0x17, 0x90170140},
+               {0x18, 0x40000000},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41163b05},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0321102f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170130},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170140},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211050}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60170},
+               {0x14, 0x90170120},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60170},
+               {0x14, 0x90170130},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x17, 0x40020008},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40e00001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0321101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+               {0x12, 0x90a60140},
+               {0x13, 0x411111f0},
+               {0x14, 0x90170110},
+               {0x15, 0x0221401f},
+               {0x16, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x13, 0x90a60140},
+               {0x14, 0x90170110},
+               {0x15, 0x0221401f},
+               {0x16, 0x21014020},
+               {0x18, 0x411111f0},
+               {0x19, 0x21a19030},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x13, 0x90a60140},
+               {0x14, 0x90170110},
+               {0x15, 0x0221401f},
+               {0x16, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
        {}
 };
 
@@ -6039,90 +5953,66 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
 };
 
 static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
-       {
-               .codec = 0x10ec0668,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x99a30130},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0321101f},
-                       {0x16, 0x03011020},
-                       {0x18, 0x40000008},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41000001},
-                       {0x1e, 0x411111f0},
-                       {0x1f, 0x411111f0},
-               },
-               .value = ALC668_FIXUP_AUTO_MUTE,
-       },
-       {
-               .codec = 0x10ec0668,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x99a30140},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0321101f},
-                       {0x16, 0x03011020},
-                       {0x18, 0x40000008},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41000001},
-                       {0x1e, 0x411111f0},
-                       {0x1f, 0x411111f0},
-               },
-               .value = ALC668_FIXUP_AUTO_MUTE,
-       },
-       {
-               .codec = 0x10ec0668,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x99a30150},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0321101f},
-                       {0x16, 0x03011020},
-                       {0x18, 0x40000008},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41000001},
-                       {0x1e, 0x411111f0},
-                       {0x1f, 0x411111f0},
-               },
-               .value = ALC668_FIXUP_AUTO_MUTE,
-       },
-       {
-               .codec = 0x10ec0668,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x411111f0},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0321101f},
-                       {0x16, 0x03011020},
-                       {0x18, 0x40000008},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41000001},
-                       {0x1e, 0x411111f0},
-                       {0x1f, 0x411111f0},
-               },
-               .value = ALC668_FIXUP_AUTO_MUTE,
-       },
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x99a30130},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x03011020},
+               {0x18, 0x40000008},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41000001},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x99a30140},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x03011020},
+               {0x18, 0x40000008},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41000001},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x99a30150},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x03011020},
+               {0x18, 0x40000008},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41000001},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x411111f0},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x03011020},
+               {0x18, 0x40000008},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41000001},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell XPS 15", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40d6832d},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
        {}
 };
 
index 7f40a15..3744ea4 100644 (file)
@@ -121,6 +121,12 @@ enum {
        STAC_92HD71BXX_MODELS
 };
 
+enum {
+       STAC_92HD95_HP_LED,
+       STAC_92HD95_HP_BASS,
+       STAC_92HD95_MODELS
+};
+
 enum {
        STAC_925x_REF,
        STAC_M1,
@@ -4128,6 +4134,48 @@ static const struct snd_pci_quirk stac9205_fixup_tbl[] = {
        {} /* terminator */
 };
 
+static void stac92hd95_fixup_hp_led(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct sigmatel_spec *spec = codec->spec;
+
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       if (find_mute_led_cfg(codec, spec->default_polarity))
+               codec_dbg(codec, "mute LED gpio %d polarity %d\n",
+                               spec->gpio_led,
+                               spec->gpio_led_polarity);
+}
+
+static const struct hda_fixup stac92hd95_fixups[] = {
+       [STAC_92HD95_HP_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = stac92hd95_fixup_hp_led,
+       },
+       [STAC_92HD95_HP_BASS] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       {0x1a, 0x795, 0x00}, /* HPF to 100Hz */
+                       {}
+               },
+               .chained = true,
+               .chain_id = STAC_92HD95_HP_LED,
+       },
+};
+
+static const struct snd_pci_quirk stac92hd95_fixup_tbl[] = {
+       SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1911, "HP Spectre 13", STAC_92HD95_HP_BASS),
+       {} /* terminator */
+};
+
+static const struct hda_model_fixup stac92hd95_models[] = {
+       { .id = STAC_92HD95_HP_LED, .name = "hp-led" },
+       { .id = STAC_92HD95_HP_BASS, .name = "hp-bass" },
+       {}
+};
+
+
 static int stac_parse_auto_config(struct hda_codec *codec)
 {
        struct sigmatel_spec *spec = codec->spec;
@@ -4580,10 +4628,16 @@ static int patch_stac92hd95(struct hda_codec *codec)
        spec->gen.beep_nid = 0x19; /* digital beep */
        spec->pwr_nids = stac92hd95_pwr_nids;
        spec->num_pwrs = ARRAY_SIZE(stac92hd95_pwr_nids);
-       spec->default_polarity = -1; /* no default cfg */
+       spec->default_polarity = 0;
 
        codec->patch_ops = stac_patch_ops;
 
+       snd_hda_pick_fixup(codec, stac92hd95_models, stac92hd95_fixup_tbl,
+                          stac92hd95_fixups);
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
+       stac_setup_gpio(codec);
+
        err = stac_parse_auto_config(codec);
        if (err < 0) {
                stac_free(codec);
@@ -4592,6 +4646,8 @@ static int patch_stac92hd95(struct hda_codec *codec)
 
        codec->proc_widget_hook = stac92hd_proc_hook;
 
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
+
        return 0;
 }
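
STAC_92HD95_HP_BASS above also shows the fixup chaining convention: after its verbs are applied, .chained/.chain_id hand control to STAC_92HD95_HP_LED. A reduced model of how such a chain is walked (the table and the apply step are illustrative, not the HDA implementation):

#include <stdio.h>

struct fixup {
        const char *name;
        int chained;
        int chain_id;
};

static const struct fixup fixups[] = {
        [0] = { .name = "hp-led" },
        [1] = { .name = "hp-bass", .chained = 1, .chain_id = 0 },
};

static void apply_chain(const struct fixup *tbl, int id)
{
        for (;;) {
                printf("applying %s\n", tbl[id].name); /* apply one fixup */
                if (!tbl[id].chained)
                        break;
                id = tbl[id].chain_id;
        }
}

int main(void)
{
        apply_chain(fixups, 1); /* bass applies first, then chains to led */
        return 0;
}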
 
index cbfa1e1..0b9571c 100644 (file)
@@ -225,11 +225,11 @@ config SND_SOC_ADAU1373
 config SND_SOC_ADAU1701
        tristate "Analog Devices ADAU1701 CODEC"
        depends on I2C
-       select SND_SOC_SIGMADSP
+       select SND_SOC_SIGMADSP_I2C
 
 config SND_SOC_ADAU17X1
        tristate
-       select SND_SOC_SIGMADSP
+       select SND_SOC_SIGMADSP_REGMAP
 
 config SND_SOC_ADAU1761
        tristate
@@ -476,6 +476,14 @@ config SND_SOC_SIGMADSP
        tristate
        select CRC32
 
+config SND_SOC_SIGMADSP_I2C
+       tristate
+       select SND_SOC_SIGMADSP
+
+config SND_SOC_SIGMADSP_REGMAP
+       tristate
+       select SND_SOC_SIGMADSP
+
 config SND_SOC_SIRF_AUDIO_CODEC
        tristate "SiRF SoC internal audio codec"
        select REGMAP_MMIO
index be3377b..1bd6e1c 100644 (file)
@@ -77,6 +77,8 @@ snd-soc-sgtl5000-objs := sgtl5000.o
 snd-soc-alc5623-objs := alc5623.o
 snd-soc-alc5632-objs := alc5632.o
 snd-soc-sigmadsp-objs := sigmadsp.o
+snd-soc-sigmadsp-i2c-objs := sigmadsp-i2c.o
+snd-soc-sigmadsp-regmap-objs := sigmadsp-regmap.o
 snd-soc-si476x-objs := si476x.o
 snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o
 snd-soc-sn95031-objs := sn95031.o
@@ -240,6 +242,8 @@ obj-$(CONFIG_SND_SOC_RT5651)        += snd-soc-rt5651.o
 obj-$(CONFIG_SND_SOC_RT5677)   += snd-soc-rt5677.o
 obj-$(CONFIG_SND_SOC_SGTL5000)  += snd-soc-sgtl5000.o
 obj-$(CONFIG_SND_SOC_SIGMADSP) += snd-soc-sigmadsp.o
+obj-$(CONFIG_SND_SOC_SIGMADSP_I2C)     += snd-soc-sigmadsp-i2c.o
+obj-$(CONFIG_SND_SOC_SIGMADSP_REGMAP)  += snd-soc-sigmadsp-regmap.o
 obj-$(CONFIG_SND_SOC_SI476X)   += snd-soc-si476x.o
 obj-$(CONFIG_SND_SOC_SN95031)  +=snd-soc-sn95031.o
 obj-$(CONFIG_SND_SOC_SPDIF)    += snd-soc-spdif-rx.o snd-soc-spdif-tx.o
diff --git a/sound/soc/codecs/sigmadsp-i2c.c b/sound/soc/codecs/sigmadsp-i2c.c
new file mode 100644 (file)
index 0000000..246081a
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Load Analog Devices SigmaStudio firmware files
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/i2c.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include "sigmadsp.h"
+
+static int sigma_action_write_i2c(void *control_data,
+       const struct sigma_action *sa, size_t len)
+{
+       return i2c_master_send(control_data, (const unsigned char *)&sa->addr,
+               len);
+}
+
+int process_sigma_firmware(struct i2c_client *client, const char *name)
+{
+       struct sigma_firmware ssfw;
+
+       ssfw.control_data = client;
+       ssfw.write = sigma_action_write_i2c;
+
+       return _process_sigma_firmware(&client->dev, &ssfw, name);
+}
+EXPORT_SYMBOL(process_sigma_firmware);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("SigmaDSP I2C firmware loader");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sigmadsp-regmap.c b/sound/soc/codecs/sigmadsp-regmap.c
new file mode 100644 (file)
index 0000000..f78ed8d
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Load Analog Devices SigmaStudio firmware files
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/regmap.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include "sigmadsp.h"
+
+static int sigma_action_write_regmap(void *control_data,
+       const struct sigma_action *sa, size_t len)
+{
+       return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
+               sa->payload, len - 2);
+}
+
+int process_sigma_firmware_regmap(struct device *dev, struct regmap *regmap,
+       const char *name)
+{
+       struct sigma_firmware ssfw;
+
+       ssfw.control_data = regmap;
+       ssfw.write = sigma_action_write_regmap;
+
+       return _process_sigma_firmware(dev, &ssfw, name);
+}
+EXPORT_SYMBOL(process_sigma_firmware_regmap);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("SigmaDSP regmap firmware loader");
+MODULE_LICENSE("GPL");
index 4068f24..f2de7e0 100644 (file)
@@ -34,23 +34,6 @@ enum {
        SIGMA_ACTION_END,
 };
 
-struct sigma_action {
-       u8 instr;
-       u8 len_hi;
-       __le16 len;
-       __be16 addr;
-       unsigned char payload[];
-} __packed;
-
-struct sigma_firmware {
-       const struct firmware *fw;
-       size_t pos;
-
-       void *control_data;
-       int (*write)(void *control_data, const struct sigma_action *sa,
-                       size_t len);
-};
-
 static inline u32 sigma_action_len(struct sigma_action *sa)
 {
        return (sa->len_hi << 16) | le16_to_cpu(sa->len);
@@ -138,7 +121,7 @@ process_sigma_actions(struct sigma_firmware *ssfw)
        return 0;
 }
 
-static int _process_sigma_firmware(struct device *dev,
+int _process_sigma_firmware(struct device *dev,
        struct sigma_firmware *ssfw, const char *name)
 {
        int ret;
@@ -197,50 +180,6 @@ static int _process_sigma_firmware(struct device *dev,
 
        return ret;
 }
-
-#if IS_ENABLED(CONFIG_I2C)
-
-static int sigma_action_write_i2c(void *control_data,
-       const struct sigma_action *sa, size_t len)
-{
-       return i2c_master_send(control_data, (const unsigned char *)&sa->addr,
-               len);
-}
-
-int process_sigma_firmware(struct i2c_client *client, const char *name)
-{
-       struct sigma_firmware ssfw;
-
-       ssfw.control_data = client;
-       ssfw.write = sigma_action_write_i2c;
-
-       return _process_sigma_firmware(&client->dev, &ssfw, name);
-}
-EXPORT_SYMBOL(process_sigma_firmware);
-
-#endif
-
-#if IS_ENABLED(CONFIG_REGMAP)
-
-static int sigma_action_write_regmap(void *control_data,
-       const struct sigma_action *sa, size_t len)
-{
-       return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
-               sa->payload, len - 2);
-}
-
-int process_sigma_firmware_regmap(struct device *dev, struct regmap *regmap,
-       const char *name)
-{
-       struct sigma_firmware ssfw;
-
-       ssfw.control_data = regmap;
-       ssfw.write = sigma_action_write_regmap;
-
-       return _process_sigma_firmware(dev, &ssfw, name);
-}
-EXPORT_SYMBOL(process_sigma_firmware_regmap);
-
-#endif
+EXPORT_SYMBOL_GPL(_process_sigma_firmware);
 
 MODULE_LICENSE("GPL");
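
After this split the core loader is transport-neutral: _process_sigma_firmware() only ever calls ssfw->write() on ssfw->control_data, and the new sigmadsp-i2c.c and sigmadsp-regmap.c files each supply their own pair. A minimal userspace model of that design (write_stdout() stands in for a transport backend):

#include <stdio.h>

struct fw_ctx {
        void *control_data;
        int (*write)(void *control_data, const void *buf, size_t len);
};

static int write_stdout(void *control_data, const void *buf, size_t len)
{
        (void)control_data;
        return (int)fwrite(buf, 1, len, stdout);
}

/* the "core": walks the payload without knowing the transport */
static int process(struct fw_ctx *c, const void *buf, size_t len)
{
        return c->write(c->control_data, buf, len);
}

int main(void)
{
        struct fw_ctx c = { .control_data = NULL, .write = write_stdout };

        return process(&c, "hi\n", 3) == 3 ? 0 : 1;
}
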
index e439cbd..c47cd23 100644 (file)
 #include <linux/device.h>
 #include <linux/regmap.h>
 
+struct sigma_action {
+       u8 instr;
+       u8 len_hi;
+       __le16 len;
+       __be16 addr;
+       unsigned char payload[];
+} __packed;
+
+struct sigma_firmware {
+       const struct firmware *fw;
+       size_t pos;
+
+       void *control_data;
+       int (*write)(void *control_data, const struct sigma_action *sa,
+                       size_t len);
+};
+
+int _process_sigma_firmware(struct device *dev,
+       struct sigma_firmware *ssfw, const char *name);
+
 struct i2c_client;
 
 extern int process_sigma_firmware(struct i2c_client *client, const char *name);
index 6bb0ea5..a609aaf 100644 (file)
@@ -923,8 +923,8 @@ static int fsl_soc_dma_probe(struct platform_device *pdev)
        dma->dai.pcm_free = fsl_dma_free_dma_buffers;
 
        /* Store the SSI-specific information that we need */
-       dma->ssi_stx_phys = res.start + offsetof(struct ccsr_ssi, stx0);
-       dma->ssi_srx_phys = res.start + offsetof(struct ccsr_ssi, srx0);
+       dma->ssi_stx_phys = res.start + CCSR_SSI_STX0;
+       dma->ssi_srx_phys = res.start + CCSR_SSI_SRX0;
 
        iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL);
        if (iprop)
index b912d45..d7a6061 100644 (file)
@@ -762,7 +762,7 @@ static int fsl_spdif_vbit_get(struct snd_kcontrol *kcontrol,
        struct regmap *regmap = spdif_priv->regmap;
        u32 val;
 
-       val = regmap_read(regmap, REG_SPDIF_SIS, &val);
+       regmap_read(regmap, REG_SPDIF_SIS, &val);
        ucontrol->value.integer.value[0] = (val & INT_VAL_NOGOOD) != 0;
        regmap_write(regmap, REG_SPDIF_SIC, INT_VAL_NOGOOD);
 
@@ -1076,7 +1076,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
                                goto out;
                        } else if (arate / rate[index] == 1) {
                                /* A little bigger than expect */
-                               sub = (arate - rate[index]) * 100000;
+                               sub = (u64)(arate - rate[index]) * 100000;
                                do_div(sub, rate[index]);
                                if (sub >= savesub)
                                        continue;
@@ -1086,7 +1086,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
                                spdif_priv->txrate[index] = arate;
                        } else if (rate[index] / arate == 1) {
                                /* A little smaller than expect */
-                               sub = (rate[index] - arate) * 100000;
+                               sub = (u64)(rate[index] - arate) * 100000;
                                do_div(sub, rate[index]);
                                if (sub >= savesub)
                                        continue;
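
The two (u64) casts in fsl_spdif_txclk_caldiv() fix a width bug: arate and rate[] are 32-bit, so (arate - rate[index]) * 100000 is evaluated in 32-bit arithmetic and can wrap before do_div() ever sees it; promoting one operand to u64 makes the whole multiply 64-bit. A quick standalone demonstration with a delta large enough to overflow 32 bits:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t diff = 50000;                      /* e.g. a rate delta in Hz */
        uint32_t wrapped = diff * 100000;           /* 32-bit multiply: wraps */
        uint64_t widened = (uint64_t)diff * 100000; /* 64-bit multiply: exact */

        /* prints wrapped=705032704 widened=5000000000 */
        printf("wrapped=%u widened=%llu\n",
               wrapped, (unsigned long long)widened);
        return 0;
}
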
index 0849b7b..0db94f4 100644 (file)
@@ -59,7 +59,6 @@ int imx_pcm_dma_init(struct platform_device *pdev)
 {
        return devm_snd_dmaengine_pcm_register(&pdev->dev,
                &imx_dmaengine_pcm_config,
-               SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
                SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
 EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
index 6acb225..2434b6d 100644 (file)
@@ -11,6 +11,7 @@ config SND_PXA2XX_SOC
 config SND_MMP_SOC
        bool "Soc Audio for Marvell MMP chips"
        depends on ARCH_MMP
+       select MMP_SRAM
        select SND_SOC_GENERIC_DMAENGINE_PCM
        select SND_ARM
        help
@@ -40,7 +41,7 @@ config SND_MMP_SOC_SSPA
 
 config SND_PXA2XX_SOC_CORGI
        tristate "SoC Audio support for Sharp Zaurus SL-C7x0"
-       depends on SND_PXA2XX_SOC && PXA_SHARP_C7xx
+       depends on SND_PXA2XX_SOC && PXA_SHARP_C7xx && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8731
        help
@@ -49,7 +50,7 @@ config SND_PXA2XX_SOC_CORGI
 
 config SND_PXA2XX_SOC_SPITZ
        tristate "SoC Audio support for Sharp Zaurus SL-Cxx00"
-       depends on SND_PXA2XX_SOC && PXA_SHARP_Cxx00
+       depends on SND_PXA2XX_SOC && PXA_SHARP_Cxx00 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8750
        help
@@ -58,7 +59,7 @@ config SND_PXA2XX_SOC_SPITZ
 
 config SND_PXA2XX_SOC_Z2
        tristate "SoC Audio support for Zipit Z2"
-       depends on SND_PXA2XX_SOC && MACH_ZIPIT2
+       depends on SND_PXA2XX_SOC && MACH_ZIPIT2 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8750
        help
@@ -66,7 +67,7 @@ config SND_PXA2XX_SOC_Z2
 
 config SND_PXA2XX_SOC_POODLE
        tristate "SoC Audio support for Poodle"
-       depends on SND_PXA2XX_SOC && MACH_POODLE
+       depends on SND_PXA2XX_SOC && MACH_POODLE && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8731
        help
@@ -181,7 +182,7 @@ config SND_PXA2XX_SOC_HX4700
 
 config SND_PXA2XX_SOC_MAGICIAN
        tristate "SoC Audio support for HTC Magician"
-       depends on SND_PXA2XX_SOC && MACH_MAGICIAN
+       depends on SND_PXA2XX_SOC && MACH_MAGICIAN && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_PXA_SOC_SSP
        select SND_SOC_UDA1380
index 9188015..4e86265 100644 (file)
@@ -315,7 +315,7 @@ static void rsnd_dma_of_name(struct rsnd_dma *dma,
                dst_mod = mod[index];
        } else {
                src_mod = mod[index];
-               dst_mod = mod[index + 1];
+               dst_mod = mod[index - 1];
        }
 
        index = 0;
index a74b9bf..cdc837e 100644 (file)
@@ -2755,7 +2755,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
        unsigned int val;
-       int connect, change;
+       int connect, change, reg_change = 0;
        struct snd_soc_dapm_update update;
        int ret = 0;
 
@@ -2773,20 +2773,23 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
        change = dapm_kcontrol_set_value(kcontrol, val);
-       if (change) {
-               if (reg != SND_SOC_NOPM) {
-                       mask = mask << shift;
-                       val = val << shift;
-
-                       if (snd_soc_test_bits(codec, reg, mask, val)) {
-                               update.kcontrol = kcontrol;
-                               update.reg = reg;
-                               update.mask = mask;
-                               update.val = val;
-                               card->update = &update;
-                       }
 
+       if (reg != SND_SOC_NOPM) {
+               mask = mask << shift;
+               val = val << shift;
+
+               reg_change = snd_soc_test_bits(codec, reg, mask, val);
+       }
+
+       if (change || reg_change) {
+               if (reg_change) {
+                       update.kcontrol = kcontrol;
+                       update.reg = reg;
+                       update.mask = mask;
+                       update.val = val;
+                       card->update = &update;
                }
+               change |= reg_change;
 
                ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
 
index c3b5b7d..a09e5f3 100644 (file)
@@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 
 static int snd_usb_audio_free(struct snd_usb_audio *chip)
 {
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &chip->ep_list)
+               snd_usb_endpoint_free(p);
+
        mutex_destroy(&chip->mutex);
        kfree(chip);
        return 0;
@@ -585,7 +590,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
                                     struct snd_usb_audio *chip)
 {
        struct snd_card *card;
-       struct list_head *p, *n;
+       struct list_head *p;
 
        if (chip == (void *)-1L)
                return;
@@ -598,14 +603,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
        mutex_lock(&register_mutex);
        chip->num_interfaces--;
        if (chip->num_interfaces <= 0) {
+               struct snd_usb_endpoint *ep;
+
                snd_card_disconnect(card);
                /* release the pcm resources */
                list_for_each(p, &chip->pcm_list) {
                        snd_usb_stream_disconnect(p);
                }
                /* release the endpoint resources */
-               list_for_each_safe(p, n, &chip->ep_list) {
-                       snd_usb_endpoint_free(p);
+               list_for_each_entry(ep, &chip->ep_list, list) {
+                       snd_usb_endpoint_release(ep);
                }
                /* release the midi resources */
                list_for_each(p, &chip->midi_list) {
index 289f582..114e3e7 100644 (file)
@@ -986,20 +986,31 @@ void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
        wait_clear_urbs(ep);
 }
 
+/**
+ * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
+ *
+ * @ep: the endpoint to release
+ *
+ * This function does not care for the endpoint's use count but will tear
+ * down all the streaming URBs immediately.
+ */
+void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
+{
+       release_urbs(ep, 1);
+}
+
 /**
  * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
  *
  * @ep: the list header of the endpoint to free
  *
- * This function does not care for the endpoint's use count but will tear
- * down all the streaming URBs immediately and free all resources.
+ * This frees all resources of the given ep.
  */
 void snd_usb_endpoint_free(struct list_head *head)
 {
        struct snd_usb_endpoint *ep;
 
        ep = list_entry(head, struct snd_usb_endpoint, list);
-       release_urbs(ep, 1);
        kfree(ep);
 }
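
The endpoint teardown is now two-phase so that disconnect can stop I/O immediately while other code may still walk chip->ep_list: snd_usb_endpoint_release() kills the streaming URBs, and snd_usb_endpoint_free() frees the memory only at final card teardown. The same split in miniature (userspace stand-ins for both phases):

#include <stdio.h>
#include <stdlib.h>

struct ep {
        int io_active;
};

/* phase 1: quiesce I/O; safe while others still hold the pointer */
static void ep_release(struct ep *e)
{
        e->io_active = 0;       /* stands in for release_urbs(ep, 1) */
}

/* phase 2: free the memory once nothing can reference the ep */
static void ep_free(struct ep *e)
{
        free(e);
}

int main(void)
{
        struct ep *e = calloc(1, sizeof(*e));

        if (!e)
                return 1;
        e->io_active = 1;
        ep_release(e);          /* at disconnect */
        printf("io_active=%d\n", e->io_active);
        ep_free(e);             /* at final teardown */
        return 0;
}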
 
index 1c7e8ee..e61ee5c 100644 (file)
@@ -23,6 +23,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
 
 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
index c342f70..ee53a42 100644 (file)
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
 
 static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_mutex_lock(&lock->mutex);
 }
 
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
 
 static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
 }
 
index a680ab8..4ec03f8 100644 (file)
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
 
 static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_rdlock(&lock->rwlock);
 
 }
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
 
 static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_wrlock(&lock->rwlock);
 }
 
 static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
 }
 
 static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
 }
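
All of the 2 -> 1 edits in these wrappers touch the fifth lock_acquire() argument; the call shape is:

        lock_acquire(map, subclass, trylock, read, check, nest_lock, ip);

The change appears to track the kernel side of this cycle, where the lockdep 'check' level was reduced from the old 0/1/2 scale to a plain boolean; that reading of the motivation is inferred from the kernel API of the period, not stated in this log.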
 
index 23bd69c..6f80360 100644 (file)
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
 static void init_preload(void);
 static void try_init_preload(void)
 {
-       if (!__init_state != done)
+       if (__init_state != done)
                init_preload();
 }
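
The try_init_preload() change fixes an operator-precedence bug: ! binds tighter than !=, so !__init_state != done compares 0-or-1 against done (value 2) and is true for every state, re-running init_preload() unconditionally. Demonstrated standalone with the same enum values:

#include <stdio.h>

enum { none, prepare, done };

int main(void)
{
        int state;

        /* buggy is 1 for every state; fixed is 0 once state == done */
        for (state = none; state <= done; state++)
                printf("state=%d buggy=%d fixed=%d\n",
                       state, !state != done, state != done);
        return 0;
}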
 
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
 
        try_init_preload();
 
-       lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL,
+       lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
                        (unsigned long)_RET_IP_);
        /*
         * Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
 
        try_init_preload();
 
-       lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_mutex_trylock(mutex);
        if (r)
                lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
         */
        r = ll_pthread_mutex_unlock(mutex);
        if (r)
-               lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+               lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
        return r;
 }
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_rdlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_tryrdlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_trywrlock(rwlock);
        if (r)
                 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_wrlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
        lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_unlock(rwlock);
        if (r)
-               lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+               lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
        return r;
 }
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
        ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
 #endif
 
-       printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
-
        lockdep_init();
 
        __init_state = done;
index b83184f..cf3a44b 100644 (file)
@@ -765,6 +765,9 @@ static void free_arg(struct print_arg *arg)
        case PRINT_BSTRING:
                free(arg->string.string);
                break;
+       case PRINT_BITMASK:
+               free(arg->bitmask.bitmask);
+               break;
        case PRINT_DYNAMIC_ARRAY:
                free(arg->dynarray.index);
                break;
@@ -2268,6 +2271,7 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
        case PRINT_FIELD ... PRINT_SYMBOL:
        case PRINT_STRING:
        case PRINT_BSTRING:
+       case PRINT_BITMASK:
        default:
                do_warning("invalid eval type %d", arg->type);
                ret = 0;
@@ -2296,6 +2300,7 @@ static char *arg_eval (struct print_arg *arg)
        case PRINT_FIELD ... PRINT_SYMBOL:
        case PRINT_STRING:
        case PRINT_BSTRING:
+       case PRINT_BITMASK:
        default:
                do_warning("invalid eval type %d", arg->type);
                break;
@@ -2390,7 +2395,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok)
 {
        struct print_arg *field;
        enum event_type type;
-       char *token;
+       char *token = NULL;
 
        memset(arg, 0, sizeof(*arg));
        arg->type = PRINT_FLAGS;
@@ -2443,7 +2448,7 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok)
 {
        struct print_arg *field;
        enum event_type type;
-       char *token;
+       char *token = NULL;
 
        memset(arg, 0, sizeof(*arg));
        arg->type = PRINT_SYMBOL;
@@ -2482,7 +2487,7 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok)
 {
        struct print_arg *field;
        enum event_type type;
-       char *token;
+       char *token = NULL;
 
        memset(arg, 0, sizeof(*arg));
        arg->type = PRINT_HEX;
@@ -2683,6 +2688,35 @@ process_str(struct event_format *event __maybe_unused, struct print_arg *arg,
        return EVENT_ERROR;
 }
 
+static enum event_type
+process_bitmask(struct event_format *event __maybe_unused, struct print_arg *arg,
+           char **tok)
+{
+       enum event_type type;
+       char *token;
+
+       if (read_expect_type(EVENT_ITEM, &token) < 0)
+               goto out_free;
+
+       arg->type = PRINT_BITMASK;
+       arg->bitmask.bitmask = token;
+       arg->bitmask.offset = -1;
+
+       if (read_expected(EVENT_DELIM, ")") < 0)
+               goto out_err;
+
+       type = read_token(&token);
+       *tok = token;
+
+       return type;
+
+ out_free:
+       free_token(token);
+ out_err:
+       *tok = NULL;
+       return EVENT_ERROR;
+}
+
 static struct pevent_function_handler *
 find_func_handler(struct pevent *pevent, char *func_name)
 {
@@ -2797,6 +2831,10 @@ process_function(struct event_format *event, struct print_arg *arg,
                free_token(token);
                return process_str(event, arg, tok);
        }
+       if (strcmp(token, "__get_bitmask") == 0) {
+               free_token(token);
+               return process_bitmask(event, arg, tok);
+       }
        if (strcmp(token, "__get_dynamic_array") == 0) {
                free_token(token);
                return process_dynamic_array(event, arg, tok);
@@ -3324,6 +3362,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                return eval_type(val, arg, 0);
        case PRINT_STRING:
        case PRINT_BSTRING:
+       case PRINT_BITMASK:
                return 0;
        case PRINT_FUNC: {
                struct trace_seq s;
@@ -3556,6 +3595,60 @@ static void print_str_to_seq(struct trace_seq *s, const char *format,
                trace_seq_printf(s, format, str);
 }
 
+static void print_bitmask_to_seq(struct pevent *pevent,
+                                struct trace_seq *s, const char *format,
+                                int len_arg, const void *data, int size)
+{
+       int nr_bits = size * 8;
+       int str_size = (nr_bits + 3) / 4;
+       int len = 0;
+       char buf[3];
+       char *str;
+       int index;
+       int i;
+
+       /*
+        * The kernel likes to put in commas every 32 bits; we
+        * can do the same.
+        */
+       str_size += (nr_bits - 1) / 32;
+
+       str = malloc(str_size + 1);
+       if (!str) {
+               do_warning("%s: not enough memory!", __func__);
+               return;
+       }
+       str[str_size] = 0;
+
+       /* Start out with -2 for the two chars per byte */
+       for (i = str_size - 2; i >= 0; i -= 2) {
+               /*
+                * data points to a bit mask of size bytes.
+                * In the kernel, this is an array of long words, thus
+                * endianness is very important.
+                */
+               if (pevent->file_bigendian)
+                       index = size - (len + 1);
+               else
+                       index = len;
+
+               snprintf(buf, 3, "%02x", *((unsigned char *)data + index));
+               memcpy(str + i, buf, 2);
+               len++;
+               if (!(len & 3) && i > 0) {
+                       i--;
+                       str[i] = ',';
+               }
+       }
+
+       if (len_arg >= 0)
+               trace_seq_printf(s, format, len_arg, str);
+       else
+               trace_seq_printf(s, format, str);
+
+       free(str);
+}
+
 static void print_str_arg(struct trace_seq *s, void *data, int size,
                          struct event_format *event, const char *format,
                          int len_arg, struct print_arg *arg)
@@ -3691,6 +3784,23 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
        case PRINT_BSTRING:
                print_str_to_seq(s, format, len_arg, arg->string.string);
                break;
+       case PRINT_BITMASK: {
+               int bitmask_offset;
+               int bitmask_size;
+
+               if (arg->bitmask.offset == -1) {
+                       struct format_field *f;
+
+                       f = pevent_find_any_field(event, arg->bitmask.bitmask);
+                       arg->bitmask.offset = f->offset;
+               }
+               bitmask_offset = data2host4(pevent, data + arg->bitmask.offset);
+               bitmask_size = bitmask_offset >> 16;
+               bitmask_offset &= 0xffff;
+               print_bitmask_to_seq(pevent, s, format, len_arg,
+                                    data + bitmask_offset, bitmask_size);
+               break;
+       }
        case PRINT_OP:
                /*
                 * The only op for string should be ? :
@@ -4822,6 +4932,9 @@ static void print_args(struct print_arg *args)
        case PRINT_BSTRING:
                printf("__get_str(%s)", args->string.string);
                break;
+       case PRINT_BITMASK:
+               printf("__get_bitmask(%s)", args->bitmask.bitmask);
+               break;
        case PRINT_TYPE:
                printf("(%s)", args->typecast.type);
                print_args(args->typecast.item);
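For reference, here is a minimal stand-alone sketch of the layout the new PRINT_BITMASK case decodes: a 32-bit word at the field's record offset packs the payload location (low 16 bits = offset, high 16 bits = size), and the payload bytes are hex-printed most-significant-byte first with a comma every 32 bits, matching print_bitmask_to_seq(). The buffer layout and the show_bitmask() helper are illustrative assumptions for a little-endian trace, not part of the library:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/*
 * Illustrative only: decode a __get_bitmask-style dynamic field from a
 * raw record, assuming a little-endian buffer. The packed word format
 * (offset in the low 16 bits, size in the high 16) matches what
 * print_str_arg() extracts above.
 */
static void show_bitmask(const unsigned char *rec, int field_offset)
{
        uint32_t word;
        int off, size, i;

        memcpy(&word, rec + field_offset, sizeof(word));
        off  = word & 0xffff;
        size = word >> 16;

        /* Most-significant byte first, a comma every 32 bits. */
        for (i = size - 1; i >= 0; i--) {
                printf("%02x", rec[off + i]);
                if (i && !(i & 3))
                        putchar(',');
        }
        putchar('\n');
}

int main(void)
{
        unsigned char rec[12] = { 0 };
        uint32_t word = (8u << 16) | 4;  /* 8 payload bytes at offset 4 */

        memcpy(rec, &word, sizeof(word));
        rec[4] = 0x0f;                   /* bits 0-3 set */
        rec[8] = 0x01;                   /* bit 32 set */
        show_bitmask(rec, 0);            /* prints 00000001,0000000f */
        return 0;
}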
index feab942..7a3873f 100644 (file)
@@ -107,8 +107,8 @@ typedef int (*pevent_event_handler_func)(struct trace_seq *s,
 typedef int (*pevent_plugin_load_func)(struct pevent *pevent);
 typedef int (*pevent_plugin_unload_func)(struct pevent *pevent);
 
-struct plugin_option {
-       struct plugin_option            *next;
+struct pevent_plugin_option {
+       struct pevent_plugin_option     *next;
        void                            *handle;
        char                            *file;
        char                            *name;
@@ -135,7 +135,7 @@ struct plugin_option {
  * PEVENT_PLUGIN_OPTIONS:  (optional)
  *   Plugin options that can be set before loading
  *
- *   struct plugin_option PEVENT_PLUGIN_OPTIONS[] = {
+ *   struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = {
  *     {
  *             .name = "option-name",
  *             .plugin_alias = "override-file-name", (optional)
@@ -208,6 +208,11 @@ struct print_arg_string {
        int                     offset;
 };
 
+struct print_arg_bitmask {
+       char                    *bitmask;
+       int                     offset;
+};
+
 struct print_arg_field {
        char                    *name;
        struct format_field     *field;
@@ -274,6 +279,7 @@ enum print_arg_type {
        PRINT_DYNAMIC_ARRAY,
        PRINT_OP,
        PRINT_FUNC,
+       PRINT_BITMASK,
 };
 
 struct print_arg {
@@ -288,6 +294,7 @@ struct print_arg {
                struct print_arg_hex            hex;
                struct print_arg_func           func;
                struct print_arg_string         string;
+               struct print_arg_bitmask        bitmask;
                struct print_arg_op             op;
                struct print_arg_dynarray       dynarray;
        };
@@ -354,6 +361,8 @@ enum pevent_func_arg_type {
 
 enum pevent_flag {
        PEVENT_NSEC_OUTPUT              = 1,    /* output in NSECS */
+       PEVENT_DISABLE_SYS_PLUGINS      = 1 << 1,
+       PEVENT_DISABLE_PLUGINS          = 1 << 2,
 };
 
 #define PEVENT_ERRORS                                                        \
@@ -410,9 +419,19 @@ enum pevent_errno {
 
 struct plugin_list;
 
+#define INVALID_PLUGIN_LIST_OPTION     ((char **)((unsigned long)-1))
+
 struct plugin_list *traceevent_load_plugins(struct pevent *pevent);
 void traceevent_unload_plugins(struct plugin_list *plugin_list,
                               struct pevent *pevent);
+char **traceevent_plugin_list_options(void);
+void traceevent_plugin_free_options_list(char **list);
+int traceevent_plugin_add_options(const char *name,
+                                 struct pevent_plugin_option *options);
+void traceevent_plugin_remove_options(struct pevent_plugin_option *options);
+void traceevent_print_plugins(struct trace_seq *s,
+                             const char *prefix, const char *suffix,
+                             const struct plugin_list *list);
 
 struct cmdline;
 struct cmdline_list;
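A hedged usage sketch of the option-listing API declared above; the only non-obvious part of the contract is the three-way return (NULL when no options are registered, INVALID_PLUGIN_LIST_OPTION on allocation failure, otherwise a NULL-terminated array that must be released with traceevent_plugin_free_options_list()). The wrapper name below is an invented assumption:

#include <stdio.h>
#include "event-parse.h"

/* List every registered "<plugin>:<option>" pair (illustrative helper). */
static int dump_plugin_options(void)
{
        char **list = traceevent_plugin_list_options();
        int i;

        if (list == INVALID_PLUGIN_LIST_OPTION)
                return -1;                        /* allocation failure */

        for (i = 0; list && list[i]; i++)         /* NULL: nothing registered */
                printf("  %s\n", list[i]);

        traceevent_plugin_free_options_list(list);
        return 0;
}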
index 0c8bf67..136162c 100644 (file)
@@ -18,6 +18,7 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#include <stdio.h>
 #include <string.h>
 #include <dlfcn.h>
 #include <stdlib.h>
 
 #define LOCAL_PLUGIN_DIR ".traceevent/plugins"
 
+static struct registered_plugin_options {
+       struct registered_plugin_options        *next;
+       struct pevent_plugin_option             *options;
+} *registered_options;
+
+static struct trace_plugin_options {
+       struct trace_plugin_options     *next;
+       char                            *plugin;
+       char                            *option;
+       char                            *value;
+} *trace_plugin_options;
+
 struct plugin_list {
        struct plugin_list      *next;
        char                    *name;
        void                    *handle;
 };
 
+/**
+ * traceevent_plugin_list_options - get list of plugin options
+ *
+ * Returns an array of char strings listing the currently registered
+ * plugin options in the format <plugin>:<option>. This list can be
+ * used for toggling options on or off.
+ *
+ * Returns NULL if no options are registered. On error it returns
+ * INVALID_PLUGIN_LIST_OPTION.
+ *
+ * Must be freed with traceevent_plugin_free_options_list().
+ */
+char **traceevent_plugin_list_options(void)
+{
+       struct registered_plugin_options *reg;
+       struct pevent_plugin_option *op;
+       char **list = NULL;
+       char *name;
+       int count = 0;
+
+       for (reg = registered_options; reg; reg = reg->next) {
+               for (op = reg->options; op->name; op++) {
+                       char *alias = op->plugin_alias ? op->plugin_alias : op->file;
+                       char **temp = list;
+
+                       name = malloc(strlen(op->name) + strlen(alias) + 2);
+                       if (!name)
+                               goto err;
+
+                       sprintf(name, "%s:%s", alias, op->name);
+                       list = realloc(list, sizeof(*list) * (count + 2));
+                       if (!list) {
+                               list = temp;
+                               free(name);
+                               goto err;
+                       }
+                       list[count++] = name;
+                       list[count] = NULL;
+               }
+       }
+       return list;
+
+ err:
+       while (--count >= 0)
+               free(list[count]);
+       free(list);
+
+       return INVALID_PLUGIN_LIST_OPTION;
+}
+
+void traceevent_plugin_free_options_list(char **list)
+{
+       int i;
+
+       if (!list)
+               return;
+
+       if (list == INVALID_PLUGIN_LIST_OPTION)
+               return;
+
+       for (i = 0; list[i]; i++)
+               free(list[i]);
+
+       free(list);
+}
+
+static int
+update_option(const char *file, struct pevent_plugin_option *option)
+{
+       struct trace_plugin_options *op;
+       char *plugin;
+
+       if (option->plugin_alias) {
+               plugin = strdup(option->plugin_alias);
+               if (!plugin)
+                       return -1;
+       } else {
+               char *p;
+               plugin = strdup(file);
+               if (!plugin)
+                       return -1;
+               p = strstr(plugin, ".");
+               if (p)
+                       *p = '\0';
+       }
+
+       /* first look for named options */
+       for (op = trace_plugin_options; op; op = op->next) {
+               if (!op->plugin)
+                       continue;
+               if (strcmp(op->plugin, plugin) != 0)
+                       continue;
+               if (strcmp(op->option, option->name) != 0)
+                       continue;
+
+               option->value = op->value;
+               option->set ^= 1;
+               goto out;
+       }
+
+       /* then look for unnamed options */
+       for (op = trace_plugin_options; op; op = op->next) {
+               if (op->plugin)
+                       continue;
+               if (strcmp(op->option, option->name) != 0)
+                       continue;
+
+               option->value = op->value;
+               option->set ^= 1;
+               break;
+       }
+
+ out:
+       free(plugin);
+       return 0;
+}
+
+/**
+ * traceevent_plugin_add_options - Add a set of options by a plugin
+ * @name: The name of the plugin adding the options
+ * @options: The set of options being loaded
+ *
+ * Sets the options to the values that have been added by the user.
+ */
+int traceevent_plugin_add_options(const char *name,
+                                 struct pevent_plugin_option *options)
+{
+       struct registered_plugin_options *reg;
+
+       reg = malloc(sizeof(*reg));
+       if (!reg)
+               return -1;
+       reg->next = registered_options;
+       reg->options = options;
+       registered_options = reg;
+
+       while (options->name) {
+               update_option(name, options);
+               options++;
+       }
+       return 0;
+}
+
+/**
+ * traceevent_plugin_remove_options - remove plugin options that were registered
+ * @options: Options to be removed that were registered with traceevent_plugin_add_options()
+ */
+void traceevent_plugin_remove_options(struct pevent_plugin_option *options)
+{
+       struct registered_plugin_options **last;
+       struct registered_plugin_options *reg;
+
+       for (last = &registered_options; *last; last = &(*last)->next) {
+               if ((*last)->options == options) {
+                       reg = *last;
+                       *last = reg->next;
+                       free(reg);
+                       return;
+               }
+       }
+}
+
+/**
+ * traceevent_print_plugins - print out the list of plugins loaded
+ * @s: the trace_seq descriptor to write to
+ * @prefix: The prefix string to add before listing the option name
+ * @suffix: The suffix string to append after the option name
+ * @list: The list of plugins (usually returned by traceevent_load_plugins())
+ *
+ * Writes to the trace_seq @s the list of plugins (files) that is
+ * returned by traceevent_load_plugins(). Use @prefix and @suffix for formatting:
+ * @prefix = "  ", @suffix = "\n".
+ */
+void traceevent_print_plugins(struct trace_seq *s,
+                             const char *prefix, const char *suffix,
+                             const struct plugin_list *list)
+{
+       while (list) {
+               trace_seq_printf(s, "%s%s%s", prefix, list->name, suffix);
+               list = list->next;
+       }
+}
+
 static void
 load_plugin(struct pevent *pevent, const char *path,
            const char *file, void *data)
@@ -148,12 +344,17 @@ load_plugins(struct pevent *pevent, const char *suffix,
        char *path;
        char *envdir;
 
+       if (pevent->flags & PEVENT_DISABLE_PLUGINS)
+               return;
+
        /*
         * If a system plugin directory was defined,
         * check that first.
         */
 #ifdef PLUGIN_DIR
-       load_plugins_dir(pevent, suffix, PLUGIN_DIR, load_plugin, data);
+       if (!(pevent->flags & PEVENT_DISABLE_SYS_PLUGINS))
+               load_plugins_dir(pevent, suffix, PLUGIN_DIR,
+                                load_plugin, data);
 #endif
 
        /*
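The two new pevent flags above gate plugin loading in load_plugins(); a minimal sketch of wiring them to a pair of hypothetical command-line switches. Direct field access mirrors how load_plugins() itself reads pevent->flags; the function and switch names are assumptions, not part of this series:

#include "event-parse.h"

/* Illustrative: apply hypothetical --no-plugins / --no-sys-plugins switches. */
static void configure_plugins(struct pevent *pevent,
                              int no_plugins, int no_sys_plugins)
{
        if (no_plugins)
                pevent->flags |= PEVENT_DISABLE_PLUGINS;     /* skip everything */
        else if (no_sys_plugins)
                pevent->flags |= PEVENT_DISABLE_SYS_PLUGINS; /* skip PLUGIN_DIR only */
}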
index c066b25..4592d84 100644 (file)
@@ -5,8 +5,7 @@
 #include "event-parse.h"
 
 static unsigned long long
-process___le16_to_cpup(struct trace_seq *s,
-                      unsigned long long *args)
+process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
 {
        uint16_t *val = (uint16_t *) (unsigned long) args[0];
        return val ? (long long) le16toh(*val) : 0;
index 80ba4ff..a00ec19 100644 (file)
@@ -33,6 +33,29 @@ static int cpus = -1;
 
 #define STK_BLK 10
 
+struct pevent_plugin_option plugin_options[] =
+{
+       {
+               .name = "parent",
+               .plugin_alias = "ftrace",
+               .description =
+               "Print parent of functions for function events",
+       },
+       {
+               .name = "indent",
+               .plugin_alias = "ftrace",
+               .description =
+               "Try to show function call indents, based on parents",
+               .set = 1,
+       },
+       {
+               .name = NULL,
+       }
+};
+
+static struct pevent_plugin_option *ftrace_parent = &plugin_options[0];
+static struct pevent_plugin_option *ftrace_indent = &plugin_options[1];
+
 static void add_child(struct func_stack *stack, const char *child, int pos)
 {
        int i;
@@ -119,7 +142,8 @@ static int function_handler(struct trace_seq *s, struct pevent_record *record,
 
        parent = pevent_find_function(pevent, pfunction);
 
-       index = add_and_get_index(parent, func, record->cpu);
+       if (parent && ftrace_indent->set)
+               index = add_and_get_index(parent, func, record->cpu);
 
        trace_seq_printf(s, "%*s", index*3, "");
 
@@ -128,11 +152,13 @@ static int function_handler(struct trace_seq *s, struct pevent_record *record,
        else
                trace_seq_printf(s, "0x%llx", function);
 
-       trace_seq_printf(s, " <-- ");
-       if (parent)
-               trace_seq_printf(s, "%s", parent);
-       else
-               trace_seq_printf(s, "0x%llx", pfunction);
+       if (ftrace_parent->set) {
+               trace_seq_printf(s, " <-- ");
+               if (parent)
+                       trace_seq_printf(s, "%s", parent);
+               else
+                       trace_seq_printf(s, "0x%llx", pfunction);
+       }
 
        return 0;
 }
@@ -141,6 +167,9 @@ int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
 {
        pevent_register_event_handler(pevent, -1, "ftrace", "function",
                                      function_handler, NULL);
+
+       traceevent_plugin_add_options("ftrace", plugin_options);
+
        return 0;
 }
 
@@ -157,6 +186,8 @@ void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
                free(fstack[i].stack);
        }
 
+       traceevent_plugin_remove_options(plugin_options);
+
        free(fstack);
        fstack = NULL;
        cpus = -1;
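The registration/removal pairing in the ftrace plugin above is the pattern other plugins would follow; a stripped-down sketch of a hypothetical plugin exposing one toggle (the "demo" plugin name and its option are invented for illustration; __maybe_unused is used as elsewhere in this series):

#include "event-parse.h"

static struct pevent_plugin_option demo_options[] = {
        {
                .name = "verbose",
                .plugin_alias = "demo",   /* hypothetical plugin name */
                .description = "Print extra detail for demo events",
        },
        {
                .name = NULL,             /* terminator */
        }
};

int PEVENT_PLUGIN_LOADER(struct pevent *pevent __maybe_unused)
{
        return traceevent_plugin_add_options("demo", demo_options);
}

void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent __maybe_unused)
{
        traceevent_plugin_remove_options(demo_options);
}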
index 0db714c..5c23d5b 100644 (file)
@@ -30,8 +30,7 @@
 #define MINOR(dev)     ((unsigned int) ((dev) & MINORMASK))
 
 static unsigned long long
-process_jbd2_dev_to_name(struct trace_seq *s,
-                        unsigned long long *args)
+process_jbd2_dev_to_name(struct trace_seq *s, unsigned long long *args)
 {
        unsigned int dev = args[0];
 
@@ -40,8 +39,7 @@ process_jbd2_dev_to_name(struct trace_seq *s,
 }
 
 static unsigned long long
-process_jiffies_to_msecs(struct trace_seq *s,
-                        unsigned long long *args)
+process_jiffies_to_msecs(struct trace_seq *s, unsigned long long *args)
 {
        unsigned long long jiffies = args[0];
 
index 9e0e8c6..88fe83d 100644 (file)
@@ -240,25 +240,38 @@ static const char *find_exit_reason(unsigned isa, int val)
        for (i = 0; strings[i].val >= 0; i++)
                if (strings[i].val == val)
                        break;
-       if (strings[i].str)
-               return strings[i].str;
-       return "UNKNOWN";
+
+       return strings[i].str;
 }
 
-static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
-                           struct event_format *event, void *context)
+static int print_exit_reason(struct trace_seq *s, struct pevent_record *record,
+                            struct event_format *event, const char *field)
 {
        unsigned long long isa;
        unsigned long long val;
-       unsigned long long info1 = 0, info2 = 0;
+       const char *reason;
 
-       if (pevent_get_field_val(s, event, "exit_reason", record, &val, 1) < 0)
+       if (pevent_get_field_val(s, event, field, record, &val, 1) < 0)
                return -1;
 
        if (pevent_get_field_val(s, event, "isa", record, &isa, 0) < 0)
                isa = 1;
 
-       trace_seq_printf(s, "reason %s", find_exit_reason(isa, val));
+       reason = find_exit_reason(isa, val);
+       if (reason)
+               trace_seq_printf(s, "reason %s", reason);
+       else
+               trace_seq_printf(s, "reason UNKNOWN (%llu)", val);
+       return 0;
+}
+
+static int kvm_exit_handler(struct trace_seq *s, struct pevent_record *record,
+                           struct event_format *event, void *context)
+{
+       unsigned long long info1 = 0, info2 = 0;
+
+       if (print_exit_reason(s, record, event, "exit_reason") < 0)
+               return -1;
 
        pevent_print_num_field(s, " rip 0x%lx", event, "guest_rip", record, 1);
 
@@ -313,6 +326,29 @@ static int kvm_emulate_insn_handler(struct trace_seq *s,
        return 0;
 }
 
+
+static int kvm_nested_vmexit_inject_handler(struct trace_seq *s, struct pevent_record *record,
+                                           struct event_format *event, void *context)
+{
+       if (print_exit_reason(s, record, event, "exit_code") < 0)
+               return -1;
+
+       pevent_print_num_field(s, " info1 %llx", event, "exit_info1", record, 1);
+       pevent_print_num_field(s, " info2 %llx", event, "exit_info2", record, 1);
+       pevent_print_num_field(s, " int_info %llx", event, "exit_int_info", record, 1);
+       pevent_print_num_field(s, " int_info_err %llx", event, "exit_int_info_err", record, 1);
+
+       return 0;
+}
+
+static int kvm_nested_vmexit_handler(struct trace_seq *s, struct pevent_record *record,
+                                    struct event_format *event, void *context)
+{
+       pevent_print_num_field(s, "rip %llx ", event, "rip", record, 1);
+
+       return kvm_nested_vmexit_inject_handler(s, record, event, context);
+}
+
 union kvm_mmu_page_role {
        unsigned word;
        struct {
@@ -409,6 +445,12 @@ int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
        pevent_register_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
                                      kvm_emulate_insn_handler, NULL);
 
+       pevent_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+                                     kvm_nested_vmexit_handler, NULL);
+
+       pevent_register_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+                                     kvm_nested_vmexit_inject_handler, NULL);
+
        pevent_register_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
                                      kvm_mmu_get_page_handler, NULL);
 
@@ -443,6 +485,12 @@ void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
        pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_emulate_insn",
                                        kvm_emulate_insn_handler, NULL);
 
+       pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit",
+                                       kvm_nested_vmexit_handler, NULL);
+
+       pevent_unregister_event_handler(pevent, -1, "kvm", "kvm_nested_vmexit_inject",
+                                       kvm_nested_vmexit_inject_handler, NULL);
+
        pevent_unregister_event_handler(pevent, -1, "kvmmmu", "kvm_mmu_get_page",
                                        kvm_mmu_get_page_handler, NULL);
 
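The refactor above centralizes the UNKNOWN fallback in print_exit_reason(); the lookup contract it relies on is a sentinel-terminated table where a NULL string signals a miss. A hedged stand-alone sketch of that pattern (table contents and names invented):

#include <stdio.h>

struct str_value {
        int         val;
        const char *str;        /* NULL on the sentinel row */
};

static const struct str_value reasons[] = {
        { 0x00, "EXCEPTION" },
        { 0x30, "IO" },
        { -1,   NULL },         /* sentinel: val < 0 ends the scan */
};

static const char *find_reason(int val)
{
        int i;

        for (i = 0; reasons[i].val >= 0; i++)
                if (reasons[i].val == val)
                        return reasons[i].str;
        return NULL;            /* caller prints UNKNOWN */
}

int main(void)
{
        const char *r = find_reason(0x99);

        printf("reason %s\n", r ? r : "UNKNOWN");
        return 0;
}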
index 4464ad7..f6480cb 100644 (file)
@@ -16,6 +16,10 @@ This 'perf bench' command is a general framework for benchmark suites.
 
 COMMON OPTIONS
 --------------
+-r::
+--repeat=::
+Specify the number of times to repeat the run (default: 10).
+
 -f::
 --format=::
 Specify format style.
index a00a342..dc7442c 100644 (file)
@@ -41,6 +41,9 @@ OPTIONS
        tasks slept. sched_switch contains a callchain where a task slept and
        sched_stat contains a timeslice showing how long a task slept.
 
+--kallsyms=<file>::
+       kallsyms pathname
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
index 52276a6..6e689dc 100644 (file)
@@ -51,9 +51,9 @@ There are a couple of variants of perf kvm:
   'perf kvm stat <command>' to run a command and gather performance counter
   statistics.
   In particular, perf 'kvm stat record/report' generates a statistical analysis
-  of KVM events. Currently, vmexit, mmio and ioport events are supported.
-  'perf kvm stat record <command>' records kvm events and the events between
-  start and end <command>.
+  of KVM events. Currently, vmexit, mmio (x86 only) and ioport (x86 only)
+  events are supported. 'perf kvm stat record <command>' records kvm events
+  and the events between start and end <command>.
   And this command produces a file which contains tracing results of kvm
   events.
 
@@ -103,8 +103,8 @@ STAT REPORT OPTIONS
       analyze events which occur on this vcpu. (default: all vcpus)
 
 --event=<value>::
-       event to be analyzed. Possible values: vmexit, mmio, ioport.
-       (default: vmexit)
+       event to be analyzed. Possible values: vmexit, mmio (x86 only),
+       ioport (x86 only). (default: vmexit)
 -k::
 --key=<value>::
        Sorting key. Possible values: sample (default, sort by samples
@@ -138,7 +138,8 @@ STAT LIVE OPTIONS
 
 
 --event=<value>::
-       event to be analyzed. Possible values: vmexit, mmio, ioport.
+       event to be analyzed. Possible values: vmexit,
+       mmio (x86 only), ioport (x86 only).
        (default: vmexit)
 
 -k::
@@ -147,7 +148,8 @@ STAT LIVE OPTIONS
        number), time (sort by average time).
 
 --duration=<value>::
-       Show events other than HLT that take longer than duration usecs.
+       Show events other than HLT (x86 only) or Wait state (s390 only)
+       that take longer than duration usecs.
 
 SEE ALSO
 --------
index cefdf43..d2b59af 100644 (file)
@@ -117,6 +117,22 @@ OPTIONS
        By default, every sort keys not specified in -F will be appended
        automatically.
 
+       If the --mem-mode option is used, the following sort keys are also available
+       (incompatible with --branch-stack):
+       symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
+
+       - symbol_daddr: name of the data symbol being accessed at the time of sample
+       - dso_daddr: name of the library or module containing the data being
+       accessed at the time of sample
+       - locked: whether the bus was locked at the time of sample
+       - tlb: type of tlb access for the data at the time of sample
+       - mem: type of memory access for the data at the time of sample
+       - snoop: type of snoop (if any) for the data at the time of sample
+       - dcacheline: the cacheline the data address is on at the time of sample
+
+       The default sort keys then change to local_weight, mem, sym, dso,
+       symbol_daddr, dso_daddr, snoop, tlb, locked (see '--mem-mode').
+
 -p::
 --parent=<regex>::
         A regex filter to identify parent. The parent is a caller of this
@@ -260,6 +276,13 @@ OPTIONS
        Demangle symbol names to human readable form. It's enabled by default,
        disable with --no-demangle.
 
+--mem-mode::
+       Use the data addresses of samples in addition to instruction addresses
+       to build the histograms.  To generate meaningful output, the perf.data
+       file must have been obtained using perf record -d -W and using a
+       special event -e cpu/mem-loads/ or -e cpu/mem-stores/. See
+       'perf mem' for simpler access.
+
 --percent-limit::
        Do not show entries which have an overhead under that percent.
        (Default: 0).
index bc5990c..df98d1c 100644 (file)
@@ -15,10 +15,20 @@ DESCRIPTION
 There are two variants of perf timechart:
 
   'perf timechart record <command>' to record the system level events
-  of an arbitrary workload.
+  of an arbitrary workload. By default timechart records only scheduler
+  and CPU events (task switches, running times, CPU power states, etc),
+  but it's possible to record IO (disk, network) activity using the -I argument.
 
   'perf timechart' to turn a trace into a Scalable Vector Graphics file,
-  that can be viewed with popular SVG viewers such as 'Inkscape'.
+  that can be viewed with popular SVG viewers such as 'Inkscape'. Depending
+  on the events in the perf.data file, timechart will contain scheduler/cpu
+  events or IO events.
+
+  In IO mode, every bar has two charts: upper and lower.
+  The upper bar shows incoming events (disk reads, ingress network packets).
+  The lower bar shows outgoing events (disk writes, egress network packets).
+  There are also poll bars, which show how much time the application spent
+  in poll/epoll/select syscalls.
 
 TIMECHART OPTIONS
 -----------------
@@ -43,27 +53,6 @@ TIMECHART OPTIONS
 
 --symfs=<directory>::
         Look for files with symbols relative to this directory.
-
-EXAMPLES
---------
-
-$ perf timechart record git pull
-
-  [ perf record: Woken up 13 times to write data ]
-  [ perf record: Captured and wrote 4.253 MB perf.data (~185801 samples) ]
-
-$ perf timechart
-
-  Written 10.2 seconds of trace to output.svg.
-
-Record system-wide timechart:
-
-  $ perf timechart record
-
-  then generate timechart and highlight 'gcc' tasks:
-
-  $ perf timechart --highlight gcc
-
 -n::
 --proc-num::
         Print task info for at least given number of tasks.
@@ -75,6 +64,19 @@ Record system-wide timechart:
        duration or tasks with given name. If number is given it's interpreted
        as number of nanoseconds. If non-numeric string is given it's
        interpreted as task name.
+--io-skip-eagain::
+       Don't draw EAGAIN IO events.
+--io-min-time=<nsecs>::
+       Draw small events as if they lasted min-time. Useful when you need
+       to see very small and fast IO. A ms or us suffix may be used to
+       give the time in milliseconds or microseconds.
+       The default value is 1ms.
+--io-merge-dist=<nsecs>::
+       Merge events that are merge-dist nanoseconds apart.
+       This reduces the number of figures in the SVG and makes it more
+       render-friendly. A ms or us suffix may be used to give the time
+       in milliseconds or microseconds.
+       The default value is 1us.
 
 RECORD OPTIONS
 --------------
@@ -84,10 +86,41 @@ RECORD OPTIONS
 -T::
 --tasks-only::
         Record only tasks-related events
+-I::
+--io-only::
+        Record only io-related events
 -g::
 --callchain::
         Do call-graph (stack chain/backtrace) recording
 
+EXAMPLES
+--------
+
+$ perf timechart record git pull
+
+  [ perf record: Woken up 13 times to write data ]
+  [ perf record: Captured and wrote 4.253 MB perf.data (~185801 samples) ]
+
+$ perf timechart
+
+  Written 10.2 seconds of trace to output.svg.
+
+Record system-wide timechart:
+
+  $ perf timechart record
+
+  then generate timechart and highlight 'gcc' tasks:
+
+  $ perf timechart --highlight gcc
+
+Record system-wide IO events:
+
+  $ perf timechart record -I
+
+  then generate timechart:
+
+  $ perf timechart
+
 SEE ALSO
 --------
 linkperf:perf-record[1]
index fae38d9..02aac83 100644 (file)
@@ -107,6 +107,52 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
        Show tool stats such as number of times fd->pathname was discovered through
        hooking the open syscall return + vfs_getname or via reading /proc/pid/fd, etc.
 
+-F=[all|min|maj]::
+--pf=[all|min|maj]::
+       Trace pagefaults. Optionally, you can specify whether you want minor,
+       major or all pagefaults. The default value is maj.
+
+--syscalls::
+       Trace system calls. This option is enabled by default.
+
+PAGEFAULTS
+----------
+
+When tracing pagefaults, the format of the trace is as follows:
+
+<min|maj>fault [<ip.symbol>+<ip.offset>] => <addr.dso@addr.offset> (<map type><addr level>).
+
+- min/maj indicates whether the fault event is minor or major;
+- ip.symbol shows the symbol for the instruction pointer (the code that generated
+  the fault); if no debug symbols are available, perf trace prints the raw IP;
+- addr.dso shows the DSO for the faulted address;
+- map type is either 'd' for non-executable maps or 'x' for executable maps;
+- addr level is either 'k' for a kernel dso or '.' for a user dso.
+
+For symbol resolution you may need to install debugging symbols.
+
+Please be aware that the duration is currently always 0 and doesn't reflect the
+actual time it took for the fault to be handled!
+
+When --verbose is specified, perf trace tries to print all available information
+for both the IP and the fault address in the form of dso@symbol+offset.
+
+EXAMPLES
+--------
+
+Trace only major pagefaults:
+
+ $ perf trace --no-syscalls -F
+
+Trace syscalls, major and minor pagefaults:
+
+ $ perf trace -F all
+
+  1416.547 ( 0.000 ms): python/20235 majfault [CRYPTO_push_info_+0x0] => /lib/x86_64-linux-gnu/libcrypto.so.1.0.0@0x61be0 (x.)
+
+  As you can see, there was a major pagefault in the python process, from
+  the CRYPTO_push_info_ routine, which faulted somewhere in libcrypto.so.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script[1]
index 0eeb247..d240bb2 100644 (file)
@@ -8,7 +8,15 @@ perf - Performance analysis tools for Linux
 SYNOPSIS
 --------
 [verse]
-'perf' [--version] [--help] COMMAND [ARGS]
+'perf' [--version] [--help] [OPTIONS] COMMAND [ARGS]
+
+OPTIONS
+-------
+--debug::
+       Set a debug variable (just 'verbose' for now) to a value in
+       the range (0, 10). Use like:
+         --debug verbose   # sets verbose = 1
+         --debug verbose=2 # sets verbose = 2
 
 DESCRIPTION
 -----------
index 45da209..344c4d3 100644 (file)
@@ -37,3 +37,6 @@ arch/x86/include/asm/kvm_host.h
 arch/x86/include/uapi/asm/svm.h
 arch/x86/include/uapi/asm/vmx.h
 arch/x86/include/uapi/asm/kvm.h
+arch/x86/include/uapi/asm/kvm_perf.h
+arch/s390/include/uapi/asm/sie.h
+arch/s390/include/uapi/asm/kvm_perf.h
index ae20edf..2240974 100644 (file)
@@ -295,11 +295,13 @@ LIB_H += util/intlist.h
 LIB_H += util/perf_regs.h
 LIB_H += util/unwind.h
 LIB_H += util/vdso.h
+LIB_H += util/tsc.h
 LIB_H += ui/helpline.h
 LIB_H += ui/progress.h
 LIB_H += ui/util.h
 LIB_H += ui/ui.h
 LIB_H += util/data.h
+LIB_H += util/kvm-stat.h
 
 LIB_OBJS += $(OUTPUT)util/abspath.o
 LIB_OBJS += $(OUTPUT)util/alias.o
@@ -373,6 +375,8 @@ LIB_OBJS += $(OUTPUT)util/stat.o
 LIB_OBJS += $(OUTPUT)util/record.o
 LIB_OBJS += $(OUTPUT)util/srcline.o
 LIB_OBJS += $(OUTPUT)util/data.o
+LIB_OBJS += $(OUTPUT)util/tsc.o
+LIB_OBJS += $(OUTPUT)util/cloexec.o
 
 LIB_OBJS += $(OUTPUT)ui/setup.o
 LIB_OBJS += $(OUTPUT)ui/helpline.o
@@ -819,15 +823,15 @@ TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol
 TAG_FILES= ../../include/uapi/linux/perf_event.h
 
 TAGS:
-       $(RM) TAGS
+       $(QUIET_GEN)$(RM) TAGS; \
        $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs etags -a $(TAG_FILES)
 
 tags:
-       $(RM) tags
+       $(QUIET_GEN)$(RM) tags; \
        $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs ctags -a $(TAG_FILES)
 
 cscope:
-       $(RM) cscope*
+       $(QUIET_GEN)$(RM) cscope*; \
        $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs cscope -b $(TAG_FILES)
 
 ### Detect prefix changes
index 744e629..b92219b 100644 (file)
@@ -3,3 +3,4 @@ PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/skip-callchain-idx.o
index 2f7073d..6c1b8a7 100644 (file)
@@ -5,9 +5,7 @@
 #include <string.h>
 
 #include "../../util/header.h"
-
-#define __stringify_1(x)        #x
-#define __stringify(x)          __stringify_1(x)
+#include "../../util/util.h"
 
 #define mfspr(rn)       ({unsigned long rval; \
                         asm volatile("mfspr %0," __stringify(rn) \
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
new file mode 100644 (file)
index 0000000..a7c23a4
--- /dev/null
@@ -0,0 +1,266 @@
+/*
+ * Use DWARF Debug information to skip unnecessary callchain entries.
+ *
+ * Copyright (C) 2014 Sukadev Bhattiprolu, IBM Corporation.
+ * Copyright (C) 2014 Ulrich Weigand, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <inttypes.h>
+#include <dwarf.h>
+#include <elfutils/libdwfl.h>
+
+#include "util/thread.h"
+#include "util/callchain.h"
+
+/*
+ * When saving the callchain on Power, the kernel conservatively saves
+ * excess entries in the callchain. A few of these entries are needed
+ * in some cases but not others. If the unnecessary entries are not
+ * ignored, we end up with duplicate arcs in the call-graphs. Use
+ * DWARF debug information to skip over any unnecessary callchain
+ * entries.
+ *
+ * See the function header for arch_skip_callchain_idx() below for more details.
+ *
+ * The libdwfl code in this file is based on code from elfutils
+ * (libdwfl/argp-std.c, libdwfl/tests/addrcfi.c, etc).
+ */
+static char *debuginfo_path;
+
+static const Dwfl_Callbacks offline_callbacks = {
+       .debuginfo_path = &debuginfo_path,
+       .find_debuginfo = dwfl_standard_find_debuginfo,
+       .section_address = dwfl_offline_section_address,
+};
+
+
+/*
+ * Use the DWARF expression for the Call-frame-address and determine
+ * if return address is in LR and if a new frame was allocated.
+ */
+static int check_return_reg(int ra_regno, Dwarf_Frame *frame)
+{
+       Dwarf_Op ops_mem[2];
+       Dwarf_Op dummy;
+       Dwarf_Op *ops = &dummy;
+       size_t nops;
+       int result;
+
+       result = dwarf_frame_register(frame, ra_regno, ops_mem, &ops, &nops);
+       if (result < 0) {
+               pr_debug("dwarf_frame_register() %s\n", dwarf_errmsg(-1));
+               return -1;
+       }
+
+       /*
+        * Check if return address is on the stack.
+        */
+       if (nops != 0 || ops != NULL)
+               return 0;
+
+       /*
+        * Return address is in LR. Check if a frame was allocated
+        * but not yet used.
+        */
+       result = dwarf_frame_cfa(frame, &ops, &nops);
+       if (result < 0) {
+               pr_debug("dwarf_frame_cfa() returns %d, %s\n", result,
+                                       dwarf_errmsg(-1));
+               return -1;
+       }
+
+       /*
+        * If call frame address is in r1, no new frame was allocated.
+        */
+       if (nops == 1 && ops[0].atom == DW_OP_bregx && ops[0].number == 1 &&
+                               ops[0].number2 == 0)
+               return 1;
+
+       /*
+        * A new frame was allocated but has not yet been used.
+        */
+       return 2;
+}
+
+/*
+ * Get the DWARF frame from the .eh_frame section.
+ */
+static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc)
+{
+       int             result;
+       Dwarf_Addr      bias;
+       Dwarf_CFI       *cfi;
+       Dwarf_Frame     *frame;
+
+       cfi = dwfl_module_eh_cfi(mod, &bias);
+       if (!cfi) {
+               pr_debug("%s(): no CFI - %s\n", __func__, dwfl_errmsg(-1));
+               return NULL;
+       }
+
+       result = dwarf_cfi_addrframe(cfi, pc, &frame);
+       if (result) {
+               pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
+               return NULL;
+       }
+
+       return frame;
+}
+
+/*
+ * Get the DWARF frame from the .debug_frame section.
+ */
+static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
+{
+       Dwarf_CFI       *cfi;
+       Dwarf_Addr      bias;
+       Dwarf_Frame     *frame;
+       int             result;
+
+       cfi = dwfl_module_dwarf_cfi(mod, &bias);
+       if (!cfi) {
+               pr_debug("%s(): no CFI - %s\n", __func__, dwfl_errmsg(-1));
+               return NULL;
+       }
+
+       result = dwarf_cfi_addrframe(cfi, pc, &frame);
+       if (result) {
+               pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
+               return NULL;
+       }
+
+       return frame;
+}
+
+/*
+ * Return:
+ *     0 if return address for the program counter @pc is on stack
+ *     1 if return address is in LR and no new stack frame was allocated
+ *     2 if return address is in LR and a new frame was allocated (but not
+ *             yet used)
+ *     -1 in case of errors
+ */
+static int check_return_addr(const char *exec_file, Dwarf_Addr pc)
+{
+       int             rc = -1;
+       Dwfl            *dwfl;
+       Dwfl_Module     *mod;
+       Dwarf_Frame     *frame;
+       int             ra_regno;
+       Dwarf_Addr      start = pc;
+       Dwarf_Addr      end = pc;
+       bool            signalp;
+
+       dwfl = dwfl_begin(&offline_callbacks);
+       if (!dwfl) {
+               pr_debug("dwfl_begin() failed: %s\n", dwarf_errmsg(-1));
+               return -1;
+       }
+
+       if (dwfl_report_offline(dwfl, "",  exec_file, -1) == NULL) {
+               pr_debug("dwfl_report_offline() failed %s\n", dwarf_errmsg(-1));
+               goto out;
+       }
+
+       mod = dwfl_addrmodule(dwfl, pc);
+       if (!mod) {
+               pr_debug("dwfl_addrmodule() failed, %s\n", dwarf_errmsg(-1));
+               goto out;
+       }
+
+       /*
+        * To work with split debug info files (e.g. glibc), check both
+        * the .eh_frame and .debug_frame sections of the ELF file.
+        */
+       frame = get_eh_frame(mod, pc);
+       if (!frame) {
+               frame = get_dwarf_frame(mod, pc);
+               if (!frame)
+                       goto out;
+       }
+
+       ra_regno = dwarf_frame_info(frame, &start, &end, &signalp);
+       if (ra_regno < 0) {
+               pr_debug("Return address register unavailable: %s\n",
+                               dwarf_errmsg(-1));
+               goto out;
+       }
+
+       rc = check_return_reg(ra_regno, frame);
+
+out:
+       dwfl_end(dwfl);
+       return rc;
+}
+
+/*
+ * The callchain saved by the kernel always includes the link register (LR).
+ *
+ *     0:      PERF_CONTEXT_USER
+ *     1:      Program counter (Next instruction pointer)
+ *     2:      LR value
+ *     3:      Caller's caller
+ *     4:      ...
+ *
+ * The value in LR is only needed when it holds a return address. If the
+ * return address is on the stack, we should ignore the LR value.
+ *
+ * Further, when the return address is in the LR, if a new frame was just
+ * allocated but the LR was not saved into it, then the LR contains the
+ * caller, slot 4 contains the caller's caller, and the contents of slot 3
+ * (chain->ips[3]) are undefined and must be ignored.
+ *
+ * Use DWARF debug information to determine if any entries need to be skipped.
+ *
+ * Return:
+ *     index:  of callchain entry that needs to be ignored (if any)
+ *     -1      if no entry needs to be ignored or in case of errors
+ */
+int arch_skip_callchain_idx(struct machine *machine, struct thread *thread,
+                               struct ip_callchain *chain)
+{
+       struct addr_location al;
+       struct dso *dso = NULL;
+       int rc;
+       u64 ip;
+       u64 skip_slot = -1;
+
+       if (chain->nr < 3)
+               return skip_slot;
+
+       ip = chain->ips[2];
+
+       thread__find_addr_location(thread, machine, PERF_RECORD_MISC_USER,
+                       MAP__FUNCTION, ip, &al);
+
+       if (al.map)
+               dso = al.map->dso;
+
+       if (!dso) {
+               pr_debug("%" PRIx64 " dso is NULL\n", ip);
+               return skip_slot;
+       }
+
+       rc = check_return_addr(dso->long_name, ip);
+
+       pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n",
+                               dso->long_name, chain->nr, ip, rc);
+
+       if (rc == 0) {
+               /*
+                * Return address on stack. Ignore LR value in callchain
+                */
+               skip_slot = 2;
+       } else if (rc == 2) {
+               /*
+                * New frame allocated but return address still in LR.
+                * Ignore the caller's caller entry in callchain.
+                */
+               skip_slot = 3;
+       }
+       return skip_slot;
+}
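A hedged sketch of how a callchain consumer is expected to use the return value: resolve the skip index once per chain and drop that one entry while walking. The walker and the resolve_and_add() sink are illustrative assumptions; perf's real consumer lives elsewhere in util/:

#include "util/thread.h"
#include "util/callchain.h"

extern void resolve_and_add(u64 ip);    /* hypothetical resolver */

/* Illustrative walker: drop the slot flagged by the arch helper. */
static void walk_chain(struct machine *machine, struct thread *thread,
                       struct ip_callchain *chain)
{
        int skip = arch_skip_callchain_idx(machine, thread, chain);
        u64 i;

        for (i = 0; i < chain->nr; i++) {
                if ((int)i == skip)
                        continue;       /* LR duplicate or undefined slot */
                resolve_and_add(chain->ips[i]);
        }
}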
index 15130b5..798ac73 100644 (file)
@@ -2,3 +2,6 @@ ifndef NO_DWARF
 PERF_HAVE_DWARF_REGS := 1
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o
 endif
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
+HAVE_KVM_STAT_SUPPORT := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/kvm-stat.o
diff --git a/tools/perf/arch/s390/util/header.c b/tools/perf/arch/s390/util/header.c
new file mode 100644 (file)
index 0000000..9fa6c3e
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Implementation of get_cpuid().
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "../../util/header.h"
+
+int get_cpuid(char *buffer, size_t sz)
+{
+       const char *cpuid = "IBM/S390";
+
+       if (strlen(cpuid) + 1 > sz)
+               return -1;
+
+       strcpy(buffer, cpuid);
+       return 0;
+}
diff --git a/tools/perf/arch/s390/util/kvm-stat.c b/tools/perf/arch/s390/util/kvm-stat.c
new file mode 100644 (file)
index 0000000..a5dbc07
--- /dev/null
@@ -0,0 +1,105 @@
+/*
+ * Arch specific functions for perf kvm stat.
+ *
+ * Copyright 2014 IBM Corp.
+ * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (version 2 only)
+ * as published by the Free Software Foundation.
+ */
+
+#include "../../util/kvm-stat.h"
+#include <asm/kvm_perf.h>
+
+define_exit_reasons_table(sie_exit_reasons, sie_intercept_code);
+define_exit_reasons_table(sie_icpt_insn_codes, icpt_insn_codes);
+define_exit_reasons_table(sie_sigp_order_codes, sigp_order_codes);
+define_exit_reasons_table(sie_diagnose_codes, diagnose_codes);
+define_exit_reasons_table(sie_icpt_prog_codes, icpt_prog_codes);
+
+static void event_icpt_insn_get_key(struct perf_evsel *evsel,
+                                   struct perf_sample *sample,
+                                   struct event_key *key)
+{
+       unsigned long insn;
+
+       insn = perf_evsel__intval(evsel, sample, "instruction");
+       key->key = icpt_insn_decoder(insn);
+       key->exit_reasons = sie_icpt_insn_codes;
+}
+
+static void event_sigp_get_key(struct perf_evsel *evsel,
+                              struct perf_sample *sample,
+                              struct event_key *key)
+{
+       key->key = perf_evsel__intval(evsel, sample, "order_code");
+       key->exit_reasons = sie_sigp_order_codes;
+}
+
+static void event_diag_get_key(struct perf_evsel *evsel,
+                              struct perf_sample *sample,
+                              struct event_key *key)
+{
+       key->key = perf_evsel__intval(evsel, sample, "code");
+       key->exit_reasons = sie_diagnose_codes;
+}
+
+static void event_icpt_prog_get_key(struct perf_evsel *evsel,
+                                   struct perf_sample *sample,
+                                   struct event_key *key)
+{
+       key->key = perf_evsel__intval(evsel, sample, "code");
+       key->exit_reasons = sie_icpt_prog_codes;
+}
+
+static struct child_event_ops child_events[] = {
+       { .name = "kvm:kvm_s390_intercept_instruction",
+         .get_key = event_icpt_insn_get_key },
+       { .name = "kvm:kvm_s390_handle_sigp",
+         .get_key = event_sigp_get_key },
+       { .name = "kvm:kvm_s390_handle_diag",
+         .get_key = event_diag_get_key },
+       { .name = "kvm:kvm_s390_intercept_prog",
+         .get_key = event_icpt_prog_get_key },
+       { NULL, NULL },
+};
+
+static struct kvm_events_ops exit_events = {
+       .is_begin_event = exit_event_begin,
+       .is_end_event = exit_event_end,
+       .child_ops = child_events,
+       .decode_key = exit_event_decode_key,
+       .name = "VM-EXIT"
+};
+
+const char * const kvm_events_tp[] = {
+       "kvm:kvm_s390_sie_enter",
+       "kvm:kvm_s390_sie_exit",
+       "kvm:kvm_s390_intercept_instruction",
+       "kvm:kvm_s390_handle_sigp",
+       "kvm:kvm_s390_handle_diag",
+       "kvm:kvm_s390_intercept_prog",
+       NULL,
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+       { .name = "vmexit", .ops = &exit_events },
+       { NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+       "Wait state",
+       NULL,
+};
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+{
+       if (strstr(cpuid, "IBM/S390")) {
+               kvm->exit_reasons = sie_exit_reasons;
+               kvm->exit_reasons_isa = "SIE";
+       } else
+               return -ENOTSUP;
+
+       return 0;
+}
index 1641542..9b21881 100644 (file)
@@ -15,3 +15,5 @@ endif
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o
 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/tsc.o
 LIB_H += arch/$(ARCH)/util/tsc.h
+HAVE_KVM_STAT_SUPPORT := 1
+LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/kvm-stat.o
index 9f89f89..d8bbf7a 100644 (file)
@@ -3,6 +3,7 @@
 #include "thread.h"
 #include "map.h"
 #include "event.h"
+#include "debug.h"
 #include "tests/tests.h"
 
 #define STACK_SIZE 8192
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
new file mode 100644 (file)
index 0000000..14e4e66
--- /dev/null
@@ -0,0 +1,156 @@
+#include "../../util/kvm-stat.h"
+#include <asm/kvm_perf.h>
+
+define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);
+define_exit_reasons_table(svm_exit_reasons, SVM_EXIT_REASONS);
+
+static struct kvm_events_ops exit_events = {
+       .is_begin_event = exit_event_begin,
+       .is_end_event = exit_event_end,
+       .decode_key = exit_event_decode_key,
+       .name = "VM-EXIT"
+};
+
+/*
+ * For the mmio events, we treat:
+ * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
+ * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
+ */
+static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
+                              struct event_key *key)
+{
+       key->key  = perf_evsel__intval(evsel, sample, "gpa");
+       key->info = perf_evsel__intval(evsel, sample, "type");
+}
+
+#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
+#define KVM_TRACE_MMIO_READ 1
+#define KVM_TRACE_MMIO_WRITE 2
+
+static bool mmio_event_begin(struct perf_evsel *evsel,
+                            struct perf_sample *sample, struct event_key *key)
+{
+       /* MMIO read begin event in kernel. */
+       if (kvm_exit_event(evsel))
+               return true;
+
+       /* MMIO write begin event in kernel. */
+       if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+           perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
+               mmio_event_get_key(evsel, sample, key);
+               return true;
+       }
+
+       return false;
+}
+
+static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
+                          struct event_key *key)
+{
+       /* MMIO write end event in kernel. */
+       if (kvm_entry_event(evsel))
+               return true;
+
+       /* MMIO read end event in kernel. */
+       if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+           perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
+               mmio_event_get_key(evsel, sample, key);
+               return true;
+       }
+
+       return false;
+}
+
+static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+                                 struct event_key *key,
+                                 char *decode)
+{
+       scnprintf(decode, DECODE_STR_LEN, "%#lx:%s",
+                 (unsigned long)key->key,
+                 key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
+}
+
+static struct kvm_events_ops mmio_events = {
+       .is_begin_event = mmio_event_begin,
+       .is_end_event = mmio_event_end,
+       .decode_key = mmio_event_decode_key,
+       .name = "MMIO Access"
+};
+
+ /* The time of emulation pio access is from kvm_pio to kvm_entry. */
+static void ioport_event_get_key(struct perf_evsel *evsel,
+                                struct perf_sample *sample,
+                                struct event_key *key)
+{
+       key->key  = perf_evsel__intval(evsel, sample, "port");
+       key->info = perf_evsel__intval(evsel, sample, "rw");
+}
+
+static bool ioport_event_begin(struct perf_evsel *evsel,
+                              struct perf_sample *sample,
+                              struct event_key *key)
+{
+       if (!strcmp(evsel->name, "kvm:kvm_pio")) {
+               ioport_event_get_key(evsel, sample, key);
+               return true;
+       }
+
+       return false;
+}
+
+static bool ioport_event_end(struct perf_evsel *evsel,
+                            struct perf_sample *sample __maybe_unused,
+                            struct event_key *key __maybe_unused)
+{
+       return kvm_entry_event(evsel);
+}
+
+static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
+                                   struct event_key *key,
+                                   char *decode)
+{
+       scnprintf(decode, DECODE_STR_LEN, "%#llx:%s",
+                 (unsigned long long)key->key,
+                 key->info ? "POUT" : "PIN");
+}
+
+static struct kvm_events_ops ioport_events = {
+       .is_begin_event = ioport_event_begin,
+       .is_end_event = ioport_event_end,
+       .decode_key = ioport_event_decode_key,
+       .name = "IO Port Access"
+};
+
+const char * const kvm_events_tp[] = {
+       "kvm:kvm_entry",
+       "kvm:kvm_exit",
+       "kvm:kvm_mmio",
+       "kvm:kvm_pio",
+       NULL,
+};
+
+struct kvm_reg_events_ops kvm_reg_events_ops[] = {
+       { .name = "vmexit", .ops = &exit_events },
+       { .name = "mmio", .ops = &mmio_events },
+       { .name = "ioport", .ops = &ioport_events },
+       { NULL, NULL },
+};
+
+const char * const kvm_skip_events[] = {
+       "HLT",
+       NULL,
+};
+
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid)
+{
+       if (strstr(cpuid, "Intel")) {
+               kvm->exit_reasons = vmx_exit_reasons;
+               kvm->exit_reasons_isa = "VMX";
+       } else if (strstr(cpuid, "AMD")) {
+               kvm->exit_reasons = svm_exit_reasons;
+               kvm->exit_reasons_isa = "SVM";
+       } else
+               return -ENOTSUP;
+
+       return 0;
+}
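For orientation, a hedged sketch of the begin/end pairing these kvm_events_ops callbacks drive: perf kvm stat opens an interval when is_begin_event() matches and charges the elapsed time to the decoded key when is_end_event() fires. The accounting loop and add_stat() below are illustrative assumptions; the real logic lives in the kvm stat tool, not in this file:

#include "../../util/kvm-stat.h"

extern void add_stat(struct event_key *key, u64 duration); /* hypothetical sink */

/* Illustrative per-VCPU pairing of begin/end events into durations. */
struct open_interval {
        struct event_key key;
        u64              start;   /* timestamp of the begin event, 0 if idle */
};

static void account_sample(struct kvm_events_ops *ops,
                           struct open_interval *cur,
                           struct perf_evsel *evsel,
                           struct perf_sample *sample)
{
        struct event_key key;

        if (cur->start && ops->is_end_event(evsel, sample, &key)) {
                add_stat(&cur->key, sample->time - cur->start);
                cur->start = 0;
        } else if (ops->is_begin_event(evsel, sample, &cur->key)) {
                cur->start = sample->time;
        }
}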
index 40021fa..fd28684 100644 (file)
@@ -6,29 +6,9 @@
 #include "../../perf.h"
 #include <linux/types.h>
 #include "../../util/debug.h"
+#include "../../util/tsc.h"
 #include "tsc.h"
 
-u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
-{
-       u64 t, quot, rem;
-
-       t = ns - tc->time_zero;
-       quot = t / tc->time_mult;
-       rem  = t % tc->time_mult;
-       return (quot << tc->time_shift) +
-              (rem << tc->time_shift) / tc->time_mult;
-}
-
-u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
-{
-       u64 quot, rem;
-
-       quot = cyc >> tc->time_shift;
-       rem  = cyc & ((1 << tc->time_shift) - 1);
-       return tc->time_zero + quot * tc->time_mult +
-              ((rem * tc->time_mult) >> tc->time_shift);
-}
-
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                             struct perf_tsc_conversion *tc)
 {
@@ -57,3 +37,12 @@ int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
 
        return 0;
 }
+
+u64 rdtsc(void)
+{
+       unsigned int low, high;
+
+       asm volatile("rdtsc" : "=a" (low), "=d" (high));
+
+       return low | ((u64)high) << 32;
+}
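The two conversion helpers deleted above appear to move to the generic util/tsc.c (its Makefile entries are added earlier in this diff), so only their declarations remain visible here. The fixed-point math they implement is worth a self-contained sketch with a worked round-trip; the time_mult/time_shift values below are illustrative, roughly a 3 GHz TSC:

#include <stdio.h>

typedef unsigned long long u64;

struct perf_tsc_conversion {
        unsigned short time_shift;
        unsigned int   time_mult;
        u64            time_zero;
};

/* ns -> cycles: invert time_zero + (cyc * time_mult) >> time_shift */
static u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
{
        u64 t = ns - tc->time_zero;
        u64 quot = t / tc->time_mult;
        u64 rem  = t % tc->time_mult;

        return (quot << tc->time_shift) +
               (rem << tc->time_shift) / tc->time_mult;
}

/* cycles -> ns */
static u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
{
        u64 quot = cyc >> tc->time_shift;
        u64 rem  = cyc & ((1ULL << tc->time_shift) - 1);

        return tc->time_zero + quot * tc->time_mult +
               ((rem * tc->time_mult) >> tc->time_shift);
}

int main(void)
{
        /* Illustrative: mult/2^shift ~= 1/3 ns per cycle (~3 GHz). */
        struct perf_tsc_conversion tc = {
                .time_shift = 10,
                .time_mult  = 341,
                .time_zero  = 0,
        };
        u64 cyc = 3000000ULL;
        u64 ns  = tsc_to_perf_time(cyc, &tc);

        printf("%llu cycles -> %llu ns -> %llu cycles\n",
               cyc, ns, perf_time_to_tsc(ns, &tc));
        return 0;
}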
index 2affe03..2edc4d3 100644 (file)
@@ -14,7 +14,4 @@ struct perf_event_mmap_page;
 int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc,
                             struct perf_tsc_conversion *tc);
 
-u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
-u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
-
 #endif /* TOOLS_PERF_ARCH_X86_UTIL_TSC_H__ */
index 3261f68..db25e93 100644 (file)
@@ -3,6 +3,7 @@
 #include <libunwind.h>
 #include "perf_regs.h"
 #include "../../util/unwind.h"
+#include "../../util/debug.h"
 
 #ifdef HAVE_ARCH_X86_64_SUPPORT
 int libunwind__arch_reg_id(int regnum)
index eba4670..3c4dd44 100644 (file)
@@ -43,5 +43,6 @@ extern int bench_futex_requeue(int argc, const char **argv, const char *prefix);
 #define BENCH_FORMAT_UNKNOWN           -1
 
 extern int bench_format;
+extern unsigned int bench_repeat;
 
 #endif
index a162558..732403b 100644 (file)
@@ -29,13 +29,6 @@ static u_int32_t futex1 = 0, futex2 = 0;
  */
 static unsigned int nrequeue = 1;
 
-/*
- * There can be significant variance from run to run,
- * the more repeats, the more exact the overall avg and
- * the better idea of the futex latency.
- */
-static unsigned int repeat = 10;
-
 static pthread_t *worker;
 static bool done = 0, silent = 0;
 static pthread_mutex_t thread_lock;
@@ -46,7 +39,6 @@ static unsigned int ncpus, threads_starting, nthreads = 0;
 static const struct option options[] = {
        OPT_UINTEGER('t', "threads",  &nthreads, "Specify amount of threads"),
        OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"),
-       OPT_UINTEGER('r', "repeat",   &repeat,   "Specify amount of times to repeat the run"),
        OPT_BOOLEAN( 's', "silent",   &silent,   "Silent mode: do not display data/details"),
        OPT_END()
 };
@@ -146,7 +138,7 @@ int bench_futex_requeue(int argc, const char **argv,
        pthread_cond_init(&thread_parent, NULL);
        pthread_cond_init(&thread_worker, NULL);
 
-       for (j = 0; j < repeat && !done; j++) {
+       for (j = 0; j < bench_repeat && !done; j++) {
                unsigned int nrequeued = 0;
                struct timeval start, end, runtime;
 
index d096169..50022cb 100644 (file)
@@ -30,15 +30,8 @@ static u_int32_t futex1 = 0;
  */
 static unsigned int nwakes = 1;
 
-/*
- * There can be significant variance from run to run,
- * the more repeats, the more exact the overall avg and
- * the better idea of the futex latency.
- */
-static unsigned int repeat = 10;
-
 pthread_t *worker;
-static bool done = 0, silent = 0;
+static bool done = false, silent = false;
 static pthread_mutex_t thread_lock;
 static pthread_cond_t thread_parent, thread_worker;
 static struct stats waketime_stats, wakeup_stats;
@@ -47,7 +40,6 @@ static unsigned int ncpus, threads_starting, nthreads = 0;
 static const struct option options[] = {
        OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
        OPT_UINTEGER('w', "nwakes",  &nwakes,   "Specify amount of threads to wake at once"),
-       OPT_UINTEGER('r', "repeat",  &repeat,   "Specify amount of times to repeat the run"),
        OPT_BOOLEAN( 's', "silent",  &silent,   "Silent mode: do not display data/details"),
        OPT_END()
 };
@@ -149,7 +141,7 @@ int bench_futex_wake(int argc, const char **argv,
        pthread_cond_init(&thread_parent, NULL);
        pthread_cond_init(&thread_worker, NULL);
 
-       for (j = 0; j < repeat && !done; j++) {
+       for (j = 0; j < bench_repeat && !done; j++) {
                unsigned int nwoken = 0;
                struct timeval start, end, runtime;
 
index 5ce71d3..2465141 100644 (file)
@@ -10,6 +10,7 @@
 #include "../util/util.h"
 #include "../util/parse-options.h"
 #include "../util/header.h"
+#include "../util/cloexec.h"
 #include "bench.h"
 #include "mem-memcpy-arch.h"
 
@@ -83,7 +84,8 @@ static struct perf_event_attr cycle_attr = {
 
 static void init_cycle(void)
 {
-       cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0);
+       cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1,
+                                      perf_event_open_cloexec_flag());
 
        if (cycle_fd < 0 && errno == ENOSYS)
                die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
@@ -189,6 +191,11 @@ int bench_mem_memcpy(int argc, const char **argv,
        argc = parse_options(argc, argv, options,
                             bench_mem_memcpy_usage, 0);
 
+       if (no_prefault && only_prefault) {
+               fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
+               return 1;
+       }
+
        if (use_cycle)
                init_cycle();
 
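
The switch to perf_event_open_cloexec_flag() here (and in the memset benchmark below) exists because perf_event_open() only accepts PERF_FLAG_FD_CLOEXEC on Linux 3.14 and later; the helper probes once and falls back to passing no flag on older kernels. A minimal sketch of such a probe, assuming the raw syscall wrapper; the actual util/cloexec.c may differ in detail:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

#ifndef PERF_FLAG_FD_CLOEXEC
#define PERF_FLAG_FD_CLOEXEC (1UL << 3)
#endif

/* Probe once whether the running kernel accepts PERF_FLAG_FD_CLOEXEC. */
static unsigned long probe_cloexec_flag(void)
{
        struct perf_event_attr attr = {
                .type = PERF_TYPE_SOFTWARE,
                .config = PERF_COUNT_SW_CPU_CLOCK,
                .size = sizeof(attr),
                .exclude_kernel = 1,    /* keep the probe unprivileged */
        };
        int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
                         PERF_FLAG_FD_CLOEXEC);

        if (fd >= 0) {
                close(fd);
                return PERF_FLAG_FD_CLOEXEC;    /* supported: request it everywhere */
        }
        return 0;       /* older kernels reject it with EINVAL: pass no flag */
}
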
index 9af79d2..75fc3e6 100644 (file)
@@ -10,6 +10,7 @@
 #include "../util/util.h"
 #include "../util/parse-options.h"
 #include "../util/header.h"
+#include "../util/cloexec.h"
 #include "bench.h"
 #include "mem-memset-arch.h"
 
@@ -83,7 +84,8 @@ static struct perf_event_attr cycle_attr = {
 
 static void init_cycle(void)
 {
-       cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1, 0);
+       cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1,
+                                      perf_event_open_cloexec_flag());
 
        if (cycle_fd < 0 && errno == ENOSYS)
                die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
@@ -181,6 +183,11 @@ int bench_mem_memset(int argc, const char **argv,
        argc = parse_options(argc, argv, options,
                             bench_mem_memset_usage, 0);
 
+       if (no_prefault && only_prefault) {
+               fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
+               return 1;
+       }
+
        if (use_cycle)
                init_cycle();
 
index cc1190a..52a5659 100644 (file)
@@ -28,6 +28,7 @@
 #include <sys/time.h>
 #include <sys/poll.h>
 #include <limits.h>
+#include <err.h>
 
 #define DATASIZE 100
 
@@ -50,12 +51,6 @@ struct receiver_context {
        int wakefd;
 };
 
-static void barf(const char *msg)
-{
-       fprintf(stderr, "%s (error: %s)\n", msg, strerror(errno));
-       exit(1);
-}
-
 static void fdpair(int fds[2])
 {
        if (use_pipes) {
@@ -66,7 +61,7 @@ static void fdpair(int fds[2])
                        return;
        }
 
-       barf(use_pipes ? "pipe()" : "socketpair()");
+       err(EXIT_FAILURE, use_pipes ? "pipe()" : "socketpair()");
 }
 
 /* Block until we're ready to go */
@@ -77,11 +72,11 @@ static void ready(int ready_out, int wakefd)
 
        /* Tell them we're ready. */
        if (write(ready_out, &dummy, 1) != 1)
-               barf("CLIENT: ready write");
+               err(EXIT_FAILURE, "CLIENT: ready write");
 
        /* Wait for "GO" signal */
        if (poll(&pollfd, 1, -1) != 1)
-               barf("poll");
+               err(EXIT_FAILURE, "poll");
 }
 
 /* Sender sprays loops messages down each file descriptor */
@@ -101,7 +96,7 @@ again:
                        ret = write(ctx->out_fds[j], data + done,
                                    sizeof(data)-done);
                        if (ret < 0)
-                               barf("SENDER: write");
+                               err(EXIT_FAILURE, "SENDER: write");
                        done += ret;
                        if (done < DATASIZE)
                                goto again;
@@ -131,7 +126,7 @@ static void *receiver(struct receiver_context* ctx)
 again:
                ret = read(ctx->in_fds[0], data + done, DATASIZE - done);
                if (ret < 0)
-                       barf("SERVER: read");
+                       err(EXIT_FAILURE, "SERVER: read");
                done += ret;
                if (done < DATASIZE)
                        goto again;
@@ -144,14 +139,14 @@ static pthread_t create_worker(void *ctx, void *(*func)(void *))
 {
        pthread_attr_t attr;
        pthread_t childid;
-       int err;
+       int ret;
 
        if (!thread_mode) {
                /* process mode */
                /* Fork the receiver. */
                switch (fork()) {
                case -1:
-                       barf("fork()");
+                       err(EXIT_FAILURE, "fork()");
                        break;
                case 0:
                        (*func) (ctx);
@@ -165,19 +160,17 @@ static pthread_t create_worker(void *ctx, void *(*func)(void *))
        }
 
        if (pthread_attr_init(&attr) != 0)
-               barf("pthread_attr_init:");
+               err(EXIT_FAILURE, "pthread_attr_init:");
 
 #ifndef __ia64__
        if (pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN) != 0)
-               barf("pthread_attr_setstacksize");
+               err(EXIT_FAILURE, "pthread_attr_setstacksize");
 #endif
 
-       err = pthread_create(&childid, &attr, func, ctx);
-       if (err != 0) {
-               fprintf(stderr, "pthread_create failed: %s (%d)\n",
-                       strerror(err), err);
-               exit(-1);
-       }
+       ret = pthread_create(&childid, &attr, func, ctx);
+       if (ret != 0)
+               err(EXIT_FAILURE, "pthread_create failed");
+
        return childid;
 }
 
@@ -207,14 +200,14 @@ static unsigned int group(pthread_t *pth,
                        + num_fds * sizeof(int));
 
        if (!snd_ctx)
-               barf("malloc()");
+               err(EXIT_FAILURE, "malloc()");
 
        for (i = 0; i < num_fds; i++) {
                int fds[2];
                struct receiver_context *ctx = malloc(sizeof(*ctx));
 
                if (!ctx)
-                       barf("malloc()");
+                       err(EXIT_FAILURE, "malloc()");
 
 
                /* Create the pipe between client and server */
@@ -281,7 +274,7 @@ int bench_sched_messaging(int argc, const char **argv,
 
        pth_tab = malloc(num_fds * 2 * num_groups * sizeof(pthread_t));
        if (!pth_tab)
-               barf("main:malloc()");
+               err(EXIT_FAILURE, "main:malloc()");
 
        fdpair(readyfds);
        fdpair(wakefds);
@@ -294,13 +287,13 @@ int bench_sched_messaging(int argc, const char **argv,
        /* Wait for everyone to be ready */
        for (i = 0; i < total_children; i++)
                if (read(readyfds[0], &dummy, 1) != 1)
-                       barf("Reading for readyfds");
+                       err(EXIT_FAILURE, "Reading for readyfds");
 
        gettimeofday(&start, NULL);
 
        /* Kick them off */
        if (write(wakefds[1], &dummy, 1) != 1)
-               barf("Writing to start them");
+               err(EXIT_FAILURE, "Writing to start them");
 
        /* Reap them all */
        for (i = 0; i < total_children; i++)
@@ -332,5 +325,7 @@ int bench_sched_messaging(int argc, const char **argv,
                break;
        }
 
+       free(pth_tab);
+
        return 0;
 }
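
The sched-messaging conversion above trades the local barf() helper for err(3) from <err.h>, which prints the program name, the given message and strerror(errno), then exits; that also removes the exit(-1) oddity in the old pthread_create path. One caveat: pthread_create() reports failure through its return value rather than errno, so the errno that err() decodes there may be stale; errx() with strerror(ret) would be slightly more precise. A self-contained illustration of the idiom:

#include <err.h>
#include <stdlib.h>
#include <unistd.h>

/* err() ~= fprintf(stderr, "prog: msg: %s\n", strerror(errno)); exit(status) */
int main(void)
{
        char c;

        if (read(-1, &c, 1) != 1)       /* deliberately fails with EBADF */
                err(EXIT_FAILURE, "read from fd -1");
        return 0;
}
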
index 1e6e777..b9a56fa 100644 (file)
@@ -104,9 +104,11 @@ static const char *bench_format_str;
 
 /* Output/formatting style, exported to benchmark modules: */
 int bench_format = BENCH_FORMAT_DEFAULT;
+unsigned int bench_repeat = 10; /* default number of times to repeat the run */
 
 static const struct option bench_options[] = {
        OPT_STRING('f', "format", &bench_format_str, "default", "Specify format style"),
+       OPT_UINTEGER('r', "repeat",  &bench_repeat,   "Specify amount of times to repeat the run"),
        OPT_END()
 };
 
@@ -226,6 +228,11 @@ int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused)
                goto end;
        }
 
+       if (bench_repeat == 0) {
+               printf("Invalid repeat option: Must specify a positive value\n");
+               goto end;
+       }
+
        if (argc < 1) {
                print_usage();
                goto end;
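
Taken together with the futex hunks above, this moves the -r/--repeat knob into the common 'perf bench' front end: builtin-bench.c owns the option and validates it once (zero repeats would silently produce empty statistics), while the individual benchmarks only read the exported bench_repeat. A minimal, self-contained sketch of that shared-knob pattern, with run_one_round() as a hypothetical stand-in for a benchmark body:

#include <stdio.h>

unsigned int bench_repeat = 10;         /* owned by the front end, declared in bench.h */

static void run_one_round(unsigned int j)
{
        printf("round %u\n", j);        /* stand-in for the real benchmark work */
}

int main(void)
{
        unsigned int j;

        if (bench_repeat == 0) {        /* validated once, centrally */
                fprintf(stderr, "Invalid repeat option: must be positive\n");
                return 1;
        }
        for (j = 0; j < bench_repeat; j++)
                run_one_round(j);       /* benchmarks just read the global */
        return 0;
}
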
index b22dbb1..2a2c78f 100644 (file)
@@ -125,7 +125,8 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
        return ret;
 }
 
-static int build_id_cache__add_kcore(const char *filename, const char *debugdir)
+static int build_id_cache__add_kcore(const char *filename, const char *debugdir,
+                                    bool force)
 {
        char dir[32], sbuildid[BUILD_ID_SIZE * 2 + 1];
        char from_dir[PATH_MAX], to_dir[PATH_MAX];
@@ -144,7 +145,8 @@ static int build_id_cache__add_kcore(const char *filename, const char *debugdir)
        scnprintf(to_dir, sizeof(to_dir), "%s/[kernel.kcore]/%s",
                  debugdir, sbuildid);
 
-       if (!build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
+       if (!force &&
+           !build_id_cache__kcore_existing(from_dir, to_dir, sizeof(to_dir))) {
                pr_debug("same kcore found in %s\n", to_dir);
                return 0;
        }
@@ -389,7 +391,7 @@ int cmd_buildid_cache(int argc, const char **argv,
        }
 
        if (kcore_filename &&
-           build_id_cache__add_kcore(kcore_filename, debugdir))
+           build_id_cache__add_kcore(kcore_filename, debugdir, force))
                pr_warning("Couldn't add %s\n", kcore_filename);
 
        return ret;
index c99e0de..66e12f5 100644 (file)
@@ -15,6 +15,7 @@
 #include "util/parse-options.h"
 #include "util/session.h"
 #include "util/data.h"
+#include "util/debug.h"
 
 static int __cmd_evlist(const char *file_name, struct perf_attr_details *details)
 {
index 178b88a..0384d93 100644 (file)
@@ -11,6 +11,7 @@
 #include "util/parse-options.h"
 #include "util/run-command.h"
 #include "util/help.h"
+#include "util/debug.h"
 
 static struct man_viewer_list {
        struct man_viewer_list *next;
index 6a3af00..9a02807 100644 (file)
@@ -72,7 +72,7 @@ static int perf_event__repipe_attr(struct perf_tool *tool,
        if (ret)
                return ret;
 
-       if (&inject->output.is_pipe)
+       if (!inject->output.is_pipe)
                return 0;
 
        return perf_event__repipe_synth(tool, event);
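
The one-character fix above deserves spelling out: the old test applied & to a struct member, and the address of a member is never NULL, so the early return fired whether or not the output was a pipe. A minimal illustration of the bug class, using a hypothetical struct that mirrors the patch:

#include <stdbool.h>
#include <stdio.h>

struct output { bool is_pipe; };

int main(void)
{
        struct output out = { .is_pipe = false };

        if (&out.is_pipe)       /* address of a member: always true */
                printf("always taken, regardless of is_pipe\n");
        if (!out.is_pipe)       /* the intended test: the value, negated */
                printf("taken only when the output is not a pipe\n");
        return 0;
}
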
@@ -389,6 +389,9 @@ static int __cmd_inject(struct perf_inject *inject)
        ret = perf_session__process_events(session, &inject->tool);
 
        if (!file_out->is_pipe) {
+               if (inject->build_ids)
+                       perf_header__set_feat(&session->header,
+                                             HEADER_BUILD_ID);
                session->header.data_size = inject->bytes_written;
                perf_session__write_header(session, session->evlist, file_out->fd, true);
        }
@@ -436,6 +439,8 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
                            "where and how long tasks slept"),
                OPT_INCR('v', "verbose", &verbose,
                         "be more verbose (show build ids, etc)"),
+               OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
+                          "kallsyms pathname"),
                OPT_END()
        };
        const char * const inject_usage[] = {
index 0f1e5a2..43367eb 100644 (file)
 #include <pthread.h>
 #include <math.h>
 
-#if defined(__i386__) || defined(__x86_64__)
-#include <asm/svm.h>
-#include <asm/vmx.h>
-#include <asm/kvm.h>
-
-struct event_key {
-       #define INVALID_KEY     (~0ULL)
-       u64 key;
-       int info;
-};
-
-struct kvm_event_stats {
-       u64 time;
-       struct stats stats;
-};
-
-struct kvm_event {
-       struct list_head hash_entry;
-       struct rb_node rb;
-
-       struct event_key key;
-
-       struct kvm_event_stats total;
-
-       #define DEFAULT_VCPU_NUM 8
-       int max_vcpu;
-       struct kvm_event_stats *vcpu;
-};
-
-typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
-
-struct kvm_event_key {
-       const char *name;
-       key_cmp_fun key;
-};
-
-
-struct perf_kvm_stat;
-
-struct kvm_events_ops {
-       bool (*is_begin_event)(struct perf_evsel *evsel,
-                              struct perf_sample *sample,
-                              struct event_key *key);
-       bool (*is_end_event)(struct perf_evsel *evsel,
-                            struct perf_sample *sample, struct event_key *key);
-       void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
-                          char decode[20]);
-       const char *name;
-};
-
-struct exit_reasons_table {
-       unsigned long exit_code;
-       const char *reason;
-};
+#ifdef HAVE_KVM_STAT_SUPPORT
+#include <asm/kvm_perf.h>
+#include "util/kvm-stat.h"
 
-#define EVENTS_BITS            12
-#define EVENTS_CACHE_SIZE      (1UL << EVENTS_BITS)
-
-struct perf_kvm_stat {
-       struct perf_tool    tool;
-       struct record_opts  opts;
-       struct perf_evlist  *evlist;
-       struct perf_session *session;
-
-       const char *file_name;
-       const char *report_event;
-       const char *sort_key;
-       int trace_vcpu;
-
-       struct exit_reasons_table *exit_reasons;
-       int exit_reasons_size;
-       const char *exit_reasons_isa;
-
-       struct kvm_events_ops *events_ops;
-       key_cmp_fun compare;
-       struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
-
-       u64 total_time;
-       u64 total_count;
-       u64 lost_events;
-       u64 duration;
-
-       const char *pid_str;
-       struct intlist *pid_list;
-
-       struct rb_root result;
-
-       int timerfd;
-       unsigned int display_time;
-       bool live;
-};
-
-
-static void exit_event_get_key(struct perf_evsel *evsel,
-                              struct perf_sample *sample,
-                              struct event_key *key)
+void exit_event_get_key(struct perf_evsel *evsel,
+                       struct perf_sample *sample,
+                       struct event_key *key)
 {
        key->info = 0;
-       key->key = perf_evsel__intval(evsel, sample, "exit_reason");
+       key->key = perf_evsel__intval(evsel, sample, KVM_EXIT_REASON);
 }
 
-static bool kvm_exit_event(struct perf_evsel *evsel)
+bool kvm_exit_event(struct perf_evsel *evsel)
 {
-       return !strcmp(evsel->name, "kvm:kvm_exit");
+       return !strcmp(evsel->name, KVM_EXIT_TRACE);
 }
 
-static bool exit_event_begin(struct perf_evsel *evsel,
-                            struct perf_sample *sample, struct event_key *key)
+bool exit_event_begin(struct perf_evsel *evsel,
+                     struct perf_sample *sample, struct event_key *key)
 {
        if (kvm_exit_event(evsel)) {
                exit_event_get_key(evsel, sample, key);
@@ -146,32 +57,23 @@ static bool exit_event_begin(struct perf_evsel *evsel,
        return false;
 }
 
-static bool kvm_entry_event(struct perf_evsel *evsel)
+bool kvm_entry_event(struct perf_evsel *evsel)
 {
-       return !strcmp(evsel->name, "kvm:kvm_entry");
+       return !strcmp(evsel->name, KVM_ENTRY_TRACE);
 }
 
-static bool exit_event_end(struct perf_evsel *evsel,
-                          struct perf_sample *sample __maybe_unused,
-                          struct event_key *key __maybe_unused)
+bool exit_event_end(struct perf_evsel *evsel,
+                   struct perf_sample *sample __maybe_unused,
+                   struct event_key *key __maybe_unused)
 {
        return kvm_entry_event(evsel);
 }
 
-static struct exit_reasons_table vmx_exit_reasons[] = {
-       VMX_EXIT_REASONS
-};
-
-static struct exit_reasons_table svm_exit_reasons[] = {
-       SVM_EXIT_REASONS
-};
-
-static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code)
+static const char *get_exit_reason(struct perf_kvm_stat *kvm,
+                                  struct exit_reasons_table *tbl,
+                                  u64 exit_code)
 {
-       int i = kvm->exit_reasons_size;
-       struct exit_reasons_table *tbl = kvm->exit_reasons;
-
-       while (i--) {
+       while (tbl->reason != NULL) {
                if (tbl->exit_code == exit_code)
                        return tbl->reason;
                tbl++;
@@ -182,148 +84,30 @@ static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code)
        return "UNKNOWN";
 }
 
-static void exit_event_decode_key(struct perf_kvm_stat *kvm,
-                                 struct event_key *key,
-                                 char decode[20])
+void exit_event_decode_key(struct perf_kvm_stat *kvm,
+                          struct event_key *key,
+                          char *decode)
 {
-       const char *exit_reason = get_exit_reason(kvm, key->key);
+       const char *exit_reason = get_exit_reason(kvm, key->exit_reasons,
+                                                 key->key);
 
-       scnprintf(decode, 20, "%s", exit_reason);
+       scnprintf(decode, DECODE_STR_LEN, "%s", exit_reason);
 }
 
-static struct kvm_events_ops exit_events = {
-       .is_begin_event = exit_event_begin,
-       .is_end_event = exit_event_end,
-       .decode_key = exit_event_decode_key,
-       .name = "VM-EXIT"
-};
-
-/*
- * For the mmio events, we treat:
- * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
- * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
- */
-static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample,
-                              struct event_key *key)
-{
-       key->key  = perf_evsel__intval(evsel, sample, "gpa");
-       key->info = perf_evsel__intval(evsel, sample, "type");
-}
-
-#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
-#define KVM_TRACE_MMIO_READ 1
-#define KVM_TRACE_MMIO_WRITE 2
-
-static bool mmio_event_begin(struct perf_evsel *evsel,
-                            struct perf_sample *sample, struct event_key *key)
-{
-       /* MMIO read begin event in kernel. */
-       if (kvm_exit_event(evsel))
-               return true;
-
-       /* MMIO write begin event in kernel. */
-       if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
-           perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
-               mmio_event_get_key(evsel, sample, key);
-               return true;
-       }
-
-       return false;
-}
-
-static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
-                          struct event_key *key)
-{
-       /* MMIO write end event in kernel. */
-       if (kvm_entry_event(evsel))
-               return true;
-
-       /* MMIO read end event in kernel.*/
-       if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
-           perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
-               mmio_event_get_key(evsel, sample, key);
-               return true;
-       }
-
-       return false;
-}
-
-static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
-                                 struct event_key *key,
-                                 char decode[20])
-{
-       scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
-                               key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
-}
-
-static struct kvm_events_ops mmio_events = {
-       .is_begin_event = mmio_event_begin,
-       .is_end_event = mmio_event_end,
-       .decode_key = mmio_event_decode_key,
-       .name = "MMIO Access"
-};
-
- /* The time of emulation pio access is from kvm_pio to kvm_entry. */
-static void ioport_event_get_key(struct perf_evsel *evsel,
-                                struct perf_sample *sample,
-                                struct event_key *key)
+static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
 {
-       key->key  = perf_evsel__intval(evsel, sample, "port");
-       key->info = perf_evsel__intval(evsel, sample, "rw");
-}
+       struct kvm_reg_events_ops *events_ops = kvm_reg_events_ops;
 
-static bool ioport_event_begin(struct perf_evsel *evsel,
-                              struct perf_sample *sample,
-                              struct event_key *key)
-{
-       if (!strcmp(evsel->name, "kvm:kvm_pio")) {
-               ioport_event_get_key(evsel, sample, key);
-               return true;
+       for (events_ops = kvm_reg_events_ops; events_ops->name; events_ops++) {
+               if (!strcmp(events_ops->name, kvm->report_event)) {
+                       kvm->events_ops = events_ops->ops;
+                       return true;
+               }
        }
 
        return false;
 }
 
-static bool ioport_event_end(struct perf_evsel *evsel,
-                            struct perf_sample *sample __maybe_unused,
-                            struct event_key *key __maybe_unused)
-{
-       return kvm_entry_event(evsel);
-}
-
-static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
-                                   struct event_key *key,
-                                   char decode[20])
-{
-       scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
-                               key->info ? "POUT" : "PIN");
-}
-
-static struct kvm_events_ops ioport_events = {
-       .is_begin_event = ioport_event_begin,
-       .is_end_event = ioport_event_end,
-       .decode_key = ioport_event_decode_key,
-       .name = "IO Port Access"
-};
-
-static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
-{
-       bool ret = true;
-
-       if (!strcmp(kvm->report_event, "vmexit"))
-               kvm->events_ops = &exit_events;
-       else if (!strcmp(kvm->report_event, "mmio"))
-               kvm->events_ops = &mmio_events;
-       else if (!strcmp(kvm->report_event, "ioport"))
-               kvm->events_ops = &ioport_events;
-       else {
-               pr_err("Unknown report event:%s\n", kvm->report_event);
-               ret = false;
-       }
-
-       return ret;
-}
-
 struct vcpu_event_record {
        int vcpu_id;
        u64 start_time;
@@ -477,6 +261,54 @@ static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
        return true;
 }
 
+static bool is_child_event(struct perf_kvm_stat *kvm,
+                          struct perf_evsel *evsel,
+                          struct perf_sample *sample,
+                          struct event_key *key)
+{
+       struct child_event_ops *child_ops;
+
+       child_ops = kvm->events_ops->child_ops;
+
+       if (!child_ops)
+               return false;
+
+       for (; child_ops->name; child_ops++) {
+               if (!strcmp(evsel->name, child_ops->name)) {
+                       child_ops->get_key(evsel, sample, key);
+                       return true;
+               }
+       }
+
+       return false;
+}
+
+static bool handle_child_event(struct perf_kvm_stat *kvm,
+                              struct vcpu_event_record *vcpu_record,
+                              struct event_key *key,
+                              struct perf_sample *sample __maybe_unused)
+{
+       struct kvm_event *event = NULL;
+
+       if (key->key != INVALID_KEY)
+               event = find_create_kvm_event(kvm, key);
+
+       vcpu_record->last_event = event;
+
+       return true;
+}
+
+static bool skip_event(const char *event)
+{
+       const char * const *skip_events;
+
+       for (skip_events = kvm_skip_events; *skip_events; skip_events++)
+               if (!strcmp(event, *skip_events))
+                       return true;
+
+       return false;
+}
+
 static bool handle_end_event(struct perf_kvm_stat *kvm,
                             struct vcpu_event_record *vcpu_record,
                             struct event_key *key,
@@ -525,10 +357,10 @@ static bool handle_end_event(struct perf_kvm_stat *kvm,
        time_diff = sample->time - time_begin;
 
        if (kvm->duration && time_diff > kvm->duration) {
-               char decode[32];
+               char decode[DECODE_STR_LEN];
 
                kvm->events_ops->decode_key(kvm, &event->key, decode);
-               if (strcmp(decode, "HLT")) {
+               if (!skip_event(decode)) {
                        pr_info("%" PRIu64 " VM %d, vcpu %d: %s event took %" PRIu64 "usec\n",
                                 sample->time, sample->pid, vcpu_record->vcpu_id,
                                 decode, time_diff/1000);
@@ -553,7 +385,7 @@ struct vcpu_event_record *per_vcpu_record(struct thread *thread,
                        return NULL;
                }
 
-               vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
+               vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, VCPU_ID);
                thread->priv = vcpu_record;
        }
 
@@ -566,7 +398,8 @@ static bool handle_kvm_event(struct perf_kvm_stat *kvm,
                             struct perf_sample *sample)
 {
        struct vcpu_event_record *vcpu_record;
-       struct event_key key = {.key = INVALID_KEY};
+       struct event_key key = { .key = INVALID_KEY,
+                                .exit_reasons = kvm->exit_reasons };
 
        vcpu_record = per_vcpu_record(thread, evsel, sample);
        if (!vcpu_record)
@@ -580,6 +413,9 @@ static bool handle_kvm_event(struct perf_kvm_stat *kvm,
        if (kvm->events_ops->is_begin_event(evsel, sample, &key))
                return handle_begin_event(kvm, vcpu_record, &key, sample->time);
 
+       if (is_child_event(kvm, evsel, sample, &key))
+               return handle_child_event(kvm, vcpu_record, &key, sample);
+
        if (kvm->events_ops->is_end_event(evsel, sample, &key))
                return handle_end_event(kvm, vcpu_record, &key, sample);
 
@@ -740,7 +576,7 @@ static void show_timeofday(void)
 
 static void print_result(struct perf_kvm_stat *kvm)
 {
-       char decode[20];
+       char decode[DECODE_STR_LEN];
        struct kvm_event *event;
        int vcpu = kvm->trace_vcpu;
 
@@ -751,7 +587,7 @@ static void print_result(struct perf_kvm_stat *kvm)
 
        pr_info("\n\n");
        print_vcpu_info(kvm);
-       pr_info("%20s ", kvm->events_ops->name);
+       pr_info("%*s ", DECODE_STR_LEN, kvm->events_ops->name);
        pr_info("%10s ", "Samples");
        pr_info("%9s ", "Samples%");
 
@@ -770,7 +606,7 @@ static void print_result(struct perf_kvm_stat *kvm)
                min = get_event_min(event, vcpu);
 
                kvm->events_ops->decode_key(kvm, &event->key, decode);
-               pr_info("%20s ", decode);
+               pr_info("%*s ", DECODE_STR_LEN, decode);
                pr_info("%10llu ", (unsigned long long)ecount);
                pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
                pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
@@ -839,34 +675,28 @@ static int process_sample_event(struct perf_tool *tool,
 static int cpu_isa_config(struct perf_kvm_stat *kvm)
 {
        char buf[64], *cpuid;
-       int err, isa;
+       int err;
 
        if (kvm->live) {
                err = get_cpuid(buf, sizeof(buf));
                if (err != 0) {
-                       pr_err("Failed to look up CPU type (Intel or AMD)\n");
+                       pr_err("Failed to look up CPU type\n");
                        return err;
                }
                cpuid = buf;
        } else
                cpuid = kvm->session->header.env.cpuid;
 
-       if (strstr(cpuid, "Intel"))
-               isa = 1;
-       else if (strstr(cpuid, "AMD"))
-               isa = 0;
-       else {
-               pr_err("CPU %s is not supported.\n", cpuid);
-               return -ENOTSUP;
+       if (!cpuid) {
+               pr_err("Failed to look up CPU type\n");
+               return -EINVAL;
        }
 
-       if (isa == 1) {
-               kvm->exit_reasons = vmx_exit_reasons;
-               kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
-               kvm->exit_reasons_isa = "VMX";
-       }
+       err = cpu_isa_init(kvm, cpuid);
+       if (err == -ENOTSUP)
+               pr_err("CPU %s is not supported.\n", cpuid);
 
-       return 0;
+       return err;
 }
 
 static bool verify_vcpu(int vcpu)
@@ -1300,13 +1130,6 @@ exit:
        return ret;
 }
 
-static const char * const kvm_events_tp[] = {
-       "kvm:kvm_entry",
-       "kvm:kvm_exit",
-       "kvm:kvm_mmio",
-       "kvm:kvm_pio",
-};
-
 #define STRDUP_FAIL_EXIT(s)            \
        ({      char *_p;               \
        _p = strdup(s);         \
@@ -1318,7 +1141,7 @@ static const char * const kvm_events_tp[] = {
 static int
 kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
 {
-       unsigned int rec_argc, i, j;
+       unsigned int rec_argc, i, j, events_tp_size;
        const char **rec_argv;
        const char * const record_args[] = {
                "record",
@@ -1326,9 +1149,14 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
                "-m", "1024",
                "-c", "1",
        };
+       const char * const *events_tp;
+       events_tp_size = 0;
+
+       for (events_tp = kvm_events_tp; *events_tp; events_tp++)
+               events_tp_size++;
 
        rec_argc = ARRAY_SIZE(record_args) + argc + 2 +
-                  2 * ARRAY_SIZE(kvm_events_tp);
+                  2 * events_tp_size;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
        if (rec_argv == NULL)
@@ -1337,7 +1165,7 @@ kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
        for (i = 0; i < ARRAY_SIZE(record_args); i++)
                rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);
 
-       for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+       for (j = 0; j < events_tp_size; j++) {
                rec_argv[i++] = "-e";
                rec_argv[i++] = STRDUP_FAIL_EXIT(kvm_events_tp[j]);
        }
@@ -1356,7 +1184,8 @@ kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
 {
        const struct option kvm_events_report_options[] = {
                OPT_STRING(0, "event", &kvm->report_event, "report event",
-                           "event for reporting: vmexit, mmio, ioport"),
+                          "event for reporting: vmexit, "
+                          "mmio (x86 only), ioport (x86 only)"),
                OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
                            "vcpu id to report"),
                OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
@@ -1391,16 +1220,16 @@ static struct perf_evlist *kvm_live_event_list(void)
 {
        struct perf_evlist *evlist;
        char *tp, *name, *sys;
-       unsigned int j;
        int err = -1;
+       const char * const *events_tp;
 
        evlist = perf_evlist__new();
        if (evlist == NULL)
                return NULL;
 
-       for (j = 0; j < ARRAY_SIZE(kvm_events_tp); j++) {
+       for (events_tp = kvm_events_tp; *events_tp; events_tp++) {
 
-               tp = strdup(kvm_events_tp[j]);
+               tp = strdup(*events_tp);
                if (tp == NULL)
                        goto out;
 
@@ -1409,7 +1238,7 @@ static struct perf_evlist *kvm_live_event_list(void)
                name = strchr(tp, ':');
                if (name == NULL) {
                        pr_err("Error parsing %s tracepoint: subsystem delimiter not found\n",
-                               kvm_events_tp[j]);
+                              *events_tp);
                        free(tp);
                        goto out;
                }
@@ -1417,7 +1246,7 @@ static struct perf_evlist *kvm_live_event_list(void)
                name++;
 
                if (perf_evlist__add_newtp(evlist, sys, name, NULL)) {
-                       pr_err("Failed to add %s tracepoint to the list\n", kvm_events_tp[j]);
+                       pr_err("Failed to add %s tracepoint to the list\n", *events_tp);
                        free(tp);
                        goto out;
                }
@@ -1462,7 +1291,9 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
                        "key for sorting: sample(sort by samples number)"
                        " time (sort by avg time)"),
                OPT_U64(0, "duration", &kvm->duration,
-                   "show events other than HALT that take longer than duration usecs"),
+                       "show events other than"
+                       " HLT (x86 only) or Wait state (s390 only)"
+                       " that take longer than duration usecs"),
                OPT_END()
        };
        const char * const live_usage[] = {
@@ -1585,9 +1416,6 @@ static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
                .report_event   = "vmexit",
                .sort_key       = "sample",
 
-               .exit_reasons = svm_exit_reasons,
-               .exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
-               .exit_reasons_isa = "SVM",
        };
 
        if (argc == 1) {
@@ -1609,7 +1437,7 @@ static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
 perf_stat:
        return cmd_stat(argc, argv, NULL);
 }
-#endif
+#endif /* HAVE_KVM_STAT_SUPPORT */
 
 static int __cmd_record(const char *file_name, int argc, const char **argv)
 {
@@ -1726,7 +1554,7 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
                return cmd_top(argc, argv, NULL);
        else if (!strncmp(argv[0], "buildid-list", 12))
                return __cmd_buildid_list(file_name, argc, argv);
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef HAVE_KVM_STAT_SUPPORT
        else if (!strncmp(argv[0], "stat", 4))
                return kvm_cmd_stat(file_name, argc, argv);
 #endif
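
The net effect of the builtin-kvm.c diff is that everything x86-specific (exit reason tables, the mmio/ioport handlers, the tracepoint names) moves behind HAVE_KVM_STAT_SUPPORT into per-arch files, and the generic code walks NULL-terminated tables instead of using ARRAY_SIZE() over hardcoded arrays, so other architectures can plug in. A reduced, self-contained sketch of that registration scheme; the real structs carry callbacks rather than just names:

#include <stdio.h>
#include <string.h>

struct kvm_events_ops { const char *name; };    /* reduced for the sketch */

struct kvm_reg_events_ops {
        const char *name;
        struct kvm_events_ops *ops;
};

static struct kvm_events_ops exit_events = { .name = "VM-EXIT" };

/* each arch supplies its own table; the sentinel entry ends the walk */
struct kvm_reg_events_ops kvm_reg_events_ops[] = {
        { .name = "vmexit", .ops = &exit_events },
        { .name = NULL, .ops = NULL },
};

static struct kvm_events_ops *lookup_ops(const char *report_event)
{
        struct kvm_reg_events_ops *p;

        for (p = kvm_reg_events_ops; p->name; p++)
                if (!strcmp(p->name, report_event))
                        return p->ops;
        return NULL;    /* unknown report event */
}

int main(void)
{
        struct kvm_events_ops *ops = lookup_ops("vmexit");

        printf("%s\n", ops ? ops->name : "unknown");
        return 0;
}
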
index cdcd4eb..c63fa29 100644 (file)
@@ -288,6 +288,13 @@ static void cleanup_params(void)
        memset(&params, 0, sizeof(params));
 }
 
+static void pr_err_with_code(const char *msg, int err)
+{
+       pr_err("%s", msg);
+       pr_debug(" Reason: %s (Code: %d)", strerror(-err), err);
+       pr_err("\n");
+}
+
 static int
 __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
 {
@@ -379,7 +386,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                }
                ret = parse_probe_event_argv(argc, argv);
                if (ret < 0) {
-                       pr_err("  Error: Parse Error.  (%d)\n", ret);
+                       pr_err_with_code("  Error: Command Parse Error.", ret);
                        return ret;
                }
        }
@@ -419,8 +426,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                }
                ret = show_perf_probe_events();
                if (ret < 0)
-                       pr_err("  Error: Failed to show event list. (%d)\n",
-                              ret);
+                       pr_err_with_code("  Error: Failed to show event list.", ret);
                return ret;
        }
        if (params.show_funcs) {
@@ -445,8 +451,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                strfilter__delete(params.filter);
                params.filter = NULL;
                if (ret < 0)
-                       pr_err("  Error: Failed to show functions."
-                              " (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to show functions.", ret);
                return ret;
        }
 
@@ -464,7 +469,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
 
                ret = show_line_range(&params.line_range, params.target);
                if (ret < 0)
-                       pr_err("  Error: Failed to show lines. (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to show lines.", ret);
                return ret;
        }
        if (params.show_vars) {
@@ -485,7 +490,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                strfilter__delete(params.filter);
                params.filter = NULL;
                if (ret < 0)
-                       pr_err("  Error: Failed to show vars. (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to show vars.", ret);
                return ret;
        }
 #endif
@@ -493,7 +498,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        if (params.dellist) {
                ret = del_perf_probe_events(params.dellist);
                if (ret < 0) {
-                       pr_err("  Error: Failed to delete events. (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to delete events.", ret);
                        return ret;
                }
        }
@@ -504,7 +509,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                                            params.target,
                                            params.force_add);
                if (ret < 0) {
-                       pr_err("  Error: Failed to add events. (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to add events.", ret);
                        return ret;
                }
        }
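
pr_err_with_code() above relies on the kernel-style convention that these functions return negative errno values on failure, so the sign must be flipped before strerror() can decode them. A small self-contained equivalent:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* mirrors pr_err_with_code(): message plus decoded errno and raw code */
static void report(const char *msg, int err)
{
        fprintf(stderr, "%s Reason: %s (Code: %d)\n", msg, strerror(-err), err);
}

int main(void)
{
        report("  Error: Failed to add events.", -EEXIST); /* e.g. probe already exists */
        return 0;
}
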
index 378b85b..4869050 100644 (file)
@@ -238,6 +238,7 @@ static struct perf_event_header finished_round_event = {
 
 static int record__mmap_read_all(struct record *rec)
 {
+       u64 bytes_written = rec->bytes_written;
        int i;
        int rc = 0;
 
@@ -250,7 +251,11 @@ static int record__mmap_read_all(struct record *rec)
                }
        }
 
-       if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
+       /*
+        * Mark the round finished in case we wrote
+        * at least one event.
+        */
+       if (bytes_written != rec->bytes_written)
                rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
 
 out:
index c38d06c..f83c08c 100644 (file)
@@ -10,6 +10,7 @@
 #include "util/header.h"
 #include "util/session.h"
 #include "util/tool.h"
+#include "util/cloexec.h"
 
 #include "util/parse-options.h"
 #include "util/trace-event.h"
@@ -434,7 +435,8 @@ static int self_open_counters(void)
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_TASK_CLOCK;
 
-       fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+       fd = sys_perf_event_open(&attr, 0, -1, -1,
+                                perf_event_open_cloexec_flag());
 
        if (fd < 0)
                pr_err("Error: sys_perf_event_open() syscall returned "
@@ -935,8 +937,8 @@ static int latency_switch_event(struct perf_sched *sched,
                return -1;
        }
 
-       sched_out = machine__findnew_thread(machine, 0, prev_pid);
-       sched_in = machine__findnew_thread(machine, 0, next_pid);
+       sched_out = machine__findnew_thread(machine, -1, prev_pid);
+       sched_in = machine__findnew_thread(machine, -1, next_pid);
 
        out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
        if (!out_events) {
@@ -979,7 +981,7 @@ static int latency_runtime_event(struct perf_sched *sched,
 {
        const u32 pid      = perf_evsel__intval(evsel, sample, "pid");
        const u64 runtime  = perf_evsel__intval(evsel, sample, "runtime");
-       struct thread *thread = machine__findnew_thread(machine, 0, pid);
+       struct thread *thread = machine__findnew_thread(machine, -1, pid);
        struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
        u64 timestamp = sample->time;
        int cpu = sample->cpu;
@@ -1012,7 +1014,7 @@ static int latency_wakeup_event(struct perf_sched *sched,
        struct thread *wakee;
        u64 timestamp = sample->time;
 
-       wakee = machine__findnew_thread(machine, 0, pid);
+       wakee = machine__findnew_thread(machine, -1, pid);
        atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, wakee))
@@ -1072,7 +1074,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
        if (sched->profile_cpu == -1)
                return 0;
 
-       migrant = machine__findnew_thread(machine, 0, pid);
+       migrant = machine__findnew_thread(machine, -1, pid);
        atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, migrant))
@@ -1290,7 +1292,7 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
                return -1;
        }
 
-       sched_in = machine__findnew_thread(machine, 0, next_pid);
+       sched_in = machine__findnew_thread(machine, -1, next_pid);
 
        sched->curr_thread[this_cpu] = sched_in;
 
index 9e9c91f..f57035b 100644 (file)
@@ -358,27 +358,6 @@ static void print_sample_start(struct perf_sample *sample,
        }
 }
 
-static bool is_bts_event(struct perf_event_attr *attr)
-{
-       return ((attr->type == PERF_TYPE_HARDWARE) &&
-               (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
-               (attr->sample_period == 1));
-}
-
-static bool sample_addr_correlates_sym(struct perf_event_attr *attr)
-{
-       if ((attr->type == PERF_TYPE_SOFTWARE) &&
-           ((attr->config == PERF_COUNT_SW_PAGE_FAULTS) ||
-            (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN) ||
-            (attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)))
-               return true;
-
-       if (is_bts_event(attr))
-               return true;
-
-       return false;
-}
-
 static void print_sample_addr(union perf_event *event,
                          struct perf_sample *sample,
                          struct machine *machine,
@@ -386,24 +365,13 @@ static void print_sample_addr(union perf_event *event,
                          struct perf_event_attr *attr)
 {
        struct addr_location al;
-       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
        printf("%16" PRIx64, sample->addr);
 
        if (!sample_addr_correlates_sym(attr))
                return;
 
-       thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
-                             sample->addr, &al);
-       if (!al.map)
-               thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
-                                     sample->addr, &al);
-
-       al.cpu = sample->cpu;
-       al.sym = NULL;
-
-       if (al.map)
-               al.sym = map__find_symbol(al.map, al.addr, NULL);
+       perf_event__preprocess_sample_addr(event, sample, machine, thread, &al);
 
        if (PRINT_FIELD(SYM)) {
                printf(" ");
@@ -427,25 +395,35 @@ static void print_sample_bts(union perf_event *event,
                             struct addr_location *al)
 {
        struct perf_event_attr *attr = &evsel->attr;
+       bool print_srcline_last = false;
 
        /* print branch_from information */
        if (PRINT_FIELD(IP)) {
-               if (!symbol_conf.use_callchain)
-                       printf(" ");
-               else
+               unsigned int print_opts = output[attr->type].print_ip_opts;
+
+               if (symbol_conf.use_callchain && sample->callchain) {
                        printf("\n");
-               perf_evsel__print_ip(evsel, sample, al,
-                                    output[attr->type].print_ip_opts,
+               } else {
+                       printf(" ");
+                       if (print_opts & PRINT_IP_OPT_SRCLINE) {
+                               print_srcline_last = true;
+                               print_opts &= ~PRINT_IP_OPT_SRCLINE;
+                       }
+               }
+               perf_evsel__print_ip(evsel, sample, al, print_opts,
                                     PERF_MAX_STACK_DEPTH);
        }
 
-       printf(" => ");
-
        /* print branch_to information */
        if (PRINT_FIELD(ADDR) ||
            ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
-            !output[attr->type].user_set))
+            !output[attr->type].user_set)) {
+               printf(" => ");
                print_sample_addr(event, sample, al->machine, thread, attr);
+       }
+
+       if (print_srcline_last)
+               map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
 
        printf("\n");
 }
index 65a151e..3e80aa1 100644 (file)
@@ -184,7 +184,7 @@ static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
 {
        evsel->priv = zalloc(sizeof(struct perf_stat));
-       if (evsel == NULL)
+       if (evsel->priv == NULL)
                return -ENOMEM;
        perf_evsel__reset_stat_priv(evsel);
        return 0;
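
The one-line stat fix above is another classic: after the allocation, the code tested evsel, which the caller already guarantees is non-NULL, instead of the freshly assigned evsel->priv, so an allocation failure would sail straight into a NULL dereference. In miniature:

#include <errno.h>
#include <stdlib.h>

struct perf_stat { int dummy; };        /* placeholder for the sketch */
struct evsel_like { void *priv; };

static int alloc_stat_priv(struct evsel_like *evsel)
{
        evsel->priv = calloc(1, sizeof(struct perf_stat));
        if (evsel->priv == NULL)        /* test the allocation result... */
                return -ENOMEM;         /* ...not 'evsel', which cannot be NULL here */
        return 0;
}

int main(void)
{
        struct evsel_like e = { .priv = NULL };

        return alloc_stat_priv(&e) ? 1 : 0;
}
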
index 74db256..2f1a522 100644 (file)
@@ -37,6 +37,7 @@
 #include "util/svghelper.h"
 #include "util/tool.h"
 #include "util/data.h"
+#include "util/debug.h"
 
 #define SUPPORT_OLD_POWER_EVENTS 1
 #define PWR_EVENT_EXIT -1
@@ -60,10 +61,17 @@ struct timechart {
                                tasks_only,
                                with_backtrace,
                                topology;
+       /* IO related settings */
+       u64                     io_events;
+       bool                    io_only,
+                               skip_eagain;
+       u64                     min_time,
+                               merge_dist;
 };
 
 struct per_pidcomm;
 struct cpu_sample;
+struct io_sample;
 
 /*
  * Datastructure layout:
@@ -84,6 +92,7 @@ struct per_pid {
        u64             start_time;
        u64             end_time;
        u64             total_time;
+       u64             total_bytes;
        int             display;
 
        struct per_pidcomm *all;
@@ -97,6 +106,8 @@ struct per_pidcomm {
        u64             start_time;
        u64             end_time;
        u64             total_time;
+       u64             max_bytes;
+       u64             total_bytes;
 
        int             Y;
        int             display;
@@ -107,6 +118,7 @@ struct per_pidcomm {
        char            *comm;
 
        struct cpu_sample *samples;
+       struct io_sample  *io_samples;
 };
 
 struct sample_wrapper {
@@ -131,6 +143,27 @@ struct cpu_sample {
        const char *backtrace;
 };
 
+enum {
+       IOTYPE_READ,
+       IOTYPE_WRITE,
+       IOTYPE_SYNC,
+       IOTYPE_TX,
+       IOTYPE_RX,
+       IOTYPE_POLL,
+};
+
+struct io_sample {
+       struct io_sample *next;
+
+       u64 start_time;
+       u64 end_time;
+       u64 bytes;
+       int type;
+       int fd;
+       int err;
+       int merges;
+};
+
 #define CSTATE 1
 #define PSTATE 2
 
@@ -213,7 +246,7 @@ static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
                pid_set_comm(tchart, pid, pp->current->comm);
 
        p->start_time = timestamp;
-       if (p->current) {
+       if (p->current && !p->current->start_time) {
                p->current->start_time = timestamp;
                p->current->state_since = timestamp;
        }
@@ -682,6 +715,249 @@ static void end_sample_processing(struct timechart *tchart)
        }
 }
 
+static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
+                              u64 start, int fd)
+{
+       struct per_pid *p = find_create_pid(tchart, pid);
+       struct per_pidcomm *c = p->current;
+       struct io_sample *sample;
+       struct io_sample *prev;
+
+       if (!c) {
+               c = zalloc(sizeof(*c));
+               if (!c)
+                       return -ENOMEM;
+               p->current = c;
+               c->next = p->all;
+               p->all = c;
+       }
+
+       prev = c->io_samples;
+
+       if (prev && prev->start_time && !prev->end_time) {
+               pr_warning("Skip invalid start event: "
+                          "previous event already started!\n");
+
+               /* remove previous event that has been started,
+                * we are not sure we will ever get an end for it */
+               c->io_samples = prev->next;
+               free(prev);
+               return 0;
+       }
+
+       sample = zalloc(sizeof(*sample));
+       if (!sample)
+               return -ENOMEM;
+       sample->start_time = start;
+       sample->type = type;
+       sample->fd = fd;
+       sample->next = c->io_samples;
+       c->io_samples = sample;
+
+       if (c->start_time == 0 || c->start_time > start)
+               c->start_time = start;
+
+       return 0;
+}
+
+static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
+                            u64 end, long ret)
+{
+       struct per_pid *p = find_create_pid(tchart, pid);
+       struct per_pidcomm *c = p->current;
+       struct io_sample *sample, *prev;
+
+       if (!c) {
+               pr_warning("Invalid pidcomm!\n");
+               return -1;
+       }
+
+       sample = c->io_samples;
+
+       if (!sample) /* skip partially captured events */
+               return 0;
+
+       if (sample->end_time) {
+               pr_warning("Skip invalid end event: "
+                          "previous event already ended!\n");
+               return 0;
+       }
+
+       if (sample->type != type) {
+               pr_warning("Skip invalid end event: invalid event type!\n");
+               return 0;
+       }
+
+       sample->end_time = end;
+       prev = sample->next;
+
+       /* we want to be able to see small and fast transfers, so make them
+        * at least min_time long, but don't overlap them */
+       if (sample->end_time - sample->start_time < tchart->min_time)
+               sample->end_time = sample->start_time + tchart->min_time;
+       if (prev && sample->start_time < prev->end_time) {
+               if (prev->err) /* try to make errors more visible */
+                       sample->start_time = prev->end_time;
+               else
+                       prev->end_time = sample->start_time;
+       }
+
+       if (ret < 0) {
+               sample->err = ret;
+       } else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
+                  type == IOTYPE_TX || type == IOTYPE_RX) {
+
+               if ((u64)ret > c->max_bytes)
+                       c->max_bytes = ret;
+
+               c->total_bytes += ret;
+               p->total_bytes += ret;
+               sample->bytes = ret;
+       }
+
+       /* merge two requests to make svg smaller and render-friendly */
+       if (prev &&
+           prev->type == sample->type &&
+           prev->err == sample->err &&
+           prev->fd == sample->fd &&
+           prev->end_time + tchart->merge_dist >= sample->start_time) {
+
+               sample->bytes += prev->bytes;
+               sample->merges += prev->merges + 1;
+
+               sample->start_time = prev->start_time;
+               sample->next = prev->next;
+               free(prev);
+
+               if (!sample->err && sample->bytes > c->max_bytes)
+                       c->max_bytes = sample->bytes;
+       }
+
+       tchart->io_events++;
+
+       return 0;
+}
+
+static int
+process_enter_read(struct timechart *tchart,
+                  struct perf_evsel *evsel,
+                  struct perf_sample *sample)
+{
+       long fd = perf_evsel__intval(evsel, sample, "fd");
+       return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
+                                  sample->time, fd);
+}
+
+static int
+process_exit_read(struct timechart *tchart,
+                 struct perf_evsel *evsel,
+                 struct perf_sample *sample)
+{
+       long ret = perf_evsel__intval(evsel, sample, "ret");
+       return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
+                                sample->time, ret);
+}
+
+static int
+process_enter_write(struct timechart *tchart,
+                   struct perf_evsel *evsel,
+                   struct perf_sample *sample)
+{
+       long fd = perf_evsel__intval(evsel, sample, "fd");
+       return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
+                                  sample->time, fd);
+}
+
+static int
+process_exit_write(struct timechart *tchart,
+                  struct perf_evsel *evsel,
+                  struct perf_sample *sample)
+{
+       long ret = perf_evsel__intval(evsel, sample, "ret");
+       return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
+                                sample->time, ret);
+}
+
+static int
+process_enter_sync(struct timechart *tchart,
+                  struct perf_evsel *evsel,
+                  struct perf_sample *sample)
+{
+       long fd = perf_evsel__intval(evsel, sample, "fd");
+       return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
+                                  sample->time, fd);
+}
+
+static int
+process_exit_sync(struct timechart *tchart,
+                 struct perf_evsel *evsel,
+                 struct perf_sample *sample)
+{
+       long ret = perf_evsel__intval(evsel, sample, "ret");
+       return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
+                                sample->time, ret);
+}
+
+static int
+process_enter_tx(struct timechart *tchart,
+                struct perf_evsel *evsel,
+                struct perf_sample *sample)
+{
+       long fd = perf_evsel__intval(evsel, sample, "fd");
+       return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
+                                  sample->time, fd);
+}
+
+static int
+process_exit_tx(struct timechart *tchart,
+               struct perf_evsel *evsel,
+               struct perf_sample *sample)
+{
+       long ret = perf_evsel__intval(evsel, sample, "ret");
+       return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
+                                sample->time, ret);
+}
+
+static int
+process_enter_rx(struct timechart *tchart,
+                struct perf_evsel *evsel,
+                struct perf_sample *sample)
+{
+       long fd = perf_evsel__intval(evsel, sample, "fd");
+       return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
+                                  sample->time, fd);
+}
+
+static int
+process_exit_rx(struct timechart *tchart,
+               struct perf_evsel *evsel,
+               struct perf_sample *sample)
+{
+       long ret = perf_evsel__intval(evsel, sample, "ret");
+       return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
+                                sample->time, ret);
+}
+
+static int
+process_enter_poll(struct timechart *tchart,
+                  struct perf_evsel *evsel,
+                  struct perf_sample *sample)
+{
+       long fd = perf_evsel__intval(evsel, sample, "fd");
+       return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
+                                  sample->time, fd);
+}
+
+static int
+process_exit_poll(struct timechart *tchart,
+                 struct perf_evsel *evsel,
+                 struct perf_sample *sample)
+{
+       long ret = perf_evsel__intval(evsel, sample, "ret");
+       return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
+                                sample->time, ret);
+}
+
 /*
  * Sort the pid datastructure
  */
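
In the I/O accounting added above, samples are prepended to a per-task singly linked list, so 'prev' in pid_end_io_sample() is the chronologically preceding request; two adjacent requests collapse into one box only when they are genuinely the same stream. That merge predicate, pulled out as a self-contained sketch:

#include <stdbool.h>

typedef unsigned long long u64;

struct io_sample {
        u64 start_time, end_time;
        int type, fd, err;
};

/* same fd, same direction, same error status, and closer together than
 * merge_dist: collapse them so the SVG stays small and render-friendly */
static bool can_merge(const struct io_sample *prev,
                      const struct io_sample *cur, u64 merge_dist)
{
        return prev &&
               prev->type == cur->type &&
               prev->err  == cur->err  &&
               prev->fd   == cur->fd   &&
               prev->end_time + merge_dist >= cur->start_time;
}
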
@@ -852,6 +1128,121 @@ static void draw_cpu_usage(struct timechart *tchart)
        }
 }
 
+static void draw_io_bars(struct timechart *tchart)
+{
+       const char *suf;
+       double bytes;
+       char comm[256];
+       struct per_pid *p;
+       struct per_pidcomm *c;
+       struct io_sample *sample;
+       int Y = 1;
+
+       p = tchart->all_data;
+       while (p) {
+               c = p->all;
+               while (c) {
+                       if (!c->display) {
+                               c->Y = 0;
+                               c = c->next;
+                               continue;
+                       }
+
+                       svg_box(Y, c->start_time, c->end_time, "process3");
+                       for (sample = c->io_samples; sample; sample = sample->next) {
+                               double h = (double)sample->bytes / c->max_bytes;
+
+                               if (tchart->skip_eagain &&
+                                   sample->err == -EAGAIN)
+                                       continue;
+
+                               if (sample->err)
+                                       h = 1;
+
+                               if (sample->type == IOTYPE_SYNC)
+                                       svg_fbox(Y,
+                                               sample->start_time,
+                                               sample->end_time,
+                                               1,
+                                               sample->err ? "error" : "sync",
+                                               sample->fd,
+                                               sample->err,
+                                               sample->merges);
+                               else if (sample->type == IOTYPE_POLL)
+                                       svg_fbox(Y,
+                                               sample->start_time,
+                                               sample->end_time,
+                                               1,
+                                               sample->err ? "error" : "poll",
+                                               sample->fd,
+                                               sample->err,
+                                               sample->merges);
+                               else if (sample->type == IOTYPE_READ)
+                                       svg_ubox(Y,
+                                               sample->start_time,
+                                               sample->end_time,
+                                               h,
+                                               sample->err ? "error" : "disk",
+                                               sample->fd,
+                                               sample->err,
+                                               sample->merges);
+                               else if (sample->type == IOTYPE_WRITE)
+                                       svg_lbox(Y,
+                                               sample->start_time,
+                                               sample->end_time,
+                                               h,
+                                               sample->err ? "error" : "disk",
+                                               sample->fd,
+                                               sample->err,
+                                               sample->merges);
+                               else if (sample->type == IOTYPE_RX)
+                                       svg_ubox(Y,
+                                               sample->start_time,
+                                               sample->end_time,
+                                               h,
+                                               sample->err ? "error" : "net",
+                                               sample->fd,
+                                               sample->err,
+                                               sample->merges);
+                               else if (sample->type == IOTYPE_TX)
+                                       svg_lbox(Y,
+                                               sample->start_time,
+                                               sample->end_time,
+                                               h,
+                                               sample->err ? "error" : "net",
+                                               sample->fd,
+                                               sample->err,
+                                               sample->merges);
+                       }
+
+                       suf = "";
+                       bytes = c->total_bytes;
+                       if (bytes > 1024) {
+                               bytes = bytes / 1024;
+                               suf = "K";
+                       }
+                       if (bytes > 1024) {
+                               bytes = bytes / 1024;
+                               suf = "M";
+                       }
+                       if (bytes > 1024) {
+                               bytes = bytes / 1024;
+                               suf = "G";
+                       }
+
+                       sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
+                       svg_text(Y, c->start_time, comm);
+
+                       c->Y = Y;
+                       Y++;
+                       c = c->next;
+               }
+               p = p->next;
+       }
+}
+
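The K/M/G cascade at the end of draw_io_bars() can be read as a small humanizer: divide by 1024 up to three times and pick the matching suffix. The same logic factored out into a standalone helper (a sketch, not perf code; the 1024 base and suffix strings mirror the hunk above):

        #include <stdio.h>

        /* Scale a byte count down by powers of 1024, as draw_io_bars() does inline. */
        static double humanize_bytes(double bytes, const char **suf)
        {
                static const char * const sufs[] = { "", "K", "M", "G" };
                unsigned int i = 0;

                while (bytes > 1024 && i < 3) {
                        bytes /= 1024;
                        i++;
                }
                *suf = sufs[i];
                return bytes;
        }

        int main(void)
        {
                const char *suf;
                double v = humanize_bytes(3 * 1024 * 1024, &suf);

                printf("%3.1f %sbytes\n", v, suf);      /* prints "3.0 Mbytes" */
                return 0;
        }
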
 static void draw_process_bars(struct timechart *tchart)
 {
        struct per_pid *p;
@@ -987,9 +1378,6 @@ static int determine_display_tasks(struct timechart *tchart, u64 threshold)
        struct per_pidcomm *c;
        int count = 0;
 
-       if (process_filter)
-               return determine_display_tasks_filtered(tchart);
-
        p = tchart->all_data;
        while (p) {
                p->display = 0;
@@ -1025,15 +1413,46 @@ static int determine_display_tasks(struct timechart *tchart, u64 threshold)
        return count;
 }
 
+static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
+{
+       struct per_pid *p;
+       struct per_pidcomm *c;
+       int count = 0;
+
+       p = timechart->all_data;
+       while (p) {
+               /* no exit marker, task kept running to the end */
+               if (p->end_time == 0)
+                       p->end_time = timechart->last_time;
 
+               c = p->all;
 
+               while (c) {
+                       c->display = 0;
+
+                       if (c->total_bytes >= threshold) {
+                               c->display = 1;
+                               count++;
+                       }
+
+                       if (c->end_time == 0)
+                               c->end_time = timechart->last_time;
+
+                       c = c->next;
+               }
+               p = p->next;
+       }
+       return count;
+}
+
+#define BYTES_THRESH (1 * 1024 * 1024)
 #define TIME_THRESH 10000000
 
 static void write_svg_file(struct timechart *tchart, const char *filename)
 {
        u64 i;
        int count;
-       int thresh = TIME_THRESH;
+       int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
 
        if (tchart->power_only)
                tchart->proc_num = 0;
@@ -1041,28 +1460,43 @@ static void write_svg_file(struct timechart *tchart, const char *filename)
        /* We'd like to show at least proc_num tasks;
         * be less picky if we have fewer */
        do {
-               count = determine_display_tasks(tchart, thresh);
+               if (process_filter)
+                       count = determine_display_tasks_filtered(tchart);
+               else if (tchart->io_events)
+                       count = determine_display_io_tasks(tchart, thresh);
+               else
+                       count = determine_display_tasks(tchart, thresh);
                thresh /= 10;
        } while (!process_filter && thresh && count < tchart->proc_num);
 
        if (!tchart->proc_num)
                count = 0;
 
-       open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
+       if (tchart->io_events) {
+               open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
 
-       svg_time_grid();
-       svg_legenda();
+               svg_time_grid(0.5);
+               svg_io_legenda();
+
+               draw_io_bars(tchart);
+       } else {
+               open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
 
-       for (i = 0; i < tchart->numcpus; i++)
-               svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
+               svg_time_grid(0);
 
-       draw_cpu_usage(tchart);
-       if (tchart->proc_num)
-               draw_process_bars(tchart);
-       if (!tchart->tasks_only)
-               draw_c_p_states(tchart);
-       if (tchart->proc_num)
-               draw_wakeups(tchart);
+               svg_legenda();
+
+               for (i = 0; i < tchart->numcpus; i++)
+                       svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
+
+               draw_cpu_usage(tchart);
+               if (tchart->proc_num)
+                       draw_process_bars(tchart);
+               if (!tchart->tasks_only)
+                       draw_c_p_states(tchart);
+               if (tchart->proc_num)
+                       draw_wakeups(tchart);
+       }
 
        svg_close();
 }
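write_svg_file() keeps dividing the threshold by ten until at least proc_num tasks qualify (or the threshold hits zero), now picking between the time-based and byte-based counters. The relaxation loop in isolation, with a hypothetical count_tasks() standing in for the determine_display_*() family:

        #include <stdio.h>

        /* Hypothetical stand-in for determine_display_tasks()/_io_tasks():
         * here, pretend 12 tasks qualify once the threshold drops below 100000. */
        static int count_tasks(int thresh)
        {
                return thresh < 100000 ? 12 : 3;
        }

        int main(void)
        {
                int thresh = 10000000;          /* TIME_THRESH */
                int wanted = 15, count;

                do {
                        count = count_tasks(thresh);
                        thresh /= 10;           /* be ten times less picky */
                } while (thresh && count < wanted);

                printf("displaying %d tasks\n", count);  /* prints 12 */
                return 0;
        }

Note the loop settles for fewer than proc_num tasks once the threshold is exhausted, which matches the "be less picky if we have fewer" comment above.
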
@@ -1110,6 +1544,56 @@ static int __cmd_timechart(struct timechart *tchart, const char *output_name)
                { "power:power_end",            process_sample_power_end },
                { "power:power_frequency",      process_sample_power_frequency },
 #endif
+
+               { "syscalls:sys_enter_read",            process_enter_read },
+               { "syscalls:sys_enter_pread64",         process_enter_read },
+               { "syscalls:sys_enter_readv",           process_enter_read },
+               { "syscalls:sys_enter_preadv",          process_enter_read },
+               { "syscalls:sys_enter_write",           process_enter_write },
+               { "syscalls:sys_enter_pwrite64",        process_enter_write },
+               { "syscalls:sys_enter_writev",          process_enter_write },
+               { "syscalls:sys_enter_pwritev",         process_enter_write },
+               { "syscalls:sys_enter_sync",            process_enter_sync },
+               { "syscalls:sys_enter_sync_file_range", process_enter_sync },
+               { "syscalls:sys_enter_fsync",           process_enter_sync },
+               { "syscalls:sys_enter_msync",           process_enter_sync },
+               { "syscalls:sys_enter_recvfrom",        process_enter_rx },
+               { "syscalls:sys_enter_recvmmsg",        process_enter_rx },
+               { "syscalls:sys_enter_recvmsg",         process_enter_rx },
+               { "syscalls:sys_enter_sendto",          process_enter_tx },
+               { "syscalls:sys_enter_sendmsg",         process_enter_tx },
+               { "syscalls:sys_enter_sendmmsg",        process_enter_tx },
+               { "syscalls:sys_enter_epoll_pwait",     process_enter_poll },
+               { "syscalls:sys_enter_epoll_wait",      process_enter_poll },
+               { "syscalls:sys_enter_poll",            process_enter_poll },
+               { "syscalls:sys_enter_ppoll",           process_enter_poll },
+               { "syscalls:sys_enter_pselect6",        process_enter_poll },
+               { "syscalls:sys_enter_select",          process_enter_poll },
+
+               { "syscalls:sys_exit_read",             process_exit_read },
+               { "syscalls:sys_exit_pread64",          process_exit_read },
+               { "syscalls:sys_exit_readv",            process_exit_read },
+               { "syscalls:sys_exit_preadv",           process_exit_read },
+               { "syscalls:sys_exit_write",            process_exit_write },
+               { "syscalls:sys_exit_pwrite64",         process_exit_write },
+               { "syscalls:sys_exit_writev",           process_exit_write },
+               { "syscalls:sys_exit_pwritev",          process_exit_write },
+               { "syscalls:sys_exit_sync",             process_exit_sync },
+               { "syscalls:sys_exit_sync_file_range",  process_exit_sync },
+               { "syscalls:sys_exit_fsync",            process_exit_sync },
+               { "syscalls:sys_exit_msync",            process_exit_sync },
+               { "syscalls:sys_exit_recvfrom",         process_exit_rx },
+               { "syscalls:sys_exit_recvmmsg",         process_exit_rx },
+               { "syscalls:sys_exit_recvmsg",          process_exit_rx },
+               { "syscalls:sys_exit_sendto",           process_exit_tx },
+               { "syscalls:sys_exit_sendmsg",          process_exit_tx },
+               { "syscalls:sys_exit_sendmmsg",         process_exit_tx },
+               { "syscalls:sys_exit_epoll_pwait",      process_exit_poll },
+               { "syscalls:sys_exit_epoll_wait",       process_exit_poll },
+               { "syscalls:sys_exit_poll",             process_exit_poll },
+               { "syscalls:sys_exit_ppoll",            process_exit_poll },
+               { "syscalls:sys_exit_pselect6",         process_exit_poll },
+               { "syscalls:sys_exit_select",           process_exit_poll },
        };
        struct perf_data_file file = {
                .path = input_name,
@@ -1154,6 +1638,139 @@ out_delete:
        return ret;
 }
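The handler table registered in __cmd_timechart() maps tracepoint names to callbacks, so adding an event is one table row. The lookup reduces to a linear name scan; a sketch under the assumption that all handlers share one signature (the real table dispatches on struct perf_evsel):

        #include <stdio.h>
        #include <string.h>

        /* Sketch of the name -> handler dispatch used for the syscalls:* events. */
        typedef int (*tp_handler)(void);

        static int on_enter_read(void) { return puts("enter read"); }

        static const struct tp_entry {
                const char *name;
                tp_handler  handler;
        } handlers[] = {
                { "syscalls:sys_enter_read", on_enter_read },
        };

        int main(void)
        {
                const char *ev = "syscalls:sys_enter_read";
                size_t i;

                for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
                        if (!strcmp(handlers[i].name, ev))
                                return handlers[i].handler() < 0;
                return 1;       /* no handler registered for this event */
        }
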
 
+static int timechart__io_record(int argc, const char **argv)
+{
+       unsigned int rec_argc, i;
+       const char **rec_argv;
+       const char **p;
+       char *filter = NULL;
+
+       const char * const common_args[] = {
+               "record", "-a", "-R", "-c", "1",
+       };
+       unsigned int common_args_nr = ARRAY_SIZE(common_args);
+
+       const char * const disk_events[] = {
+               "syscalls:sys_enter_read",
+               "syscalls:sys_enter_pread64",
+               "syscalls:sys_enter_readv",
+               "syscalls:sys_enter_preadv",
+               "syscalls:sys_enter_write",
+               "syscalls:sys_enter_pwrite64",
+               "syscalls:sys_enter_writev",
+               "syscalls:sys_enter_pwritev",
+               "syscalls:sys_enter_sync",
+               "syscalls:sys_enter_sync_file_range",
+               "syscalls:sys_enter_fsync",
+               "syscalls:sys_enter_msync",
+
+               "syscalls:sys_exit_read",
+               "syscalls:sys_exit_pread64",
+               "syscalls:sys_exit_readv",
+               "syscalls:sys_exit_preadv",
+               "syscalls:sys_exit_write",
+               "syscalls:sys_exit_pwrite64",
+               "syscalls:sys_exit_writev",
+               "syscalls:sys_exit_pwritev",
+               "syscalls:sys_exit_sync",
+               "syscalls:sys_exit_sync_file_range",
+               "syscalls:sys_exit_fsync",
+               "syscalls:sys_exit_msync",
+       };
+       unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
+
+       const char * const net_events[] = {
+               "syscalls:sys_enter_recvfrom",
+               "syscalls:sys_enter_recvmmsg",
+               "syscalls:sys_enter_recvmsg",
+               "syscalls:sys_enter_sendto",
+               "syscalls:sys_enter_sendmsg",
+               "syscalls:sys_enter_sendmmsg",
+
+               "syscalls:sys_exit_recvfrom",
+               "syscalls:sys_exit_recvmmsg",
+               "syscalls:sys_exit_recvmsg",
+               "syscalls:sys_exit_sendto",
+               "syscalls:sys_exit_sendmsg",
+               "syscalls:sys_exit_sendmmsg",
+       };
+       unsigned int net_events_nr = ARRAY_SIZE(net_events);
+
+       const char * const poll_events[] = {
+               "syscalls:sys_enter_epoll_pwait",
+               "syscalls:sys_enter_epoll_wait",
+               "syscalls:sys_enter_poll",
+               "syscalls:sys_enter_ppoll",
+               "syscalls:sys_enter_pselect6",
+               "syscalls:sys_enter_select",
+
+               "syscalls:sys_exit_epoll_pwait",
+               "syscalls:sys_exit_epoll_wait",
+               "syscalls:sys_exit_poll",
+               "syscalls:sys_exit_ppoll",
+               "syscalls:sys_exit_pselect6",
+               "syscalls:sys_exit_select",
+       };
+       unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
+
+       rec_argc = common_args_nr +
+               disk_events_nr * 4 +
+               net_events_nr * 4 +
+               poll_events_nr * 4 +
+               argc;
+       rec_argv = calloc(rec_argc + 1, sizeof(char *));
+
+       if (rec_argv == NULL)
+               return -ENOMEM;
+
+       if (asprintf(&filter, "common_pid != %d", getpid()) < 0)
+               return -ENOMEM;
+
+       p = rec_argv;
+       for (i = 0; i < common_args_nr; i++)
+               *p++ = strdup(common_args[i]);
+
+       for (i = 0; i < disk_events_nr; i++) {
+               if (!is_valid_tracepoint(disk_events[i])) {
+                       rec_argc -= 4;
+                       continue;
+               }
+
+               *p++ = "-e";
+               *p++ = strdup(disk_events[i]);
+               *p++ = "--filter";
+               *p++ = filter;
+       }
+       for (i = 0; i < net_events_nr; i++) {
+               if (!is_valid_tracepoint(net_events[i])) {
+                       rec_argc -= 4;
+                       continue;
+               }
+
+               *p++ = "-e";
+               *p++ = strdup(net_events[i]);
+               *p++ = "--filter";
+               *p++ = filter;
+       }
+       for (i = 0; i < poll_events_nr; i++) {
+               if (!is_valid_tracepoint(poll_events[i])) {
+                       rec_argc -= 4;
+                       continue;
+               }
+
+               *p++ = "-e";
+               *p++ = strdup(poll_events[i]);
+               *p++ = "--filter";
+               *p++ = filter;
+       }
+
+       for (i = 0; i < (unsigned int)argc; i++)
+               *p++ = argv[i];
+
+       return cmd_record(rec_argc, rec_argv, NULL);
+}
+
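timechart__io_record() reserves four argv slots ("-e", event, "--filter", expr) per tracepoint and refunds them with rec_argc -= 4 when the running kernel lacks the event. The append step in isolation (a sketch; is_valid_tracepoint() below is a local stand-in for perf's helper of the same name):

        #include <stdio.h>
        #include <string.h>

        /* Stand-in for perf's is_valid_tracepoint(); assume only read exists. */
        static int is_valid_tracepoint(const char *ev)
        {
                return strcmp(ev, "syscalls:sys_enter_read") == 0;
        }

        /* Append "-e EVENT --filter EXPR" per usable tracepoint; refund the
         * four pre-reserved argv slots when the kernel lacks the event. */
        static const char **add_events(const char **p, unsigned int *rec_argc,
                                       const char * const *events,
                                       unsigned int nr, const char *filter)
        {
                unsigned int i;

                for (i = 0; i < nr; i++) {
                        if (!is_valid_tracepoint(events[i])) {
                                *rec_argc -= 4;
                                continue;
                        }
                        *p++ = "-e";
                        *p++ = events[i];
                        *p++ = "--filter";
                        *p++ = filter;
                }
                return p;
        }

        int main(void)
        {
                const char *argv[16], **p = argv;
                const char * const evs[] = { "syscalls:sys_enter_read",
                                             "syscalls:sys_enter_bogus" };
                unsigned int rec_argc = 8;      /* 2 events * 4 slots */

                p = add_events(p, &rec_argc, evs, 2, "common_pid != 42");
                printf("used %td of %u slots\n", p - argv, rec_argc);
                return 0;
        }

The common_pid filter built with asprintf() above keeps the recording session's own I/O out of the chart.
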
 static int timechart__record(struct timechart *tchart, int argc, const char **argv)
 {
        unsigned int rec_argc, i, j;
@@ -1270,6 +1887,30 @@ parse_highlight(const struct option *opt __maybe_unused, const char *arg,
        return 0;
 }
 
+static int
+parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
+{
+       char unit = 'n';
+       u64 *value = opt->value;
+
+       if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
+               switch (unit) {
+               case 'm':
+                       *value *= 1000000;
+                       break;
+               case 'u':
+                       *value *= 1000;
+                       break;
+               case 'n':
+                       break;
+               default:
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
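parse_time() accepts a count with an optional m/u/n suffix ("10ms", "500us", a bare "1000" for nanoseconds) and normalizes to nanoseconds; a plain "s" suffix, as in "10s", falls through to the error case. A standalone check of the same sscanf pattern (a hypothetical test harness, not perf code):

        #include <inttypes.h>
        #include <stdio.h>

        int main(void)
        {
                uint64_t value = 0;
                char unit = 'n';

                /* "%cs" reads the unit character, then expects a literal 's' */
                if (sscanf("10ms", "%" SCNu64 "%cs", &value, &unit) > 0 &&
                    unit == 'm')
                        value *= 1000000;

                printf("%" PRIu64 " ns\n", value);      /* prints "10000000 ns" */
                return 0;
        }
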
 int cmd_timechart(int argc, const char **argv,
                  const char *prefix __maybe_unused)
 {
@@ -1282,6 +1923,8 @@ int cmd_timechart(int argc, const char **argv,
                        .ordered_samples = true,
                },
                .proc_num = 15,
+               .min_time = 1000000,
+               .merge_dist = 1000,
        };
        const char *output_name = "output.svg";
        const struct option timechart_options[] = {
@@ -1303,6 +1946,14 @@ int cmd_timechart(int argc, const char **argv,
                    "min. number of tasks to print"),
        OPT_BOOLEAN('t', "topology", &tchart.topology,
                    "sort CPUs according to topology"),
+       OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
+                   "skip EAGAIN errors"),
+       OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
+                    "all IO faster than min-time will visually appear longer",
+                    parse_time),
+       OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
+                    "merge I/O events that are within merge-dist us of each other",
+                    parse_time),
        OPT_END()
        };
        const char * const timechart_usage[] = {
@@ -1314,6 +1965,8 @@ int cmd_timechart(int argc, const char **argv,
        OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
        OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
                    "output processes data only"),
+       OPT_BOOLEAN('I', "io-only", &tchart.io_only,
+                   "record only IO data"),
        OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
        OPT_END()
        };
@@ -1340,7 +1993,10 @@ int cmd_timechart(int argc, const char **argv,
                        return -1;
                }
 
-               return timechart__record(&tchart, argc, argv);
+               if (tchart.io_only)
+                       return timechart__io_record(argc, argv);
+               else
+                       return timechart__record(&tchart, argc, argv);
        } else if (argc)
                usage_with_options(timechart_usage, timechart_options);
 
index f954c26..a6c3752 100644 (file)
@@ -1108,6 +1108,7 @@ struct syscall {
        struct event_format *tp_format;
        const char          *name;
        bool                filtered;
+       bool                is_exit;
        struct syscall_fmt  *fmt;
        size_t              (**arg_scnprintf)(char *bf, size_t size, struct syscall_arg *arg);
        void                **arg_parm;
@@ -1132,6 +1133,7 @@ struct thread_trace {
        u64               exit_time;
        bool              entry_pending;
        unsigned long     nr_events;
+       unsigned long     pfmaj, pfmin;
        char              *entry_str;
        double            runtime_ms;
        struct {
@@ -1177,6 +1179,9 @@ fail:
        return NULL;
 }
 
+#define TRACE_PFMAJ            (1 << 0)
+#define TRACE_PFMIN            (1 << 1)
+
 struct trace {
        struct perf_tool        tool;
        struct {
@@ -1211,6 +1216,8 @@ struct trace {
        bool                    summary_only;
        bool                    show_comm;
        bool                    show_tool_stats;
+       bool                    trace_syscalls;
+       int                     trace_pgfaults;
 };
 
 static int trace__set_fd_pathname(struct thread *thread, int fd, const char *pathname)
@@ -1276,11 +1283,11 @@ static const char *thread__fd_path(struct thread *thread, int fd,
        if (fd < 0)
                return NULL;
 
-       if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL))
+       if ((fd > ttrace->paths.max || ttrace->paths.table[fd] == NULL)) {
                if (!trace->live)
                        return NULL;
                ++trace->stats.proc_getname;
-               if (thread__read_fd_path(thread, fd)) {
+               if (thread__read_fd_path(thread, fd))
                        return NULL;
        }
 
@@ -1473,6 +1480,8 @@ static int trace__read_syscall_info(struct trace *trace, int id)
        if (sc->tp_format == NULL)
                return -1;
 
+       sc->is_exit = !strcmp(name, "exit_group") || !strcmp(name, "exit");
+
        return syscall__set_arg_fmts(sc);
 }
 
@@ -1535,6 +1544,7 @@ static size_t syscall__scnprintf_args(struct syscall *sc, char *bf, size_t size,
 }
 
 typedef int (*tracepoint_handler)(struct trace *trace, struct perf_evsel *evsel,
+                                 union perf_event *event,
                                  struct perf_sample *sample);
 
 static struct syscall *trace__syscall_info(struct trace *trace,
@@ -1607,6 +1617,7 @@ static void thread__update_stats(struct thread_trace *ttrace,
 }
 
 static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
+                           union perf_event *event __maybe_unused,
                            struct perf_sample *sample)
 {
        char *msg;
@@ -1629,7 +1640,6 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
                return -1;
 
        args = perf_evsel__sc_tp_ptr(evsel, args, sample);
-       ttrace = thread->priv;
 
        if (ttrace->entry_str == NULL) {
                ttrace->entry_str = malloc(1024);
@@ -1644,7 +1654,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
        printed += syscall__scnprintf_args(sc, msg + printed, 1024 - printed,
                                           args, trace, thread);
 
-       if (!strcmp(sc->name, "exit_group") || !strcmp(sc->name, "exit")) {
+       if (sc->is_exit) {
                if (!trace->duration_filter && !trace->summary_only) {
                        trace__fprintf_entry_head(trace, thread, 1, sample->time, trace->output);
                        fprintf(trace->output, "%-70s\n", ttrace->entry_str);
@@ -1656,6 +1666,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
 }
 
 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
+                          union perf_event *event __maybe_unused,
                           struct perf_sample *sample)
 {
        int ret;
@@ -1687,8 +1698,6 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
                ++trace->stats.vfs_getname;
        }
 
-       ttrace = thread->priv;
-
        ttrace->exit_time = sample->time;
 
        if (ttrace->entry_time) {
@@ -1735,6 +1744,7 @@ out:
 }
 
 static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
+                             union perf_event *event __maybe_unused,
                              struct perf_sample *sample)
 {
        trace->last_vfs_getname = perf_evsel__rawptr(evsel, sample, "pathname");
@@ -1742,6 +1752,7 @@ static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
 }
 
 static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evsel,
+                                    union perf_event *event __maybe_unused,
                                     struct perf_sample *sample)
 {
         u64 runtime = perf_evsel__intval(evsel, sample, "runtime");
@@ -1768,6 +1779,80 @@ out_dump:
        return 0;
 }
 
+static void print_location(FILE *f, struct perf_sample *sample,
+                          struct addr_location *al,
+                          bool print_dso, bool print_sym)
+{
+
+       if ((verbose || print_dso) && al->map)
+               fprintf(f, "%s@", al->map->dso->long_name);
+
+       if ((verbose || print_sym) && al->sym)
+               fprintf(f, "%s+0x%" PRIx64, al->sym->name,
+                       al->addr - al->sym->start);
+       else if (al->map)
+               fprintf(f, "0x%" PRIx64, al->addr);
+       else
+               fprintf(f, "0x%" PRIx64, sample->addr);
+}
+
+static int trace__pgfault(struct trace *trace,
+                         struct perf_evsel *evsel,
+                         union perf_event *event,
+                         struct perf_sample *sample)
+{
+       struct thread *thread;
+       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+       struct addr_location al;
+       char map_type = 'd';
+       struct thread_trace *ttrace;
+
+       thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
+       ttrace = thread__trace(thread, trace->output);
+       if (ttrace == NULL)
+               return -1;
+
+       if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
+               ttrace->pfmaj++;
+       else
+               ttrace->pfmin++;
+
+       if (trace->summary_only)
+               return 0;
+
+       thread__find_addr_location(thread, trace->host, cpumode, MAP__FUNCTION,
+                             sample->ip, &al);
+
+       trace__fprintf_entry_head(trace, thread, 0, sample->time, trace->output);
+
+       fprintf(trace->output, "%sfault [",
+               evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ?
+               "maj" : "min");
+
+       print_location(trace->output, sample, &al, false, true);
+
+       fprintf(trace->output, "] => ");
+
+       thread__find_addr_location(thread, trace->host, cpumode, MAP__VARIABLE,
+                                  sample->addr, &al);
+
+       if (!al.map) {
+               thread__find_addr_location(thread, trace->host, cpumode,
+                                          MAP__FUNCTION, sample->addr, &al);
+
+               if (al.map)
+                       map_type = 'x';
+               else
+                       map_type = '?';
+       }
+
+       print_location(trace->output, sample, &al, true, false);
+
+       fprintf(trace->output, " (%c%c)\n", map_type, al.level);
+
+       return 0;
+}
+
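trace__pgfault() tags the faulting address as data ('d'), executable ('x'), or unknown ('?') depending on which map resolves it: MAP__VARIABLE is tried first, then MAP__FUNCTION as a fallback. The classification on its own (a sketch; the two resolvers below are hypothetical stand-ins for the thread__find_addr_location() calls):

        #include <stdio.h>

        /* Hypothetical stand-ins for the two thread__find_addr_location()
         * calls (MAP__VARIABLE first, then MAP__FUNCTION as a fallback). */
        static int in_data_map(unsigned long addr) { return addr < 0x1000; }
        static int in_code_map(unsigned long addr) { return addr < 0x2000; }

        static char classify_fault(unsigned long addr)
        {
                if (in_data_map(addr))
                        return 'd';     /* ordinary data access */
                if (in_code_map(addr))
                        return 'x';     /* fault on an executable mapping */
                return '?';             /* address resolves to no map */
        }

        int main(void)
        {
                printf("%c %c %c\n", classify_fault(0x500),
                       classify_fault(0x1500), classify_fault(0x5000));
                /* prints "d x ?" */
                return 0;
        }
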
 static bool skip_sample(struct trace *trace, struct perf_sample *sample)
 {
        if ((trace->pid_list && intlist__find(trace->pid_list, sample->pid)) ||
@@ -1781,7 +1866,7 @@ static bool skip_sample(struct trace *trace, struct perf_sample *sample)
 }
 
 static int trace__process_sample(struct perf_tool *tool,
-                                union perf_event *event __maybe_unused,
+                                union perf_event *event,
                                 struct perf_sample *sample,
                                 struct perf_evsel *evsel,
                                 struct machine *machine __maybe_unused)
@@ -1799,7 +1884,7 @@ static int trace__process_sample(struct perf_tool *tool,
 
        if (handler) {
                ++trace->nr_events;
-               handler(trace, evsel, sample);
+               handler(trace, evsel, event, sample);
        }
 
        return err;
@@ -1826,7 +1911,7 @@ static int parse_target_str(struct trace *trace)
        return 0;
 }
 
-static int trace__record(int argc, const char **argv)
+static int trace__record(struct trace *trace, int argc, const char **argv)
 {
        unsigned int rec_argc, i, j;
        const char **rec_argv;
@@ -1835,34 +1920,54 @@ static int trace__record(int argc, const char **argv)
                "-R",
                "-m", "1024",
                "-c", "1",
-               "-e",
        };
 
+       const char * const sc_args[] = { "-e", };
+       unsigned int sc_args_nr = ARRAY_SIZE(sc_args);
+       const char * const majpf_args[] = { "-e", "major-faults" };
+       unsigned int majpf_args_nr = ARRAY_SIZE(majpf_args);
+       const char * const minpf_args[] = { "-e", "minor-faults" };
+       unsigned int minpf_args_nr = ARRAY_SIZE(minpf_args);
+
        /* +1 is for the event string below */
-       rec_argc = ARRAY_SIZE(record_args) + 1 + argc;
+       rec_argc = ARRAY_SIZE(record_args) + sc_args_nr + 1 +
+               majpf_args_nr + minpf_args_nr + argc;
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
        if (rec_argv == NULL)
                return -ENOMEM;
 
+       j = 0;
        for (i = 0; i < ARRAY_SIZE(record_args); i++)
-               rec_argv[i] = record_args[i];
-
-       /* event string may be different for older kernels - e.g., RHEL6 */
-       if (is_valid_tracepoint("raw_syscalls:sys_enter"))
-               rec_argv[i] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
-       else if (is_valid_tracepoint("syscalls:sys_enter"))
-               rec_argv[i] = "syscalls:sys_enter,syscalls:sys_exit";
-       else {
-               pr_err("Neither raw_syscalls nor syscalls events exist.\n");
-               return -1;
+               rec_argv[j++] = record_args[i];
+
+       if (trace->trace_syscalls) {
+               for (i = 0; i < sc_args_nr; i++)
+                       rec_argv[j++] = sc_args[i];
+
+               /* event string may be different for older kernels - e.g., RHEL6 */
+               if (is_valid_tracepoint("raw_syscalls:sys_enter"))
+                       rec_argv[j++] = "raw_syscalls:sys_enter,raw_syscalls:sys_exit";
+               else if (is_valid_tracepoint("syscalls:sys_enter"))
+                       rec_argv[j++] = "syscalls:sys_enter,syscalls:sys_exit";
+               else {
+                       pr_err("Neither raw_syscalls nor syscalls events exist.\n");
+                       return -1;
+               }
        }
-       i++;
 
-       for (j = 0; j < (unsigned int)argc; j++, i++)
-               rec_argv[i] = argv[j];
+       if (trace->trace_pgfaults & TRACE_PFMAJ)
+               for (i = 0; i < majpf_args_nr; i++)
+                       rec_argv[j++] = majpf_args[i];
+
+       if (trace->trace_pgfaults & TRACE_PFMIN)
+               for (i = 0; i < minpf_args_nr; i++)
+                       rec_argv[j++] = minpf_args[i];
+
+       for (i = 0; i < (unsigned int)argc; i++)
+               rec_argv[j++] = argv[i];
 
-       return cmd_record(i, rec_argv, NULL);
+       return cmd_record(j, rec_argv, NULL);
 }
 
 static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
@@ -1882,6 +1987,30 @@ static void perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
        perf_evlist__add(evlist, evsel);
 }
 
+static int perf_evlist__add_pgfault(struct perf_evlist *evlist,
+                                   u64 config)
+{
+       struct perf_evsel *evsel;
+       struct perf_event_attr attr = {
+               .type = PERF_TYPE_SOFTWARE,
+               .mmap_data = 1,
+       };
+
+       attr.config = config;
+       attr.sample_period = 1;
+
+       event_attr_init(&attr);
+
+       evsel = perf_evsel__new(&attr);
+       if (!evsel)
+               return -ENOMEM;
+
+       evsel->handler = trace__pgfault;
+       perf_evlist__add(evlist, evsel);
+
+       return 0;
+}
+
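perf_evlist__add_pgfault() builds a software event with sample_period = 1, so every fault produces a sample. Outside perf's evsel layer the same attribute maps onto a raw perf_event_open(2) call; a minimal sketch (counting rather than sampling, to stay self-contained):

        #include <linux/perf_event.h>
        #include <sys/syscall.h>
        #include <string.h>
        #include <unistd.h>
        #include <stdio.h>

        int main(void)
        {
                struct perf_event_attr attr;
                long long count = 0;
                int fd;

                memset(&attr, 0, sizeof(attr));
                attr.size = sizeof(attr);
                attr.type = PERF_TYPE_SOFTWARE;
                attr.config = PERF_COUNT_SW_PAGE_FAULTS_MIN;

                /* count minor faults of the calling thread, on any CPU */
                fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
                if (fd < 0)
                        return 1;

                /* ... run the workload of interest here ... */

                if (read(fd, &count, sizeof(count)) != sizeof(count))
                        count = -1;
                printf("minor faults: %lld\n", count);
                close(fd);
                return 0;
        }

The evsel version additionally sets mmap_data = 1 so the kernel records the faulting data address in each sample, which is what print_location() resolves above.
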
 static int trace__run(struct trace *trace, int argc, const char **argv)
 {
        struct perf_evlist *evlist = perf_evlist__new();
@@ -1897,10 +2026,21 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                goto out;
        }
 
-       if (perf_evlist__add_syscall_newtp(evlist, trace__sys_enter, trace__sys_exit))
+       if (trace->trace_syscalls &&
+           perf_evlist__add_syscall_newtp(evlist, trace__sys_enter,
+                                          trace__sys_exit))
                goto out_error_tp;
 
-       perf_evlist__add_vfs_getname(evlist);
+       if (trace->trace_syscalls)
+               perf_evlist__add_vfs_getname(evlist);
+
+       if ((trace->trace_pgfaults & TRACE_PFMAJ) &&
+           perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MAJ))
+               goto out_error_tp;
+
+       if ((trace->trace_pgfaults & TRACE_PFMIN) &&
+           perf_evlist__add_pgfault(evlist, PERF_COUNT_SW_PAGE_FAULTS_MIN))
+               goto out_error_tp;
 
        if (trace->sched &&
                perf_evlist__add_newtp(evlist, "sched", "sched_stat_runtime",
@@ -1982,7 +2122,8 @@ again:
                                goto next_event;
                        }
 
-                       if (sample.raw_data == NULL) {
+                       if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
+                           sample.raw_data == NULL) {
                                fprintf(trace->output, "%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
                                       perf_evsel__name(evsel), sample.tid,
                                       sample.cpu, sample.raw_size);
@@ -1990,7 +2131,7 @@ again:
                        }
 
                        handler = evsel->handler;
-                       handler(trace, evsel, &sample);
+                       handler(trace, evsel, event, &sample);
 next_event:
                        perf_evlist__mmap_consume(evlist, i);
 
@@ -2093,13 +2234,10 @@ static int trace__replay(struct trace *trace)
        if (evsel == NULL)
                evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
                                                             "syscalls:sys_enter");
-       if (evsel == NULL) {
-               pr_err("Data file does not have raw_syscalls:sys_enter event\n");
-               goto out;
-       }
 
-       if (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
-           perf_evsel__init_sc_tp_ptr_field(evsel, args)) {
+       if (evsel &&
+           (perf_evsel__init_syscall_tp(evsel, trace__sys_enter) < 0 ||
+           perf_evsel__init_sc_tp_ptr_field(evsel, args))) {
                pr_err("Error initializing raw_syscalls:sys_enter event\n");
                goto out;
        }
@@ -2109,15 +2247,19 @@ static int trace__replay(struct trace *trace)
        if (evsel == NULL)
                evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
                                                             "syscalls:sys_exit");
-       if (evsel == NULL) {
-               pr_err("Data file does not have raw_syscalls:sys_exit event\n");
+       if (evsel &&
+           (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
+           perf_evsel__init_sc_tp_uint_field(evsel, ret))) {
+               pr_err("Error initializing raw_syscalls:sys_exit event\n");
                goto out;
        }
 
-       if (perf_evsel__init_syscall_tp(evsel, trace__sys_exit) < 0 ||
-           perf_evsel__init_sc_tp_uint_field(evsel, ret)) {
-               pr_err("Error during initialize raw_syscalls:sys_exit event\n");
-               goto out;
+       evlist__for_each(session->evlist, evsel) {
+               if (evsel->attr.type == PERF_TYPE_SOFTWARE &&
+                   (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ ||
+                    evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
+                    evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS))
+                       evsel->handler = trace__pgfault;
        }
 
        err = parse_target_str(trace);
@@ -2217,6 +2359,10 @@ static int trace__fprintf_one_thread(struct thread *thread, void *priv)
        printed += fprintf(fp, " %s (%d), ", thread__comm_str(thread), thread->tid);
        printed += fprintf(fp, "%lu events, ", ttrace->nr_events);
        printed += fprintf(fp, "%.1f%%", ratio);
+       if (ttrace->pfmaj)
+               printed += fprintf(fp, ", %lu majfaults", ttrace->pfmaj);
+       if (ttrace->pfmin)
+               printed += fprintf(fp, ", %lu minfaults", ttrace->pfmin);
        printed += fprintf(fp, ", %.3f msec\n", ttrace->runtime_ms);
        printed += thread__dump_stats(ttrace, trace, fp);
 
@@ -2264,6 +2410,23 @@ static int trace__open_output(struct trace *trace, const char *filename)
        return trace->output == NULL ? -errno : 0;
 }
 
+static int parse_pagefaults(const struct option *opt, const char *str,
+                           int unset __maybe_unused)
+{
+       int *trace_pgfaults = opt->value;
+
+       if (strcmp(str, "all") == 0)
+               *trace_pgfaults |= TRACE_PFMAJ | TRACE_PFMIN;
+       else if (strcmp(str, "maj") == 0)
+               *trace_pgfaults |= TRACE_PFMAJ;
+       else if (strcmp(str, "min") == 0)
+               *trace_pgfaults |= TRACE_PFMIN;
+       else
+               return -1;
+
+       return 0;
+}
+
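parse_pagefaults() ORs flags into the option value, so repeated -F options accumulate: "-F maj -F min" ends up equivalent to "-F all". The accumulation rule in isolation (a hypothetical test, not perf code; the flag values copy the TRACE_PFMAJ/TRACE_PFMIN defines above):

        #include <assert.h>
        #include <string.h>

        #define TRACE_PFMAJ (1 << 0)
        #define TRACE_PFMIN (1 << 1)

        /* Local copy of the accumulation rule used by parse_pagefaults(). */
        static int add_pf(int flags, const char *str)
        {
                if (!strcmp(str, "all"))
                        return flags | TRACE_PFMAJ | TRACE_PFMIN;
                if (!strcmp(str, "maj"))
                        return flags | TRACE_PFMAJ;
                if (!strcmp(str, "min"))
                        return flags | TRACE_PFMIN;
                return flags;
        }

        int main(void)
        {
                int flags = 0;

                flags = add_pf(flags, "maj");   /* perf trace -F maj ... */
                flags = add_pf(flags, "min");   /* ... -F min */
                assert(flags == (TRACE_PFMAJ | TRACE_PFMIN));
                return 0;
        }
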
 int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        const char * const trace_usage[] = {
@@ -2293,6 +2456,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                },
                .output = stdout,
                .show_comm = true,
+               .trace_syscalls = true,
        };
        const char *output_name = NULL;
        const char *ev_qualifier_str = NULL;
@@ -2330,20 +2494,34 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                    "Show only syscall summary with statistics"),
        OPT_BOOLEAN('S', "with-summary", &trace.summary,
                    "Show all syscalls and summary with statistics"),
+       OPT_CALLBACK_DEFAULT('F', "pf", &trace.trace_pgfaults, "all|maj|min",
+                    "Trace pagefaults", parse_pagefaults, "maj"),
+       OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
        OPT_END()
        };
        int err;
        char bf[BUFSIZ];
 
-       if ((argc > 1) && (strcmp(argv[1], "record") == 0))
-               return trace__record(argc-2, &argv[2]);
+       argc = parse_options(argc, argv, trace_options, trace_usage,
+                            PARSE_OPT_STOP_AT_NON_OPTION);
 
-       argc = parse_options(argc, argv, trace_options, trace_usage, 0);
+       if (trace.trace_pgfaults) {
+               trace.opts.sample_address = true;
+               trace.opts.sample_time = true;
+       }
+
+       if ((argc >= 1) && (strcmp(argv[0], "record") == 0))
+               return trace__record(&trace, argc-1, &argv[1]);
 
        /* summary_only implies summary option, but don't overwrite summary if set */
        if (trace.summary_only)
                trace.summary = trace.summary_only;
 
+       if (!trace.trace_syscalls && !trace.trace_pgfaults) {
+               pr_err("Please specify something to trace.\n");
+               return -1;
+       }
+
        if (output_name != NULL) {
                err = trace__open_output(&trace, output_name);
                if (err < 0) {
index 4f100b5..1f67aa0 100644 (file)
@@ -48,6 +48,10 @@ ifneq ($(ARCH),$(filter $(ARCH),x86 arm))
   NO_LIBDW_DWARF_UNWIND := 1
 endif
 
+ifeq ($(ARCH),powerpc)
+  CFLAGS += -DHAVE_SKIP_CALLCHAIN_IDX
+endif
+
 ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 else
@@ -160,6 +164,7 @@ CORE_FEATURE_TESTS =                        \
        backtrace                       \
        dwarf                           \
        fortify-source                  \
+       sync-compare-and-swap           \
        glibc                           \
        gtk2                            \
        gtk2-infobar                    \
@@ -195,6 +200,7 @@ LIB_FEATURE_TESTS =                 \
 VF_FEATURE_TESTS =                     \
        backtrace                       \
        fortify-source                  \
+       sync-compare-and-swap           \
        gtk2-infobar                    \
        libelf-getphdrnum               \
        libelf-mmap                     \
@@ -268,6 +274,10 @@ CFLAGS += -I$(LIB_INCLUDE)
 
 CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 
+ifeq ($(feature-sync-compare-and-swap), 1)
+  CFLAGS += -DHAVE_SYNC_COMPARE_AND_SWAP_SUPPORT
+endif
+
 ifndef NO_BIONIC
   $(call feature_check,bionic)
   ifeq ($(feature-bionic), 1)
@@ -299,7 +309,11 @@ else
       NO_LIBUNWIND := 1
       NO_LIBDW_DWARF_UNWIND := 1
     else
-      msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
+      ifneq ($(filter s% -static%,$(LDFLAGS)),)
+        msg := $(error No static glibc found, please install glibc-static);
+      else
+        msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]);
+      endif
     endif
   else
     ifndef NO_LIBDW_DWARF_UNWIND
@@ -586,6 +600,10 @@ ifndef NO_LIBNUMA
   endif
 endif
 
+ifdef HAVE_KVM_STAT_SUPPORT
+    CFLAGS += -DHAVE_KVM_STAT_SUPPORT
+endif
+
 # Among the variables below, these:
 #   perfexecdir
 #   template_dir
index 64c84e5..6088f8d 100644 (file)
@@ -5,6 +5,7 @@ FILES=                                  \
        test-bionic.bin                 \
        test-dwarf.bin                  \
        test-fortify-source.bin         \
+       test-sync-compare-and-swap.bin  \
        test-glibc.bin                  \
        test-gtk2.bin                   \
        test-gtk2-infobar.bin           \
@@ -141,6 +142,9 @@ test-timerfd.bin:
 test-libdw-dwarf-unwind.bin:
        $(BUILD)
 
+test-sync-compare-and-swap.bin:
+       $(BUILD) -Werror
+
 -include *.d
 
 ###############################
index fe5c1e5..a7d022e 100644 (file)
 # include "test-libdw-dwarf-unwind.c"
 #undef main
 
+#define main main_test_sync_compare_and_swap
+# include "test-sync-compare-and-swap.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
        main_test_libpython();
@@ -111,6 +115,7 @@ int main(int argc, char *argv[])
        main_test_timerfd();
        main_test_stackprotector_all();
        main_test_libdw_dwarf_unwind();
+       main_test_sync_compare_and_swap(argc, argv);
 
        return 0;
 }
diff --git a/tools/perf/config/feature-checks/test-sync-compare-and-swap.c b/tools/perf/config/feature-checks/test-sync-compare-and-swap.c
new file mode 100644 (file)
index 0000000..c34d4ca
--- /dev/null
@@ -0,0 +1,14 @@
+#include <stdint.h>
+
+volatile uint64_t x;
+
+int main(int argc, char *argv[])
+{
+       uint64_t old, new = argc;
+
+       argv = argv;    /* reference argv so -Werror doesn't flag it unused */
+       do {
+               old = __sync_val_compare_and_swap(&x, 0, 0);
+       } while (!__sync_bool_compare_and_swap(&x, old, new));
+       return old == new;
+}
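The feature test only has to compile and link cleanly (note the -Werror in the Makefile rule above): if the toolchain lacks 64-bit __sync builtins for the target, the references fail and HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT stays unset. As a usage sketch, the same GCC builtins implement a lock-free counter:

        #include <inttypes.h>
        #include <stdio.h>

        static volatile uint64_t counter;

        /* Lock-free increment built on the primitive the feature test probes. */
        static uint64_t lockfree_inc(volatile uint64_t *v)
        {
                uint64_t old;

                do {
                        old = *v;
                } while (!__sync_bool_compare_and_swap(v, old, old + 1));

                return old + 1;
        }

        int main(void)
        {
                printf("%" PRIu64 "\n", lockfree_inc(&counter));  /* prints 1 */
                return 0;
        }
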
index 5268a14..937e432 100644 (file)
@@ -54,6 +54,7 @@
 #define mb()           asm volatile("bcr 15,0" ::: "memory")
 #define wmb()          asm volatile("bcr 15,0" ::: "memory")
 #define rmb()          asm volatile("bcr 15,0" ::: "memory")
+#define CPUINFO_PROC   "vendor_id"
 #endif
 
 #ifdef __sh__
index 78f7b92..2282d41 100644 (file)
 #include "util/quote.h"
 #include "util/run-command.h"
 #include "util/parse-events.h"
+#include "util/debug.h"
 #include <api/fs/debugfs.h>
 #include <pthread.h>
 
 const char perf_usage_string[] =
-       "perf [--version] [--help] COMMAND [ARGS]";
+       "perf [--version] [--help] [OPTIONS] COMMAND [ARGS]";
 
 const char perf_more_info_string[] =
        "See 'perf help COMMAND' for more information on a specific command.";
@@ -212,6 +213,16 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                printf("%s ", p->cmd);
                        }
                        exit(0);
+               } else if (!strcmp(cmd, "--debug")) {
+                       if (*argc < 2) {
+                               fprintf(stderr, "No variable specified for --debug.\n");
+                               usage(perf_usage_string);
+                       }
+                       if (perf_debug_option((*argv)[1]))
+                               usage(perf_usage_string);
+
+                       (*argv)++;
+                       (*argc)--;
                } else {
                        fprintf(stderr, "Unknown option: %s\n", cmd);
                        usage(perf_usage_string);
@@ -458,6 +469,7 @@ int main(int argc, const char **argv)
 
        /* The page_size is placed in util object. */
        page_size = sysconf(_SC_PAGE_SIZE);
+       cacheline_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
 
        cmd = perf_extract_argv0_path(argv[0]);
        if (!cmd)
index 8104895..74685f3 100644 (file)
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_exit $@
+(perf record -e raw_syscalls:sys_exit $@ || \
+ perf record -e syscalls:sys_exit $@) 2> /dev/null
index 94bc25a..55e7ae4 100644 (file)
@@ -26,6 +26,11 @@ sub raw_syscalls::sys_exit
        }
 }
 
+sub syscalls::sys_exit
+{
+       raw_syscalls::sys_exit(@_)
+}
+
 sub trace_end
 {
     printf("\nfailed syscalls by comm:\n\n");
index de7211e..38dfb72 100644 (file)
@@ -107,12 +107,13 @@ def taskState(state):
 
 class EventHeaders:
        def __init__(self, common_cpu, common_secs, common_nsecs,
-                    common_pid, common_comm):
+                    common_pid, common_comm, common_callchain):
                self.cpu = common_cpu
                self.secs = common_secs
                self.nsecs = common_nsecs
                self.pid = common_pid
                self.comm = common_comm
+               self.callchain = common_callchain
 
        def ts(self):
                return (self.secs * (10 ** 9)) + self.nsecs
index 8104895..74685f3 100644 (file)
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_exit $@
+(perf record -e raw_syscalls:sys_exit $@ || \
+ perf record -e syscalls:sys_exit $@) 2> /dev/null
index 4efbfaa..d694084 100644 (file)
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_enter $@
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
index 4efbfaa..d694084 100644 (file)
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_enter $@
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
index 4efbfaa..d694084 100644 (file)
@@ -1,2 +1,3 @@
 #!/bin/bash
-perf record -e raw_syscalls:sys_enter $@
+(perf record -e raw_syscalls:sys_enter $@ || \
+ perf record -e syscalls:sys_enter $@) 2> /dev/null
index 4647a76..334599c 100644 (file)
@@ -27,7 +27,7 @@ def trace_end():
 
 def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       vec):
+       common_callchain, vec):
                print_header(event_name, common_cpu, common_secs, common_nsecs,
                        common_pid, common_comm)
 
@@ -38,7 +38,7 @@ def irq__softirq_entry(event_name, context, common_cpu,
 
 def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       call_site, ptr, bytes_req, bytes_alloc,
+       common_callchain, call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
                print_header(event_name, common_cpu, common_secs, common_nsecs,
                        common_pid, common_comm)
index 85805fa..cafeff3 100644 (file)
@@ -39,7 +39,7 @@ def trace_end():
 
 def raw_syscalls__sys_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       id, ret):
+       common_callchain, id, ret):
        if (for_comm and common_comm != for_comm) or \
           (for_pid  and common_pid  != for_pid ):
                return
@@ -50,6 +50,11 @@ def raw_syscalls__sys_exit(event_name, context, common_cpu,
                except TypeError:
                        syscalls[common_comm][common_pid][id][ret] = 1
 
+def syscalls__sys_exit(event_name, context, common_cpu,
+       common_secs, common_nsecs, common_pid, common_comm,
+       id, ret):
+       raw_syscalls__sys_exit(**locals())
+
 def print_error_totals():
     if for_comm is not None:
            print "\nsyscall errors for %s:\n\n" % (for_comm),
index 11e70a3..0f5cf43 100644 (file)
@@ -21,7 +21,7 @@ thread_blocktime = {}
 lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
 process_names = {} # long-lived pid-to-execname mapping
 
-def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
+def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
                              nr, uaddr, op, val, utime, uaddr2, val3):
        cmd = op & FUTEX_CMD_MASK
        if cmd != FUTEX_WAIT:
@@ -31,7 +31,7 @@ def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
        thread_thislock[tid] = uaddr
        thread_blocktime[tid] = nsecs(s, ns)
 
-def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
+def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm, callchain,
                             nr, ret):
        if thread_blocktime.has_key(tid):
                elapsed = nsecs(s, ns) - thread_blocktime[tid]
index b574059..0b6ce8c 100755 (executable)
@@ -66,7 +66,7 @@ def trace_end():
        print_drop_table()
 
 # called from perf, when it finds a corresponding event
-def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                   skbaddr, location, protocol):
        slocation = str(location)
        try:
index 9aa0a32..4d21ef2 100644 (file)
@@ -224,75 +224,75 @@ def trace_end():
                        (len(rx_skb_list), of_count_rx_skb_list)
 
 # called from perf, when it finds a corresponding event
-def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
+def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
        if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
                return
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
        all_event_list.append(event_info)
 
-def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
+def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
        if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
                return
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
        all_event_list.append(event_info)
 
-def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
+def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, callchain, vec):
        if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
                return
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
        all_event_list.append(event_info)
 
 def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
-                       irq, irq_name):
+                       callchain, irq, irq_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        irq, irq_name)
        all_event_list.append(event_info)
 
-def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
+def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, callchain, irq, ret):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
        all_event_list.append(event_info)
 
-def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
+def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, callchain, napi, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        napi, dev_name)
        all_event_list.append(event_info)
 
-def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                        skblen, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        skbaddr, skblen, dev_name)
        all_event_list.append(event_info)
 
-def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
+def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr,
                        skblen, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        skbaddr, skblen, dev_name)
        all_event_list.append(event_info)
 
-def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
+def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm, callchain,
                        skbaddr, skblen, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        skbaddr, skblen, dev_name)
        all_event_list.append(event_info)
 
-def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
+def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm, callchain,
                        skbaddr, skblen, rc, dev_name):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        skbaddr, skblen, rc ,dev_name)
        all_event_list.append(event_info)
 
-def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
+def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
                        skbaddr, protocol, location):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        skbaddr, protocol, location)
        all_event_list.append(event_info)
 
-def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
+def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, callchain, skbaddr):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        skbaddr)
        all_event_list.append(event_info)
 
-def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
+def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm, callchain,
        skbaddr, skblen):
        event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                        skbaddr, skblen)
index 74d55ec..de66cb3 100644 (file)
@@ -369,93 +369,92 @@ def trace_end():
 
 def sched__sched_stat_runtime(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, runtime, vruntime):
+       common_callchain, comm, pid, runtime, vruntime):
        pass
 
 def sched__sched_stat_iowait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, delay):
+       common_callchain, comm, pid, delay):
        pass
 
 def sched__sched_stat_sleep(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, delay):
+       common_callchain, comm, pid, delay):
        pass
 
 def sched__sched_stat_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, delay):
+       common_callchain, comm, pid, delay):
        pass
 
 def sched__sched_process_fork(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       parent_comm, parent_pid, child_comm, child_pid):
+       common_callchain, parent_comm, parent_pid, child_comm, child_pid):
        pass
 
 def sched__sched_process_wait(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, prio):
+       common_callchain, comm, pid, prio):
        pass
 
 def sched__sched_process_exit(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, prio):
+       common_callchain, comm, pid, prio):
        pass
 
 def sched__sched_process_free(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, prio):
+       common_callchain, comm, pid, prio):
        pass
 
 def sched__sched_migrate_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, prio, orig_cpu,
+       common_callchain, comm, pid, prio, orig_cpu,
        dest_cpu):
        headers = EventHeaders(common_cpu, common_secs, common_nsecs,
-                               common_pid, common_comm)
+                               common_pid, common_comm, common_callchain)
        parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
 
 def sched__sched_switch(event_name, context, common_cpu,
-       common_secs, common_nsecs, common_pid, common_comm,
+       common_secs, common_nsecs, common_pid, common_comm, common_callchain,
        prev_comm, prev_pid, prev_prio, prev_state,
        next_comm, next_pid, next_prio):
 
        headers = EventHeaders(common_cpu, common_secs, common_nsecs,
-                               common_pid, common_comm)
+                               common_pid, common_comm, common_callchain)
        parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
                         next_comm, next_pid, next_prio)
 
 def sched__sched_wakeup_new(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, prio, success,
+       common_callchain, comm, pid, prio, success,
        target_cpu):
        headers = EventHeaders(common_cpu, common_secs, common_nsecs,
-                               common_pid, common_comm)
+                               common_pid, common_comm, common_callchain)
        parser.wake_up(headers, comm, pid, success, target_cpu, 1)
 
 def sched__sched_wakeup(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, prio, success,
+       common_callchain, comm, pid, prio, success,
        target_cpu):
        headers = EventHeaders(common_cpu, common_secs, common_nsecs,
-                               common_pid, common_comm)
+                               common_pid, common_comm, common_callchain)
        parser.wake_up(headers, comm, pid, success, target_cpu, 0)
 
 def sched__sched_wait_task(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid, prio):
+       common_callchain, comm, pid, prio):
        pass
 
 def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       ret):
+       common_callchain, ret):
        pass
 
 def sched__sched_kthread_stop(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       comm, pid):
+       common_callchain, comm, pid):
        pass
 
-def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
-               common_pid, common_comm):
+def trace_unhandled(event_name, context, event_fields_dict):
        pass
index 42c267e..61621b9 100644 (file)
@@ -44,7 +44,7 @@ def trace_begin():
 
 def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       id, args):
+       common_callchain, id, args):
        if for_comm is not None:
                if common_comm != for_comm:
                        return
@@ -53,6 +53,11 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
        except TypeError:
                syscalls[id] = 1
 
+def syscalls__sys_enter(event_name, context, common_cpu,
+       common_secs, common_nsecs, common_pid, common_comm,
+       id, args):
+       raw_syscalls__sys_enter(**locals())
+
 def print_syscall_totals(interval):
        while 1:
                clear_term()
index c64d1c5..daf314c 100644 (file)
@@ -38,7 +38,7 @@ def trace_end():
 
 def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       id, args):
+       common_callchain, id, args):
 
        if (for_comm and common_comm != for_comm) or \
           (for_pid  and common_pid  != for_pid ):
@@ -48,6 +48,11 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
        except TypeError:
                syscalls[common_comm][common_pid][id] = 1
 
+def syscalls__sys_enter(event_name, context, common_cpu,
+       common_secs, common_nsecs, common_pid, common_comm,
+       id, args):
+       raw_syscalls__sys_enter(**locals())
+
 def print_syscall_totals():
     if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
index b435d3f..e66a773 100644 (file)
@@ -35,7 +35,7 @@ def trace_end():
 
 def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
-       id, args):
+       common_callchain, id, args):
        if for_comm is not None:
                if common_comm != for_comm:
                        return
@@ -44,6 +44,11 @@ def raw_syscalls__sys_enter(event_name, context, common_cpu,
        except TypeError:
                syscalls[id] = 1
 
+def syscalls__sys_enter(event_name, context, common_cpu,
+       common_secs, common_nsecs, common_pid, common_comm,
+       id, args):
+       raw_syscalls__sys_enter(**locals())
+
 def print_syscall_totals():
     if for_comm is not None:
            print "\nsyscall events for %s:\n\n" % (for_comm),
index e9bd639..f710b92 100644 (file)
@@ -1,7 +1,8 @@
 [event]
 fd=1
 group_fd=-1
-flags=0
+# 0 or PERF_FLAG_FD_CLOEXEC flag
+flags=0|8
 cpu=*
 type=0|1
 size=96
index 91cd48b..dc3ada2 100644 (file)
@@ -1,7 +1,8 @@
 [event]
 fd=1
 group_fd=-1
-flags=0
+# 0 or PERF_FLAG_FD_CLOEXEC flag
+flags=0|8
 cpu=*
 type=0
 size=96
index aba0954..a02b035 100644 (file)
@@ -25,6 +25,7 @@
 #include "tests.h"
 #include "debug.h"
 #include "perf.h"
+#include "cloexec.h"
 
 static int fd1;
 static int fd2;
@@ -78,7 +79,8 @@ static int bp_event(void *fn, int setup_signal)
        pe.exclude_kernel = 1;
        pe.exclude_hv = 1;
 
-       fd = sys_perf_event_open(&pe, 0, -1, -1, 0);
+       fd = sys_perf_event_open(&pe, 0, -1, -1,
+                                perf_event_open_cloexec_flag());
        if (fd < 0) {
                pr_debug("failed opening event %llx\n", pe.config);
                return TEST_FAIL;
index 44ac821..e765377 100644 (file)
@@ -24,6 +24,7 @@
 #include "tests.h"
 #include "debug.h"
 #include "perf.h"
+#include "cloexec.h"
 
 static int overflows;
 
@@ -91,7 +92,8 @@ int test__bp_signal_overflow(void)
        pe.exclude_kernel = 1;
        pe.exclude_hv = 1;
 
-       fd = sys_perf_event_open(&pe, 0, -1, -1, 0);
+       fd = sys_perf_event_open(&pe, 0, -1, -1,
+                                perf_event_open_cloexec_flag());
        if (fd < 0) {
                pr_debug("failed opening event %llx\n", pe.config);
                return TEST_FAIL;
index 802e3cd..6f8b01b 100644 (file)
@@ -3,6 +3,8 @@
  *
  * Builtin regression testing command: ever growing number of sanity tests
  */
+#include <unistd.h>
+#include <string.h>
 #include "builtin.h"
 #include "intlist.h"
 #include "tests.h"
@@ -50,9 +52,17 @@ static struct test {
                .func = test__pmu,
        },
        {
-               .desc = "Test dso data interface",
+               .desc = "Test dso data read",
                .func = test__dso_data,
        },
+       {
+               .desc = "Test dso data cache",
+               .func = test__dso_data_cache,
+       },
+       {
+               .desc = "Test dso data reopen",
+               .func = test__dso_data_reopen,
+       },
        {
                .desc = "roundtrip evsel->name check",
                .func = test__perf_evsel__roundtrip_name_test,
@@ -172,6 +182,34 @@ static bool perf_test__matches(int curr, int argc, const char *argv[])
        return false;
 }
 
+static int run_test(struct test *test)
+{
+       int status, err = -1, child = fork();
+
+       if (child < 0) {
+               pr_err("failed to fork test: %s\n", strerror(errno));
+               return -1;
+       }
+
+       if (!child) {
+               pr_debug("test child forked, pid %d\n", getpid());
+               err = test->func();
+               exit(err);
+       }
+
+       wait(&status);
+
+       if (WIFEXITED(status)) {
+               err = WEXITSTATUS(status);
+               pr_debug("test child finished with %d\n", err);
+       } else if (WIFSIGNALED(status)) {
+               err = -1;
+               pr_debug("test child interrupted\n");
+       }
+
+       return err;
+}
+
 static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
 {
        int i = 0;
@@ -200,7 +238,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
                }
 
                pr_debug("\n--- start ---\n");
-               err = tests[curr].func();
+               err = run_test(&tests[curr]);
                pr_debug("---- end ----\n%s:", tests[curr].desc);
 
                switch (err) {
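
The run_test() wrapper above forks each test into its own child, so a crashing test (SIGSEGV and friends) is reported as a failure instead of killing the whole `perf test` run. A minimal standalone sketch of the same fork-and-reap pattern; `my_test` is a hypothetical stand-in for a test body:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/wait.h>

static int my_test(void)                /* hypothetical test body */
{
        return 0;                       /* 0 == pass */
}

static int run_isolated(int (*fn)(void))
{
        int status;
        pid_t child = fork();

        if (child < 0)
                return -1;
        if (child == 0)
                exit(fn());             /* child runs the test, exit code is the result */

        waitpid(child, &status, 0);
        if (WIFEXITED(status))
                return WEXITSTATUS(status);
        return -1;                      /* killed by a signal, e.g. SIGSEGV */
}

int main(void)
{
        printf("test returned %d\n", run_isolated(my_test));
        return 0;
}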
index 3e6cb17..caaf37f 100644 (file)
@@ -1,22 +1,28 @@
-#include "util.h"
-
 #include <stdlib.h>
 #include <linux/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <string.h>
-
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <api/fs/fs.h>
+#include "util.h"
 #include "machine.h"
 #include "symbol.h"
 #include "tests.h"
+#include "debug.h"
 
 static char *test_file(int size)
 {
-       static char buf_templ[] = "/tmp/test-XXXXXX";
+#define TEMPL "/tmp/perf-test-XXXXXX"
+       static char buf_templ[sizeof(TEMPL)];
        char *templ = buf_templ;
        int fd, i;
        unsigned char *buf;
 
+       strcpy(buf_templ, TEMPL);
+#undef TEMPL
+
        fd = mkstemp(templ);
        if (fd < 0) {
                perror("mkstemp failed");
@@ -150,3 +156,204 @@ int test__dso_data(void)
        unlink(file);
        return 0;
 }
+
+static long open_files_cnt(void)
+{
+       char path[PATH_MAX];
+       struct dirent *dent;
+       DIR *dir;
+       long nr = 0;
+
+       scnprintf(path, PATH_MAX, "%s/self/fd", procfs__mountpoint());
+       pr_debug("fd path: %s\n", path);
+
+       dir = opendir(path);
+       TEST_ASSERT_VAL("failed to open fd directory", dir);
+
+       while ((dent = readdir(dir)) != NULL) {
+               if (!strcmp(dent->d_name, ".") ||
+                   !strcmp(dent->d_name, ".."))
+                       continue;
+
+               nr++;
+       }
+
+       closedir(dir);
+       return nr - 1;
+}
+
+static struct dso **dsos;
+
+static int dsos__create(int cnt, int size)
+{
+       int i;
+
+       dsos = malloc(sizeof(*dsos) * cnt);
+       TEST_ASSERT_VAL("failed to alloc dsos array", dsos);
+
+       for (i = 0; i < cnt; i++) {
+               char *file;
+
+               file = test_file(size);
+               TEST_ASSERT_VAL("failed to get dso file", file);
+
+               dsos[i] = dso__new(file);
+               TEST_ASSERT_VAL("failed to get dso", dsos[i]);
+       }
+
+       return 0;
+}
+
+static void dsos__delete(int cnt)
+{
+       int i;
+
+       for (i = 0; i < cnt; i++) {
+               struct dso *dso = dsos[i];
+
+               unlink(dso->name);
+               dso__delete(dso);
+       }
+
+       free(dsos);
+}
+
+static int set_fd_limit(int n)
+{
+       struct rlimit rlim;
+
+       if (getrlimit(RLIMIT_NOFILE, &rlim))
+               return -1;
+
+       pr_debug("file limit %ld, new %d\n", (long) rlim.rlim_cur, n);
+
+       rlim.rlim_cur = n;
+       return setrlimit(RLIMIT_NOFILE, &rlim);
+}
+
+int test__dso_data_cache(void)
+{
+       struct machine machine;
+       long nr_end, nr = open_files_cnt();
+       int dso_cnt, limit, i, fd;
+
+       memset(&machine, 0, sizeof(machine));
+
+       /* set as system limit */
+       limit = nr * 4;
+       TEST_ASSERT_VAL("failed to set file limit", !set_fd_limit(limit));
+
+       /* and this is now our dso open FDs limit + 1 extra */
+       dso_cnt = limit / 2 + 1;
+       TEST_ASSERT_VAL("failed to create dsos\n",
+               !dsos__create(dso_cnt, TEST_FILE_SIZE));
+
+       for (i = 0; i < (dso_cnt - 1); i++) {
+               struct dso *dso = dsos[i];
+
+               /*
+                * Open dsos via dso__data_fd or dso__data_read_offset.
+                * Both open the data file and keep it open.
+                */
+               if (i % 2) {
+                       fd = dso__data_fd(dso, &machine);
+                       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+               } else {
+                       #define BUFSIZE 10
+                       u8 buf[BUFSIZE];
+                       ssize_t n;
+
+                       n = dso__data_read_offset(dso, &machine, 0, buf, BUFSIZE);
+                       TEST_ASSERT_VAL("failed to read dso", n == BUFSIZE);
+               }
+       }
+
+       /* open +1 dso over the allowed limit */
+       fd = dso__data_fd(dsos[i], &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /* should force the first one to be closed */
+       TEST_ASSERT_VAL("failed to close dsos[0]", dsos[0]->data.fd == -1);
+
+       /* cleanup everything */
+       dsos__delete(dso_cnt);
+
+       /* Make sure we did not leak any file descriptor. */
+       nr_end = open_files_cnt();
+       pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
+       TEST_ASSERT_VAL("failed leadking files", nr == nr_end);
+       return 0;
+}
+
+int test__dso_data_reopen(void)
+{
+       struct machine machine;
+       long nr_end, nr = open_files_cnt();
+       int fd, fd_extra;
+
+#define dso_0 (dsos[0])
+#define dso_1 (dsos[1])
+#define dso_2 (dsos[2])
+
+       memset(&machine, 0, sizeof(machine));
+
+       /*
+        * Test scenario:
+        * - create 3 dso objects
+        * - set process file descriptor limit to current
+        *   files count + 3
+        * - test that the first dso gets closed when we
+        *   reach the files count limit
+        */
+
+       /* Make sure we are able to open 3 fds anyway */
+       TEST_ASSERT_VAL("failed to set file limit",
+                       !set_fd_limit((nr + 3)));
+
+       TEST_ASSERT_VAL("failed to create dsos\n", !dsos__create(3, TEST_FILE_SIZE));
+
+       /* open dso_0 */
+       fd = dso__data_fd(dso_0, &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /* open dso_1 */
+       fd = dso__data_fd(dso_1, &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /*
+        * open extra file descriptor and we just
+        * reached the files count limit
+        */
+       fd_extra = open("/dev/null", O_RDONLY);
+       TEST_ASSERT_VAL("failed to open extra fd", fd_extra > 0);
+
+       /* open dso_2 */
+       fd = dso__data_fd(dso_2, &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /*
+        * dso_0 should get closed, because we reached
+        * the file descriptor limit
+        */
+       TEST_ASSERT_VAL("failed to close dso_0", dso_0->data.fd == -1);
+
+       /* open dso_0 */
+       fd = dso__data_fd(dso_0, &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /*
+        * dso_1 should get closed, because we reached
+        * the file descriptor limit
+        */
+       TEST_ASSERT_VAL("failed to close dso_1", dso_1->data.fd == -1);
+
+       /* cleanup everything */
+       close(fd_extra);
+       dsos__delete(3);
+
+       /* Make sure we did not leak any file descriptor. */
+       nr_end = open_files_cnt();
+       pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
+       TEST_ASSERT_VAL("failed leadking files", nr == nr_end);
+       return 0;
+}
index 108f0cd..96adb73 100644 (file)
@@ -15,7 +15,7 @@ static int mmap_handler(struct perf_tool *tool __maybe_unused,
                        struct perf_sample *sample __maybe_unused,
                        struct machine *machine)
 {
-       return machine__process_mmap_event(machine, event, NULL);
+       return machine__process_mmap2_event(machine, event, NULL);
 }
 
 static int init_live_machine(struct machine *machine)
index 465cdbc..b8d8341 100644 (file)
@@ -2,6 +2,7 @@
 #include "evsel.h"
 #include "parse-events.h"
 #include "tests.h"
+#include "debug.h"
 
 static int perf_evsel__roundtrip_cache_name_test(void)
 {
index 35d7fdb..5216242 100644 (file)
@@ -1,6 +1,7 @@
 #include <traceevent/event-parse.h>
 #include "evsel.h"
 #include "tests.h"
+#include "debug.h"
 
 static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name,
                                  int size, bool should_be_signed)
index 2f92d6e..69a71ff 100644 (file)
@@ -205,8 +205,7 @@ $(run):
        ( eval $$cmd ) >> $@ 2>&1; \
        echo "  test: $(call test,$@)" >> $@ 2>&1; \
        $(call test,$@) && \
-       rm -f $@ \
-       rm -rf $$TMP_DEST
+       rm -rf $@ $$TMP_DEST || (cat $@ ; false)
 
 $(run_O):
        $(call clean)
@@ -217,9 +216,7 @@ $(run_O):
        ( eval $$cmd ) >> $@ 2>&1 && \
        echo "  test: $(call test_O,$@)" >> $@ 2>&1; \
        $(call test_O,$@) && \
-       rm -f $@ && \
-       rm -rf $$TMP_O \
-       rm -rf $$TMP_DEST
+       rm -rf $@ $$TMP_O $$TMP_DEST || (cat $@ ; false)
 
 tarpkg:
        @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
index c505ef2..0785b64 100644 (file)
@@ -3,6 +3,7 @@
 #include "evsel.h"
 #include "thread_map.h"
 #include "tests.h"
+#include "debug.h"
 
 int test__syscall_open_tp_fields(void)
 {
index deba669..5941927 100644 (file)
@@ -5,6 +5,7 @@
 #include <api/fs/fs.h>
 #include <api/fs/debugfs.h>
 #include "tests.h"
+#include "debug.h"
 #include <linux/hw_breakpoint.h>
 
 #define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \
index 905019f..2c63ea6 100644 (file)
@@ -7,6 +7,7 @@
 #include "evlist.h"
 #include "header.h"
 #include "util.h"
+#include "debug.h"
 
 static int process_event(struct perf_evlist **pevlist, union perf_event *event)
 {
index 3b7cd4d..f238442 100644 (file)
@@ -8,10 +8,9 @@
 #include "evsel.h"
 #include "thread_map.h"
 #include "cpumap.h"
+#include "tsc.h"
 #include "tests.h"
 
-#include "../arch/x86/util/tsc.h"
-
 #define CHECK__(x) {                           \
        while ((x) < 0) {                       \
                pr_debug(#x " failed!\n");      \
        }                                       \
 }
 
-static u64 rdtsc(void)
-{
-       unsigned int low, high;
-
-       asm volatile("rdtsc" : "=a" (low), "=d" (high));
-
-       return low | ((u64)high) << 32;
-}
-
 /**
  * test__perf_time_to_tsc - test converting perf time to TSC.
  *
index e59143f..c04d1f2 100644 (file)
@@ -6,6 +6,7 @@
 #include "perf.h"
 #include "debug.h"
 #include "tests.h"
+#include "cloexec.h"
 
 #if defined(__x86_64__) || defined(__i386__)
 
@@ -104,7 +105,8 @@ static int __test__rdpmc(void)
        sa.sa_sigaction = segfault_handler;
        sigaction(SIGSEGV, &sa, NULL);
 
-       fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+       fd = sys_perf_event_open(&attr, 0, -1, -1,
+                                perf_event_open_cloexec_flag());
        if (fd < 0) {
                pr_err("Error: sys_perf_event_open() syscall returned "
                       "with %d (%s)\n", fd, strerror(errno));
index 7ae8d17..ca292f9 100644 (file)
@@ -4,6 +4,7 @@
 #include "util.h"
 #include "event.h"
 #include "evsel.h"
+#include "debug.h"
 
 #include "tests.h"
 
index 022bb68..ed64790 100644 (file)
@@ -28,6 +28,8 @@ int test__syscall_open_tp_fields(void);
 int test__pmu(void);
 int test__attr(void);
 int test__dso_data(void);
+int test__dso_data_cache(void);
+int test__dso_data_reopen(void);
 int test__parse_events(void);
 int test__hists_link(void);
 int test__python_use(void);
index 2b2e0db..b028499 100644 (file)
@@ -2,6 +2,7 @@
 #include "machine.h"
 #include "thread.h"
 #include "map.h"
+#include "debug.h"
 
 int test__thread_mg_share(void)
 {
index 3ccf6e1..6680fa5 100644 (file)
@@ -150,7 +150,7 @@ unsigned int ui_browser__rb_tree_refresh(struct ui_browser *browser)
        while (nd != NULL) {
                ui_browser__gotorc(browser, row, 0);
                browser->write(browser, nd, row);
-               if (++row == browser->height)
+               if (++row == browser->rows)
                        break;
                nd = rb_next(nd);
        }
@@ -166,7 +166,7 @@ bool ui_browser__is_current_entry(struct ui_browser *browser, unsigned row)
 void ui_browser__refresh_dimensions(struct ui_browser *browser)
 {
        browser->width = SLtt_Screen_Cols - 1;
-       browser->height = SLtt_Screen_Rows - 2;
+       browser->height = browser->rows = SLtt_Screen_Rows - 2;
        browser->y = 1;
        browser->x = 0;
 }
@@ -250,7 +250,10 @@ int ui_browser__show(struct ui_browser *browser, const char *title,
        int err;
        va_list ap;
 
-       ui_browser__refresh_dimensions(browser);
+       if (browser->refresh_dimensions == NULL)
+               browser->refresh_dimensions = ui_browser__refresh_dimensions;
+
+       browser->refresh_dimensions(browser);
 
        pthread_mutex_lock(&ui__lock);
        __ui_browser__show_title(browser, title);
@@ -279,7 +282,7 @@ static void ui_browser__scrollbar_set(struct ui_browser *browser)
 {
        int height = browser->height, h = 0, pct = 0,
            col = browser->width,
-           row = browser->y - 1;
+           row = 0;
 
        if (browser->nr_entries > 1) {
                pct = ((browser->index * (browser->height - 1)) /
@@ -367,7 +370,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
 
                if (key == K_RESIZE) {
                        ui__refresh_dimensions(false);
-                       ui_browser__refresh_dimensions(browser);
+                       browser->refresh_dimensions(browser);
                        __ui_browser__show_title(browser, browser->title);
                        ui_helpline__puts(browser->helpline);
                        continue;
@@ -389,7 +392,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
                        if (browser->index == browser->nr_entries - 1)
                                break;
                        ++browser->index;
-                       if (browser->index == browser->top_idx + browser->height) {
+                       if (browser->index == browser->top_idx + browser->rows) {
                                ++browser->top_idx;
                                browser->seek(browser, +1, SEEK_CUR);
                        }
@@ -405,10 +408,10 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
                        break;
                case K_PGDN:
                case ' ':
-                       if (browser->top_idx + browser->height > browser->nr_entries - 1)
+                       if (browser->top_idx + browser->rows > browser->nr_entries - 1)
                                break;
 
-                       offset = browser->height;
+                       offset = browser->rows;
                        if (browser->index + offset > browser->nr_entries - 1)
                                offset = browser->nr_entries - 1 - browser->index;
                        browser->index += offset;
@@ -419,10 +422,10 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
                        if (browser->top_idx == 0)
                                break;
 
-                       if (browser->top_idx < browser->height)
+                       if (browser->top_idx < browser->rows)
                                offset = browser->top_idx;
                        else
-                               offset = browser->height;
+                               offset = browser->rows;
 
                        browser->index -= offset;
                        browser->top_idx -= offset;
@@ -432,7 +435,7 @@ int ui_browser__run(struct ui_browser *browser, int delay_secs)
                        ui_browser__reset_index(browser);
                        break;
                case K_END:
-                       offset = browser->height - 1;
+                       offset = browser->rows - 1;
                        if (offset >= browser->nr_entries)
                                offset = browser->nr_entries - 1;
 
@@ -462,7 +465,7 @@ unsigned int ui_browser__list_head_refresh(struct ui_browser *browser)
                if (!browser->filter || !browser->filter(browser, pos)) {
                        ui_browser__gotorc(browser, row, 0);
                        browser->write(browser, pos, row);
-                       if (++row == browser->height)
+                       if (++row == browser->rows)
                                break;
                }
        }
@@ -587,7 +590,7 @@ unsigned int ui_browser__argv_refresh(struct ui_browser *browser)
                if (!browser->filter || !browser->filter(browser, *pos)) {
                        ui_browser__gotorc(browser, row, 0);
                        browser->write(browser, pos, row);
-                       if (++row == browser->height)
+                       if (++row == browser->rows)
                                break;
                }
 
@@ -623,7 +626,7 @@ static void __ui_browser__line_arrow_up(struct ui_browser *browser,
 
        SLsmg_set_char_set(1);
 
-       if (start < browser->top_idx + browser->height) {
+       if (start < browser->top_idx + browser->rows) {
                row = start - browser->top_idx;
                ui_browser__gotorc(browser, row, column);
                SLsmg_write_char(SLSMG_LLCORN_CHAR);
@@ -633,7 +636,7 @@ static void __ui_browser__line_arrow_up(struct ui_browser *browser,
                if (row-- == 0)
                        goto out;
        } else
-               row = browser->height - 1;
+               row = browser->rows - 1;
 
        if (end > browser->top_idx)
                end_row = end - browser->top_idx;
@@ -675,8 +678,8 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser,
        } else
                row = 0;
 
-       if (end >= browser->top_idx + browser->height)
-               end_row = browser->height - 1;
+       if (end >= browser->top_idx + browser->rows)
+               end_row = browser->rows - 1;
        else
                end_row = end - browser->top_idx;
 
@@ -684,7 +687,7 @@ static void __ui_browser__line_arrow_down(struct ui_browser *browser,
        SLsmg_draw_vline(end_row - row + 1);
 
        ui_browser__gotorc(browser, end_row, column);
-       if (end < browser->top_idx + browser->height) {
+       if (end < browser->top_idx + browser->rows) {
                SLsmg_write_char(SLSMG_LLCORN_CHAR);
                ui_browser__gotorc(browser, end_row, column + 1);
                SLsmg_write_char(SLSMG_HLINE_CHAR);
index 03d4d62..92ae721 100644 (file)
 struct ui_browser {
        u64           index, top_idx;
        void          *top, *entries;
-       u16           y, x, width, height;
+       u16           y, x, width, height, rows;
        int           current_color;
        void          *priv;
        const char    *title;
        char          *helpline;
+       void          (*refresh_dimensions)(struct ui_browser *browser);
        unsigned int  (*refresh)(struct ui_browser *browser);
        void          (*write)(struct ui_browser *browser, void *entry, int row);
        void          (*seek)(struct ui_browser *browser, off_t offset, int whence);
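
Splitting `rows` out of `height` lets a browser reserve screen lines (here, one optional header line) once, in its own refresh_dimensions hook, while all the scrolling code keys off `rows`. A small sketch of that bookkeeping with invented names, assuming a one-line header:

#include <stdio.h>

struct browser {
        unsigned short height;  /* total lines available */
        unsigned short rows;    /* lines usable for entries */
        unsigned long index;    /* currently selected entry */
        unsigned long top_idx;  /* first visible entry */
        int show_headers;
};

static void update_rows(struct browser *b)
{
        unsigned short header = b->show_headers ? 1 : 0;

        b->rows = b->height - header;
        /* keep the selection visible if the header just ate its line */
        if (b->index - b->top_idx >= b->rows)
                b->index -= (b->index - b->top_idx) - b->rows + 1;
}

int main(void)
{
        struct browser b = { .height = 10, .index = 9, .top_idx = 0,
                             .show_headers = 1 };

        update_rows(&b);
        printf("rows=%u index=%lu\n", b.rows, b.index); /* rows=9 index=8 */
        return 0;
}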
index 52c03fb..a94b11f 100644 (file)
@@ -17,6 +17,7 @@
 #include "../util.h"
 #include "../ui.h"
 #include "map.h"
+#include "annotate.h"
 
 struct hist_browser {
        struct ui_browser   b;
@@ -25,6 +26,7 @@ struct hist_browser {
        struct map_symbol   *selection;
        int                  print_seq;
        bool                 show_dso;
+       bool                 show_headers;
        float                min_pcnt;
        u64                  nr_non_filtered_entries;
        u64                  nr_callchain_rows;
@@ -32,8 +34,7 @@ struct hist_browser {
 
 extern void hist_browser__init_hpp(void);
 
-static int hists__browser_title(struct hists *hists, char *bf, size_t size,
-                               const char *ev_name);
+static int hists__browser_title(struct hists *hists, char *bf, size_t size);
 static void hist_browser__update_nr_entries(struct hist_browser *hb);
 
 static struct rb_node *hists__filter_entries(struct rb_node *nd,
@@ -56,11 +57,42 @@ static u32 hist_browser__nr_entries(struct hist_browser *hb)
        return nr_entries + hb->nr_callchain_rows;
 }
 
-static void hist_browser__refresh_dimensions(struct hist_browser *browser)
+static void hist_browser__update_rows(struct hist_browser *hb)
 {
+       struct ui_browser *browser = &hb->b;
+       u16 header_offset = hb->show_headers ? 1 : 0, index_row;
+
+       browser->rows = browser->height - header_offset;
+       /*
+        * Verify if we were at the last line and that line isn't
+        * visible because we now show the header line(s).
+        */
+       index_row = browser->index - browser->top_idx;
+       if (index_row >= browser->rows)
+               browser->index -= index_row - browser->rows + 1;
+}
+
+static void hist_browser__refresh_dimensions(struct ui_browser *browser)
+{
+       struct hist_browser *hb = container_of(browser, struct hist_browser, b);
+
        /* 3 == +/- toggle symbol before actual hist_entry rendering */
-       browser->b.width = 3 + (hists__sort_list_width(browser->hists) +
-                            sizeof("[k]"));
+       browser->width = 3 + (hists__sort_list_width(hb->hists) + sizeof("[k]"));
+       /*
+        * FIXME: Just keeping existing behaviour, but this really should be
+        *        before updating browser->width, as it will invalidate the
+        *        calculation above. Fix this and the fallout in another
+        *        changeset.
+        */
+       ui_browser__refresh_dimensions(browser);
+       hist_browser__update_rows(hb);
+}
+
+static void hist_browser__gotorc(struct hist_browser *browser, int row, int column)
+{
+       u16 header_offset = browser->show_headers ? 1 : 0;
+
+       ui_browser__gotorc(&browser->b, row + header_offset, column);
 }
 
 static void hist_browser__reset(struct hist_browser *browser)
@@ -73,7 +105,7 @@ static void hist_browser__reset(struct hist_browser *browser)
 
        hist_browser__update_nr_entries(browser);
        browser->b.nr_entries = hist_browser__nr_entries(browser);
-       hist_browser__refresh_dimensions(browser);
+       hist_browser__refresh_dimensions(&browser->b);
        ui_browser__reset_index(&browser->b);
 }
 
@@ -345,7 +377,7 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser)
                "Or reduce the sampling frequency.");
 }
 
-static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
+static int hist_browser__run(struct hist_browser *browser,
                             struct hist_browser_timer *hbt)
 {
        int key;
@@ -355,8 +387,7 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
        browser->b.entries = &browser->hists->entries;
        browser->b.nr_entries = hist_browser__nr_entries(browser);
 
-       hist_browser__refresh_dimensions(browser);
-       hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+       hists__browser_title(browser->hists, title, sizeof(title));
 
        if (ui_browser__show(&browser->b, title,
                             "Press '?' for help on key bindings") < 0)
@@ -383,7 +414,7 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
                                ui_browser__warn_lost_events(&browser->b);
                        }
 
-                       hists__browser_title(browser->hists, title, sizeof(title), ev_name);
+                       hists__browser_title(browser->hists, title, sizeof(title));
                        ui_browser__show_title(&browser->b, title);
                        continue;
                }
@@ -392,10 +423,10 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
                        struct hist_entry *h = rb_entry(browser->b.top,
                                                        struct hist_entry, rb_node);
                        ui_helpline__pop();
-                       ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
+                       ui_helpline__fpush("%d: nr_ent=(%d,%d), rows=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d",
                                           seq++, browser->b.nr_entries,
                                           browser->hists->nr_entries,
-                                          browser->b.height,
+                                          browser->b.rows,
                                           browser->b.index,
                                           browser->b.top_idx,
                                           h->row_offset, h->nr_rows);
@@ -409,6 +440,10 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name,
                        /* Expand the whole world. */
                        hist_browser__set_folding(browser, true);
                        break;
+               case 'H':
+                       browser->show_headers = !browser->show_headers;
+                       hist_browser__update_rows(browser);
+                       break;
                case K_ENTER:
                        if (hist_browser__toggle_fold(browser))
                                break;
@@ -508,13 +543,13 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse
                        }
 
                        ui_browser__set_color(&browser->b, color);
-                       ui_browser__gotorc(&browser->b, row, 0);
+                       hist_browser__gotorc(browser, row, 0);
                        slsmg_write_nstring(" ", offset + extra_offset);
                        slsmg_printf("%c ", folded_sign);
                        slsmg_write_nstring(str, width);
                        free(alloc_str);
 
-                       if (++row == browser->b.height)
+                       if (++row == browser->b.rows)
                                goto out;
 do_next:
                        if (folded_sign == '+')
@@ -527,7 +562,7 @@ do_next:
                                                                         new_level, row, row_offset,
                                                                         is_current_entry);
                }
-               if (row == browser->b.height)
+               if (row == browser->b.rows)
                        goto out;
                node = next;
        }
@@ -567,13 +602,13 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser,
 
                s = callchain_list__sym_name(chain, bf, sizeof(bf),
                                             browser->show_dso);
-               ui_browser__gotorc(&browser->b, row, 0);
+               hist_browser__gotorc(browser, row, 0);
                ui_browser__set_color(&browser->b, color);
                slsmg_write_nstring(" ", offset);
                slsmg_printf("%c ", folded_sign);
                slsmg_write_nstring(s, width - 2);
 
-               if (++row == browser->b.height)
+               if (++row == browser->b.rows)
                        goto out;
        }
 
@@ -602,7 +637,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
                row += hist_browser__show_callchain_node(browser, node, level,
                                                         row, row_offset,
                                                         is_current_entry);
-               if (row == browser->b.height)
+               if (row == browser->b.rows)
                        break;
        }
 
@@ -732,7 +767,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
                        .ptr            = &arg,
                };
 
-               ui_browser__gotorc(&browser->b, row, 0);
+               hist_browser__gotorc(browser, row, 0);
 
                perf_hpp__for_each_format(fmt) {
                        if (perf_hpp__should_skip(fmt))
@@ -776,7 +811,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
        } else
                --row_offset;
 
-       if (folded_sign == '-' && row != browser->b.height) {
+       if (folded_sign == '-' && row != browser->b.rows) {
                printed += hist_browser__show_callchain(browser, &entry->sorted_chain,
                                                        1, row, &row_offset,
                                                        &current_entry);
@@ -787,6 +822,56 @@ static int hist_browser__show_entry(struct hist_browser *browser,
        return printed;
 }
 
+static int advance_hpp_check(struct perf_hpp *hpp, int inc)
+{
+       advance_hpp(hpp, inc);
+       return hpp->size <= 0;
+}
+
+static int hists__scnprintf_headers(char *buf, size_t size, struct hists *hists)
+{
+       struct perf_hpp dummy_hpp = {
+               .buf    = buf,
+               .size   = size,
+       };
+       struct perf_hpp_fmt *fmt;
+       size_t ret = 0;
+
+       if (symbol_conf.use_callchain) {
+               ret = scnprintf(buf, size, "  ");
+               if (advance_hpp_check(&dummy_hpp, ret))
+                       return ret;
+       }
+
+       perf_hpp__for_each_format(fmt) {
+               if (perf_hpp__should_skip(fmt))
+                       continue;
+
+               /* We need to add the length of the columns header. */
+               perf_hpp__reset_width(fmt, hists);
+
+               ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
+               if (advance_hpp_check(&dummy_hpp, ret))
+                       break;
+
+               ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "  ");
+               if (advance_hpp_check(&dummy_hpp, ret))
+                       break;
+       }
+
+       return ret;
+}
+
+static void hist_browser__show_headers(struct hist_browser *browser)
+{
+       char headers[1024];
+
+       hists__scnprintf_headers(headers, sizeof(headers), browser->hists);
+       ui_browser__gotorc(&browser->b, 0, 0);
+       ui_browser__set_color(&browser->b, HE_COLORSET_ROOT);
+       slsmg_write_nstring(headers, browser->b.width + 1);
+}
+
 static void ui_browser__hists_init_top(struct ui_browser *browser)
 {
        if (browser->top == NULL) {
@@ -800,9 +885,15 @@ static void ui_browser__hists_init_top(struct ui_browser *browser)
 static unsigned int hist_browser__refresh(struct ui_browser *browser)
 {
        unsigned row = 0;
+       u16 header_offset = 0;
        struct rb_node *nd;
        struct hist_browser *hb = container_of(browser, struct hist_browser, b);
 
+       if (hb->show_headers) {
+               hist_browser__show_headers(hb);
+               header_offset = 1;
+       }
+
        ui_browser__hists_init_top(browser);
 
        for (nd = browser->top; nd; nd = rb_next(nd)) {
@@ -817,11 +908,11 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser)
                        continue;
 
                row += hist_browser__show_entry(hb, h, row);
-               if (row == browser->height)
+               if (row == browser->rows)
                        break;
        }
 
-       return row;
+       return row + header_offset;
 }
 
 static struct rb_node *hists__filter_entries(struct rb_node *nd,
@@ -1190,8 +1281,10 @@ static struct hist_browser *hist_browser__new(struct hists *hists)
        if (browser) {
                browser->hists = hists;
                browser->b.refresh = hist_browser__refresh;
+               browser->b.refresh_dimensions = hist_browser__refresh_dimensions;
                browser->b.seek = ui_browser__hists_seek;
                browser->b.use_navkeypressed = true;
+               browser->show_headers = symbol_conf.show_hist_headers;
        }
 
        return browser;
@@ -1212,8 +1305,7 @@ static struct thread *hist_browser__selected_thread(struct hist_browser *browser
        return browser->he_selection->thread;
 }
 
-static int hists__browser_title(struct hists *hists, char *bf, size_t size,
-                               const char *ev_name)
+static int hists__browser_title(struct hists *hists, char *bf, size_t size)
 {
        char unit;
        int printed;
@@ -1222,6 +1314,7 @@ static int hists__browser_title(struct hists *hists, char *bf, size_t size,
        unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
        u64 nr_events = hists->stats.total_period;
        struct perf_evsel *evsel = hists_to_evsel(hists);
+       const char *ev_name = perf_evsel__name(evsel);
        char buf[512];
        size_t buflen = sizeof(buf);
 
@@ -1389,7 +1482,7 @@ static void hist_browser__update_nr_entries(struct hist_browser *hb)
 }
 
 static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
-                                   const char *helpline, const char *ev_name,
+                                   const char *helpline,
                                    bool left_exits,
                                    struct hist_browser_timer *hbt,
                                    float min_pcnt,
@@ -1421,6 +1514,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        "d             Zoom into current DSO\n"                         \
        "E             Expand all callchains\n"                         \
        "F             Toggle percentage of filtered entries\n"         \
+       "H             Display column headers\n"                        \
 
        /* help messages are sorted by lexical order of the hotkey */
        const char report_help[] = HIST_BROWSER_HELP_COMMON
@@ -1464,7 +1558,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 
                nr_options = 0;
 
-               key = hist_browser__run(browser, ev_name, hbt);
+               key = hist_browser__run(browser, hbt);
 
                if (browser->he_selection != NULL) {
                        thread = hist_browser__selected_thread(browser);
@@ -1593,13 +1687,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                         bi->to.sym->name) > 0)
                                annotate_t = nr_options++;
                } else {
-
                        if (browser->selection != NULL &&
                            browser->selection->sym != NULL &&
-                           !browser->selection->map->dso->annotate_warned &&
-                               asprintf(&options[nr_options], "Annotate %s",
-                                        browser->selection->sym->name) > 0)
-                               annotate = nr_options++;
+                           !browser->selection->map->dso->annotate_warned) {
+                               struct annotation *notes;
+
+                               notes = symbol__annotation(browser->selection->sym);
+
+                               if (notes->src &&
+                                   asprintf(&options[nr_options], "Annotate %s",
+                                                browser->selection->sym->name) > 0)
+                                       annotate = nr_options++;
+                       }
                }
 
                if (thread != NULL &&
@@ -1656,6 +1755,7 @@ retry_popup_menu:
 
                if (choice == annotate || choice == annotate_t || choice == annotate_f) {
                        struct hist_entry *he;
+                       struct annotation *notes;
                        int err;
 do_annotate:
                        if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1779,10 @@ do_annotate:
                                he->ms.map = he->branch_info->to.map;
                        }
 
+                       notes = symbol__annotation(he->ms.sym);
+                       if (!notes->src)
+                               continue;
+
                        /*
                         * Don't let this be freed, say, by hists__decay_entry.
                         */
@@ -1832,7 +1936,7 @@ static int perf_evsel_menu__run(struct perf_evsel_menu *menu,
 {
        struct perf_evlist *evlist = menu->b.priv;
        struct perf_evsel *pos;
-       const char *ev_name, *title = "Available samples";
+       const char *title = "Available samples";
        int delay_secs = hbt ? hbt->refresh : 0;
        int key;
 
@@ -1865,9 +1969,8 @@ browse_hists:
                         */
                        if (hbt)
                                hbt->timer(hbt->arg);
-                       ev_name = perf_evsel__name(pos);
                        key = perf_evsel__hists_browse(pos, nr_events, help,
-                                                      ev_name, true, hbt,
+                                                      true, hbt,
                                                       menu->min_pcnt,
                                                       menu->env);
                        ui_browser__show_title(&menu->b, title);
@@ -1971,10 +2074,9 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help,
 single_entry:
        if (nr_entries == 1) {
                struct perf_evsel *first = perf_evlist__first(evlist);
-               const char *ev_name = perf_evsel__name(first);
 
                return perf_evsel__hists_browse(first, nr_entries, help,
-                                               ev_name, false, hbt, min_pcnt,
+                                               false, hbt, min_pcnt,
                                                env);
        }
 
index 90122ab..40af0ac 100644 (file)
@@ -479,7 +479,7 @@ print_entries:
 
                if (h->ms.map == NULL && verbose > 1) {
                        __map_groups__fprintf_maps(h->thread->mg,
-                                                  MAP__FUNCTION, verbose, fp);
+                                                  MAP__FUNCTION, fp);
                        fprintf(fp, "%.10s end\n", graph_dotted_line);
                }
        }
index 48b6d3f..437ee09 100644 (file)
@@ -626,7 +626,7 @@ int sample__resolve_callchain(struct perf_sample *sample, struct symbol **parent
 
 int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
 {
-       if (!symbol_conf.use_callchain)
+       if (!symbol_conf.use_callchain || sample->callchain == NULL)
                return 0;
        return callchain_append(he->callchain, &callchain_cursor, sample->period);
 }
index 8f84423..da43619 100644 (file)
@@ -176,4 +176,17 @@ static inline void callchain_cursor_snapshot(struct callchain_cursor *dest,
        dest->first = src->curr;
        dest->nr -= src->pos;
 }
+
+#ifdef HAVE_SKIP_CALLCHAIN_IDX
+extern int arch_skip_callchain_idx(struct machine *machine,
+                       struct thread *thread, struct ip_callchain *chain);
+#else
+static inline int arch_skip_callchain_idx(struct machine *machine __maybe_unused,
+                       struct thread *thread __maybe_unused,
+                       struct ip_callchain *chain __maybe_unused)
+{
+       return -1;
+}
+#endif
+
 #endif /* __PERF_CALLCHAIN_H */
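
The HAVE_SKIP_CALLCHAIN_IDX block is the standard optional-arch-hook pattern: an architecture that implements the hook defines the macro and supplies the real function, while everyone else compiles against a zero-cost static inline stub that returns -1 ("nothing to skip"). A stripped-down sketch of the pattern with a hypothetical hook name:

#include <stdio.h>

/* #define HAVE_ARCH_HOOK 1   -- defined only by arches that implement it */

#ifdef HAVE_ARCH_HOOK
extern int arch_hook(int data);
#else
static inline int arch_hook(int data __attribute__((unused)))
{
        return -1;              /* default: feature not available */
}
#endif

int main(void)
{
        printf("arch_hook() = %d\n", arch_hook(0));
        return 0;
}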
diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c
new file mode 100644 (file)
index 0000000..c5d05ec
--- /dev/null
@@ -0,0 +1,57 @@
+#include "util.h"
+#include "../perf.h"
+#include "cloexec.h"
+#include "asm/bug.h"
+
+static unsigned long flag = PERF_FLAG_FD_CLOEXEC;
+
+static int perf_flag_probe(void)
+{
+       /* use 'safest' configuration as used in perf_evsel__fallback() */
+       struct perf_event_attr attr = {
+               .type = PERF_TYPE_SOFTWARE,
+               .config = PERF_COUNT_SW_CPU_CLOCK,
+       };
+       int fd;
+       int err;
+
+       /* check cloexec flag */
+       fd = sys_perf_event_open(&attr, 0, -1, -1,
+                                PERF_FLAG_FD_CLOEXEC);
+       err = errno;
+
+       if (fd >= 0) {
+               close(fd);
+               return 1;
+       }
+
+       WARN_ONCE(err != EINVAL,
+                 "perf_event_open(..., PERF_FLAG_FD_CLOEXEC) failed with unexpected error %d (%s)\n",
+                 err, strerror(err));
+
+       /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */
+       fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
+       err = errno;
+
+       if (WARN_ONCE(fd < 0,
+                     "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n",
+                     err, strerror(err)))
+               return -1;
+
+       close(fd);
+
+       return 0;
+}
+
+unsigned long perf_event_open_cloexec_flag(void)
+{
+       static bool probed;
+
+       if (!probed) {
+               if (perf_flag_probe() <= 0)
+                       flag = 0;
+               probed = true;
+       }
+
+       return flag;
+}
diff --git a/tools/perf/util/cloexec.h b/tools/perf/util/cloexec.h
new file mode 100644 (file)
index 0000000..94a5a7d
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __PERF_CLOEXEC_H
+#define __PERF_CLOEXEC_H
+
+unsigned long perf_event_open_cloexec_flag(void);
+
+#endif /* __PERF_CLOEXEC_H */
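
PERF_FLAG_FD_CLOEXEC has the value 8 (bit 3), and older kernels reject it with EINVAL, which is why the probe above tries the flag once and falls back to 0. A standalone sketch of the same probe-and-verify idea using the raw syscall; it assumes a Linux system and mirrors the "safest" software clock event:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

#ifndef PERF_FLAG_FD_CLOEXEC
#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* 8 */
#endif

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
                     PERF_FLAG_FD_CLOEXEC);
        if (fd < 0) {
                printf("flag not supported (%s), fall back to 0\n",
                       strerror(errno));
                return 0;
        }

        /* the kernel set close-on-exec atomically, no fcntl() race */
        printf("FD_CLOEXEC set: %d\n",
               (fcntl(fd, F_GETFD) & FD_CLOEXEC) != 0);
        close(fd);
        return 0;
}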
index 24519e1..1e5e2e5 100644 (file)
@@ -350,6 +350,16 @@ static int perf_default_core_config(const char *var __maybe_unused,
        return 0;
 }
 
+static int perf_ui_config(const char *var, const char *value)
+{
+       /* Add other config variables here. */
+       if (!strcmp(var, "ui.show-headers")) {
+               symbol_conf.show_hist_headers = perf_config_bool(var, value);
+               return 0;
+       }
+       return 0;
+}
+
 int perf_default_config(const char *var, const char *value,
                        void *dummy __maybe_unused)
 {
@@ -359,6 +369,9 @@ int perf_default_config(const char *var, const char *value,
        if (!prefixcmp(var, "hist."))
                return perf_hist_config(var, value);
 
+       if (!prefixcmp(var, "ui."))
+               return perf_ui_config(var, value);
+
        /* Add other config variables here. */
        return 0;
 }
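
perf_default_config() dispatches on the section prefix ("hist.", and now "ui.") before the handler matches the full key. A tiny sketch of that two-level lookup with an invented handler:

#include <stdio.h>
#include <string.h>

static int ui_config(const char *key, const char *val)
{
        if (!strcmp(key, "ui.show-headers"))
                printf("show-headers = %s\n", val);
        return 0;
}

static int default_config(const char *key, const char *val)
{
        if (!strncmp(key, "ui.", 3))        /* section prefix first... */
                return ui_config(key, val); /* ...then the full key name */
        return 0;
}

int main(void)
{
        return default_config("ui.show-headers", "true");
}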
index 55de44e..29d720c 100644 (file)
@@ -7,6 +7,7 @@
 
 #include "data.h"
 #include "util.h"
+#include "debug.h"
 
 static bool check_pipe(struct perf_data_file *file)
 {
@@ -65,7 +66,7 @@ static int open_file_read(struct perf_data_file *file)
                goto out_close;
 
        if (!file->force && st.st_uid && (st.st_uid != geteuid())) {
-               pr_err("file %s not owned by current user or root\n",
+               pr_err("File %s not owned by current user or root (use -f to override)\n",
                       file->path);
                goto out_close;
        }
index 299b555..71d4193 100644 (file)
 int verbose;
 bool dump_trace = false, quiet = false;
 
-static int _eprintf(int level, const char *fmt, va_list args)
+static int _eprintf(int level, int var, const char *fmt, va_list args)
 {
        int ret = 0;
 
-       if (verbose >= level) {
+       if (var >= level) {
                if (use_browser >= 1)
                        ui_helpline__vshow(fmt, args);
                else
@@ -30,13 +30,13 @@ static int _eprintf(int level, const char *fmt, va_list args)
        return ret;
 }
 
-int eprintf(int level, const char *fmt, ...)
+int eprintf(int level, int var, const char *fmt, ...)
 {
        va_list args;
        int ret;
 
        va_start(args, fmt);
-       ret = _eprintf(level, fmt, args);
+       ret = _eprintf(level, var, fmt, args);
        va_end(args);
 
        return ret;
@@ -51,9 +51,9 @@ void pr_stat(const char *fmt, ...)
        va_list args;
 
        va_start(args, fmt);
-       _eprintf(1, fmt, args);
+       _eprintf(1, verbose, fmt, args);
        va_end(args);
-       eprintf(1, "\n");
+       eprintf(1, verbose, "\n");
 }
 
 int dump_printf(const char *fmt, ...)
@@ -105,3 +105,47 @@ void trace_event(union perf_event *event)
        }
        printf(".\n");
 }
+
+static struct debug_variable {
+       const char *name;
+       int *ptr;
+} debug_variables[] = {
+       { .name = "verbose", .ptr = &verbose },
+       { .name = NULL, }
+};
+
+int perf_debug_option(const char *str)
+{
+       struct debug_variable *var = &debug_variables[0];
+       char *vstr, *s = strdup(str);
+       int v = 1;
+
+       vstr = strchr(s, '=');
+       if (vstr)
+               *vstr++ = 0;
+
+       while (var->name) {
+               if (!strcmp(s, var->name))
+                       break;
+               var++;
+       }
+
+       if (!var->name) {
+               pr_err("Unknown debug variable name '%s'\n", s);
+               free(s);
+               return -1;
+       }
+
+       if (vstr) {
+               v = atoi(vstr);
+               /*
+                * Allow only values in the range [0, 10];
+                * otherwise fall back to 0.
+                */
+               v = (v < 0) || (v > 10) ? 0 : v;
+       }
+
+       *var->ptr = v;
+       free(s);
+       return 0;
+}
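
--debug accepts either a bare variable name (level 1) or name=level, with the level clamped to [0, 10]. A standalone sketch of the same split-lookup-clamp parse; the single known variable is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int verbose;

static int set_debug_option(const char *str)
{
        char *val, *s = strdup(str);
        int v = 1;                      /* bare name means level 1 */

        if (!s)
                return -1;

        val = strchr(s, '=');
        if (val)
                *val++ = '\0';

        if (strcmp(s, "verbose")) {     /* only one known variable here */
                free(s);
                return -1;
        }

        if (val) {
                v = atoi(val);
                if (v < 0 || v > 10)    /* clamp out-of-range levels to 0 */
                        v = 0;
        }

        verbose = v;
        free(s);
        return 0;
}

int main(void)
{
        set_debug_option("verbose=2");
        printf("verbose=%d\n", verbose);
        return 0;
}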
index 443694c..89fb6b0 100644 (file)
 extern int verbose;
 extern bool quiet, dump_trace;
 
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_err(fmt, ...) \
+       eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+       eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+       eprintf(0, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug(fmt, ...) \
+       eprintf(1, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debugN(n, fmt, ...) \
+       eprintf(n, verbose, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
+
 int dump_printf(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
 void trace_event(union perf_event *event);
 
@@ -19,4 +37,8 @@ int ui__warning(const char *format, ...) __attribute__((format(printf, 1, 2)));
 
 void pr_stat(const char *fmt, ...);
 
+int eprintf(int level, int var, const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+
+int perf_debug_option(const char *str);
+
 #endif /* __PERF_DEBUG_H */
index 64453d6..90d02c6 100644 (file)
@@ -1,3 +1,6 @@
+#include <asm/bug.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 #include "symbol.h"
 #include "dso.h"
 #include "machine.h"
@@ -136,7 +139,48 @@ int dso__read_binary_type_filename(const struct dso *dso,
        return ret;
 }
 
-static int open_dso(struct dso *dso, struct machine *machine)
+/*
+ * Global list of open DSOs and the counter.
+ */
+static LIST_HEAD(dso__data_open);
+static long dso__data_open_cnt;
+
+static void dso__list_add(struct dso *dso)
+{
+       list_add_tail(&dso->data.open_entry, &dso__data_open);
+       dso__data_open_cnt++;
+}
+
+static void dso__list_del(struct dso *dso)
+{
+       list_del(&dso->data.open_entry);
+       WARN_ONCE(dso__data_open_cnt <= 0,
+                 "DSO data fd counter out of bounds.");
+       dso__data_open_cnt--;
+}
+
+static void close_first_dso(void);
+
+static int do_open(char *name)
+{
+       int fd;
+
+       do {
+               fd = open(name, O_RDONLY);
+               if (fd >= 0)
+                       return fd;
+
+               pr_debug("dso open failed, mmap: %s\n", strerror(errno));
+               if (!dso__data_open_cnt || errno != EMFILE)
+                       break;
+
+               close_first_dso();
+       } while (1);
+
+       return -1;
+}
+
+static int __open_dso(struct dso *dso, struct machine *machine)
 {
        int fd;
        char *root_dir = (char *)"";
@@ -154,11 +198,130 @@ static int open_dso(struct dso *dso, struct machine *machine)
                return -EINVAL;
        }
 
-       fd = open(name, O_RDONLY);
+       fd = do_open(name);
        free(name);
        return fd;
 }
 
+static void check_data_close(void);
+
+/**
+ * open_dso - Open DSO data file
+ * @dso: dso object
+ *
+ * Open @dso's data file descriptor and update the
+ * list/count of open DSO objects.
+ */
+static int open_dso(struct dso *dso, struct machine *machine)
+{
+       int fd = __open_dso(dso, machine);
+
+       if (fd >= 0) {
+               dso__list_add(dso);
+               /*
+                * Check if we crossed the allowed number
+                * of opened DSOs and close one if needed.
+                */
+               check_data_close();
+       }
+
+       return fd;
+}
+
+static void close_data_fd(struct dso *dso)
+{
+       if (dso->data.fd >= 0) {
+               close(dso->data.fd);
+               dso->data.fd = -1;
+               dso->data.file_size = 0;
+               dso__list_del(dso);
+       }
+}
+
+/**
+ * close_dso - Close DSO data file
+ * @dso: dso object
+ *
+ * Close @dso's data file descriptor and update the
+ * list/count of open DSO objects.
+ */
+static void close_dso(struct dso *dso)
+{
+       close_data_fd(dso);
+}
+
+static void close_first_dso(void)
+{
+       struct dso *dso;
+
+       dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
+       close_dso(dso);
+}
+
+static rlim_t get_fd_limit(void)
+{
+       struct rlimit l;
+       rlim_t limit = 0;
+
+       /* Allow half of the current open fd limit. */
+       if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+               if (l.rlim_cur == RLIM_INFINITY)
+                       limit = l.rlim_cur;
+               else
+                       limit = l.rlim_cur / 2;
+       } else {
+               pr_err("failed to get fd limit\n");
+               limit = 1;
+       }
+
+       return limit;
+}
+
+static bool may_cache_fd(void)
+{
+       static rlim_t limit;
+
+       if (!limit)
+               limit = get_fd_limit();
+
+       if (limit == RLIM_INFINITY)
+               return true;
+
+       return limit > (rlim_t) dso__data_open_cnt;
+}
+
+/*
+ * Check and close LRU dso if we crossed allowed limit
+ * for opened dso file descriptors. The limit is half
+ * of the RLIMIT_NOFILE files opened.
+ */
+static void check_data_close(void)
+{
+       bool cache_fd = may_cache_fd();
+
+       if (!cache_fd)
+               close_first_dso();
+}
+
+/**
+ * dso__data_close - Close DSO data file
+ * @dso: dso object
+ *
+ * External interface to close @dso's data file descriptor.
+ */
+void dso__data_close(struct dso *dso)
+{
+       close_dso(dso);
+}
+
+/**
+ * dso__data_fd - Get dso's data file descriptor
+ * @dso: dso object
+ * @machine: machine object
+ *
+ * External interface to find dso's file, open it and
+ * return the file descriptor.
+ */
 int dso__data_fd(struct dso *dso, struct machine *machine)
 {
        enum dso_binary_type binary_type_data[] = {
@@ -168,21 +331,44 @@ int dso__data_fd(struct dso *dso, struct machine *machine)
        };
        int i = 0;
 
-       if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND)
-               return open_dso(dso, machine);
+       if (dso->data.status == DSO_DATA_STATUS_ERROR)
+               return -1;
 
-       do {
-               int fd;
+       if (dso->data.fd >= 0)
+               goto out;
 
+       if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
+               dso->data.fd = open_dso(dso, machine);
+               goto out;
+       }
+
+       do {
                dso->binary_type = binary_type_data[i++];
 
-               fd = open_dso(dso, machine);
-               if (fd >= 0)
-                       return fd;
+               dso->data.fd = open_dso(dso, machine);
+               if (dso->data.fd >= 0)
+                       goto out;
 
        } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
+out:
+       if (dso->data.fd >= 0)
+               dso->data.status = DSO_DATA_STATUS_OK;
+       else
+               dso->data.status = DSO_DATA_STATUS_ERROR;
+
+       return dso->data.fd;
+}
 
-       return -EINVAL;
+bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
+{
+       u32 flag = 1 << by;
+
+       if (dso->data.status_seen & flag)
+               return true;
+
+       dso->data.status_seen |= flag;
+
+       return false;
 }
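
A hedged usage sketch: a consumer that wants to warn about unreadable
dso data only once per dso (DSO_DATA_STATUS_SEEN_ITRACE is the flag
added to dso.h below):

	if (!dso__data_status_seen(dso, DSO_DATA_STATUS_SEEN_ITRACE))
		pr_warning("cannot read %s data\n", dso->long_name);
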
 
 static void
@@ -260,16 +446,10 @@ dso_cache__memcpy(struct dso_cache *cache, u64 offset,
 }
 
 static ssize_t
-dso_cache__read(struct dso *dso, struct machine *machine,
-                u64 offset, u8 *data, ssize_t size)
+dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
 {
        struct dso_cache *cache;
        ssize_t ret;
-       int fd;
-
-       fd = dso__data_fd(dso, machine);
-       if (fd < 0)
-               return -1;
 
        do {
                u64 cache_offset;
@@ -283,16 +463,16 @@ dso_cache__read(struct dso *dso, struct machine *machine,
                cache_offset = offset & DSO__DATA_CACHE_MASK;
                ret = -EINVAL;
 
-               if (-1 == lseek(fd, cache_offset, SEEK_SET))
+               if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET))
                        break;
 
-               ret = read(fd, cache->data, DSO__DATA_CACHE_SIZE);
+               ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
                if (ret <= 0)
                        break;
 
                cache->offset = cache_offset;
                cache->size   = ret;
-               dso_cache__insert(&dso->cache, cache);
+               dso_cache__insert(&dso->data.cache, cache);
 
                ret = dso_cache__memcpy(cache, offset, data, size);
 
@@ -301,24 +481,27 @@ dso_cache__read(struct dso *dso, struct machine *machine,
        if (ret <= 0)
                free(cache);
 
-       close(fd);
        return ret;
 }
 
-static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
-                             u64 offset, u8 *data, ssize_t size)
+static ssize_t dso_cache_read(struct dso *dso, u64 offset,
+                             u8 *data, ssize_t size)
 {
        struct dso_cache *cache;
 
-       cache = dso_cache__find(&dso->cache, offset);
+       cache = dso_cache__find(&dso->data.cache, offset);
        if (cache)
                return dso_cache__memcpy(cache, offset, data, size);
        else
-               return dso_cache__read(dso, machine, offset, data, size);
+               return dso_cache__read(dso, offset, data, size);
 }
 
-ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
-                             u64 offset, u8 *data, ssize_t size)
+/*
+ * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
+ * kept in the rb_tree. Any read of already cached data is served
+ * from the cache.
+ */
+static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
 {
        ssize_t r = 0;
        u8 *p = data;
@@ -326,7 +509,7 @@ ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
        do {
                ssize_t ret;
 
-               ret = dso_cache_read(dso, machine, offset, p, size);
+               ret = dso_cache_read(dso, offset, p, size);
                if (ret < 0)
                        return ret;
 
@@ -346,6 +529,89 @@ ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
        return r;
 }
 
+static int data_file_size(struct dso *dso)
+{
+       struct stat st;
+
+       if (!dso->data.file_size) {
+               if (fstat(dso->data.fd, &st)) {
+                       pr_err("dso fstat failed: %s\n", strerror(errno));
+                       return -1;
+               }
+               dso->data.file_size = st.st_size;
+       }
+
+       return 0;
+}
+
+/**
+ * dso__data_size - Return dso data size
+ * @dso: dso object
+ * @machine: machine object
+ *
+ * Return: dso data size
+ */
+off_t dso__data_size(struct dso *dso, struct machine *machine)
+{
+       int fd;
+
+       fd = dso__data_fd(dso, machine);
+       if (fd < 0)
+               return fd;
+
+       if (data_file_size(dso))
+               return -1;
+
+       /* For now, assume the dso data size is close to the file size. */
+       return dso->data.file_size;
+}
+
+static ssize_t data_read_offset(struct dso *dso, u64 offset,
+                               u8 *data, ssize_t size)
+{
+       if (data_file_size(dso))
+               return -1;
+
+       /* Check the offset sanity. */
+       if (offset > dso->data.file_size)
+               return -1;
+
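+       /* Reject u64 wraparound of offset + size. */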
+       if (offset + size < offset)
+               return -1;
+
+       return cached_read(dso, offset, data, size);
+}
+
+/**
+ * dso__data_read_offset - Read data from dso file offset
+ * @dso: dso object
+ * @machine: machine object
+ * @offset: file offset
+ * @data: buffer to store data
+ * @size: size of the @data buffer
+ *
+ * External interface to read data from a dso file offset. Opens
+ * the dso data file and uses cached_read() to get the data.
+ */
+ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
+                             u64 offset, u8 *data, ssize_t size)
+{
+       if (dso__data_fd(dso, machine) < 0)
+               return -1;
+
+       return data_read_offset(dso, offset, data, size);
+}
+
+/**
+ * dso__data_read_addr - Read data from dso address
+ * @dso: dso object
+ * @map: map object
+ * @machine: machine object
+ * @addr: virtual memory address
+ * @data: buffer to store data
+ * @size: size of the @data buffer
+ *
+ * External interface to read data from dso address.
+ */
 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
                            struct machine *machine, u64 addr,
                            u8 *data, ssize_t size)
@@ -473,9 +739,12 @@ struct dso *dso__new(const char *name)
                dso__set_short_name(dso, dso->name, false);
                for (i = 0; i < MAP__NR_TYPES; ++i)
                        dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
-               dso->cache = RB_ROOT;
+               dso->data.cache = RB_ROOT;
+               dso->data.fd = -1;
+               dso->data.status = DSO_DATA_STATUS_UNKNOWN;
                dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
                dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
+               dso->is_64_bit = (sizeof(void *) == 8);
                dso->loaded = 0;
                dso->rel = 0;
                dso->sorted_by_name = 0;
@@ -485,6 +754,7 @@ struct dso *dso__new(const char *name)
                dso->kernel = DSO_TYPE_USER;
                dso->needs_swap = DSO_SWAP__UNSET;
                INIT_LIST_HEAD(&dso->node);
+               INIT_LIST_HEAD(&dso->data.open_entry);
        }
 
        return dso;
@@ -506,7 +776,8 @@ void dso__delete(struct dso *dso)
                dso->long_name_allocated = false;
        }
 
-       dso_cache__free(&dso->cache);
+       dso__data_close(dso);
+       dso_cache__free(&dso->data.cache);
        dso__free_a2l(dso);
        zfree(&dso->symsrc_filename);
        free(dso);
@@ -669,3 +940,14 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
 
        return ret;
 }
+
+enum dso_type dso__type(struct dso *dso, struct machine *machine)
+{
+       int fd;
+
+       fd = dso__data_fd(dso, machine);
+       if (fd < 0)
+               return DSO__TYPE_UNKNOWN;
+
+       return dso__type_fd(fd);
+}
index 38efe95..5e463c0 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/rbtree.h>
 #include <stdbool.h>
 #include <linux/types.h>
+#include <linux/bitops.h>
 #include "map.h"
 #include "build-id.h"
 
@@ -40,6 +41,23 @@ enum dso_swap_type {
        DSO_SWAP__YES,
 };
 
+enum dso_data_status {
+       DSO_DATA_STATUS_ERROR   = -1,
+       DSO_DATA_STATUS_UNKNOWN = 0,
+       DSO_DATA_STATUS_OK      = 1,
+};
+
+enum dso_data_status_seen {
+       DSO_DATA_STATUS_SEEN_ITRACE,
+};
+
+enum dso_type {
+       DSO__TYPE_UNKNOWN,
+       DSO__TYPE_64BIT,
+       DSO__TYPE_32BIT,
+       DSO__TYPE_X32BIT,
+};
+
 #define DSO__SWAP(dso, type, val)                      \
 ({                                                     \
        type ____r = val;                               \
@@ -76,7 +94,6 @@ struct dso {
        struct list_head node;
        struct rb_root   symbols[MAP__NR_TYPES];
        struct rb_root   symbol_names[MAP__NR_TYPES];
-       struct rb_root   cache;
        void             *a2l;
        char             *symsrc_filename;
        unsigned int     a2l_fails;
@@ -91,6 +108,7 @@ struct dso {
        u8               annotate_warned:1;
        u8               short_name_allocated:1;
        u8               long_name_allocated:1;
+       u8               is_64_bit:1;
        u8               sorted_by_name;
        u8               loaded;
        u8               rel;
@@ -99,6 +117,17 @@ struct dso {
        const char       *long_name;
        u16              long_name_len;
        u16              short_name_len;
+
+       /* dso data file */
+       struct {
+               struct rb_root   cache;
+               int              fd;
+               int              status;
+               u32              status_seen;
+               size_t           file_size;
+               struct list_head open_entry;
+       } data;
+
        char             name[0];
 };
 
@@ -141,12 +170,55 @@ char dso__symtab_origin(const struct dso *dso);
 int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
                                   char *root_dir, char *filename, size_t size);
 
+/*
+ * The dso__data_* external interface provides the following functions:
+ *   dso__data_fd
+ *   dso__data_close
+ *   dso__data_size
+ *   dso__data_read_offset
+ *   dso__data_read_addr
+ *
+ * Please refer to the dso.c object code for the documentation of
+ * each function and its arguments. The following text explains the
+ * dso file descriptor caching.
+ *
+ * The dso__data* interface allows caching of opened file descriptors
+ * to speed up dso data accesses. The idea is to leave the file
+ * descriptor opened ideally for the whole life of the dso object.
+ *
+ * The current usage of the dso__data_* interface is as follows:
+ *
+ * Get DSO's fd:
+ *   int fd = dso__data_fd(dso, machine);
+ *   USE 'fd' SOMEHOW
+ *
+ * Read DSO's data:
+ *   n = dso__data_read_offset(dso, machine, 0, buf, BUFSIZE);
+ *   n = dso__data_read_addr(dso, map, machine, 0, buf, BUFSIZE);
+ *
+ * Eventually close DSO's fd:
+ *   dso__data_close(dso);
+ *
+ * It is not necessary to close the DSO object's data file. Each time
+ * a new DSO data file is opened, the limit (RLIMIT_NOFILE/2) is
+ * checked. Once it is crossed, the oldest opened DSO object is closed.
+ *
+ * The dso__delete() function calls close_dso() to ensure the data
+ * file descriptor gets closed/unmapped before the dso object is
+ * freed.
+ *
+ * TODO
+ */
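
A concrete illustration of the pattern above — a hedged sketch only,
assuming 'dso' and 'machine' come from an existing perf session and
with error handling trimmed:

	u8 buf[4096];
	ssize_t n;

	if (dso__data_fd(dso, machine) < 0)	/* opens and caches the fd */
		return -1;

	n = dso__data_read_offset(dso, machine, 0, buf, sizeof(buf));
	if (n < 0)
		return -1;

	dso__data_close(dso);			/* optional, see above */
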
 int dso__data_fd(struct dso *dso, struct machine *machine);
+void dso__data_close(struct dso *dso);
+
+off_t dso__data_size(struct dso *dso, struct machine *machine);
 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
                              u64 offset, u8 *data, ssize_t size);
 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
                            struct machine *machine, u64 addr,
                            u8 *data, ssize_t size);
+bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by);
 
 struct map *dso__new_map(const char *name);
 struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
@@ -181,4 +253,6 @@ static inline bool dso__is_kcore(struct dso *dso)
 
 void dso__free_a2l(struct dso *dso);
 
+enum dso_type dso__type(struct dso *dso, struct machine *machine);
+
 #endif /* __PERF_DSO */
index 65795b8..1398c83 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/types.h>
+#include <sys/mman.h>
 #include "event.h"
 #include "debug.h"
 #include "hist.h"
@@ -178,13 +179,14 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                return -1;
        }
 
-       event->header.type = PERF_RECORD_MMAP;
+       event->header.type = PERF_RECORD_MMAP2;
 
        while (1) {
                char bf[BUFSIZ];
                char prot[5];
                char execname[PATH_MAX];
                char anonstr[] = "//anon";
+               unsigned int ino;
                size_t size;
                ssize_t n;
 
@@ -195,15 +197,20 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                strcpy(execname, "");
 
                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
-               n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
-                      &event->mmap.start, &event->mmap.len, prot,
-                      &event->mmap.pgoff,
-                      execname);
+               n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
+                      &event->mmap2.start, &event->mmap2.len, prot,
+                      &event->mmap2.pgoff, &event->mmap2.maj,
+                      &event->mmap2.min,
+                      &ino, execname);
+
                /*
                 * Anon maps don't have the execname.
                 */
-               if (n < 4)
+               if (n < 7)
                        continue;
+
+               event->mmap2.ino = (u64)ino;
+
                /*
                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
                 */
@@ -212,6 +219,21 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                else
                        event->header.misc = PERF_RECORD_MISC_GUEST_USER;
 
+               /*
+                * Map protection and flags bits, decoded from the perms
+                * field of /proc/PID/maps (e.g. "r-xp" decodes to
+                * PROT_READ | PROT_EXEC and MAP_PRIVATE).
+                */
+               event->mmap2.prot = 0;
+               event->mmap2.flags = 0;
+               if (prot[0] == 'r')
+                       event->mmap2.prot |= PROT_READ;
+               if (prot[1] == 'w')
+                       event->mmap2.prot |= PROT_WRITE;
+               if (prot[2] == 'x')
+                       event->mmap2.prot |= PROT_EXEC;
+
+               if (prot[3] == 's')
+                       event->mmap2.flags |= MAP_SHARED;
+               else
+                       event->mmap2.flags |= MAP_PRIVATE;
+
                if (prot[2] != 'x') {
                        if (!mmap_data || prot[0] != 'r')
                                continue;
@@ -223,15 +245,15 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                        strcpy(execname, anonstr);
 
                size = strlen(execname) + 1;
-               memcpy(event->mmap.filename, execname, size);
+               memcpy(event->mmap2.filename, execname, size);
                size = PERF_ALIGN(size, sizeof(u64));
-               event->mmap.len -= event->mmap.start;
-               event->mmap.header.size = (sizeof(event->mmap) -
-                                       (sizeof(event->mmap.filename) - size));
-               memset(event->mmap.filename + size, 0, machine->id_hdr_size);
-               event->mmap.header.size += machine->id_hdr_size;
-               event->mmap.pid = tgid;
-               event->mmap.tid = pid;
+               event->mmap2.len -= event->mmap2.start;
+               event->mmap2.header.size = (sizeof(event->mmap2) -
+                                       (sizeof(event->mmap2.filename) - size));
+               memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+               event->mmap2.header.size += machine->id_hdr_size;
+               event->mmap2.pid = tgid;
+               event->mmap2.tid = pid;
 
                if (process(tool, event, &synth_sample, machine) != 0) {
                        rc = -1;
@@ -581,7 +603,14 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
 
 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
 {
-       return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid);
+       const char *s;
+
+       if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
+               s = " exec";
+       else
+               s = "";
+
+       return fprintf(fp, "%s: %s:%d\n", s, event->comm.comm, event->comm.tid);
 }
 
 int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
@@ -612,12 +641,15 @@ size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
 {
        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
-                          " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n",
+                          " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
                       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
                       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
                       event->mmap2.min, event->mmap2.ino,
                       event->mmap2.ino_generation,
-                      (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
+                      (event->mmap2.prot & PROT_READ) ? 'r' : '-',
+                      (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
+                      (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
+                      (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
                       event->mmap2.filename);
 }
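
With prot and flags decoded, the PERF_RECORD_MMAP2 dump now carries the
familiar rwxp/rwxs-style permission string; a representative line with
hypothetical values would be:

	1234/1234: [0x400000(0xc000) @ 0 fd:01 41038 0]: r-xp /bin/cat
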
 
@@ -756,6 +788,7 @@ try_again:
                    cpumode == PERF_RECORD_MISC_USER &&
                    machine && mg != &machine->kmaps) {
                        mg = &machine->kmaps;
+                       load_map = true;
                        goto try_again;
                }
        } else {
@@ -841,3 +874,45 @@ int perf_event__preprocess_sample(const union perf_event *event,
 
        return 0;
 }
+
+bool is_bts_event(struct perf_event_attr *attr)
+{
+       return attr->type == PERF_TYPE_HARDWARE &&
+              (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
+              attr->sample_period == 1;
+}
+
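+/*
+ * The 'addr' field of a sample can be resolved to a symbol only for
+ * certain events: page fault software events and BTS branch events.
+ */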
+bool sample_addr_correlates_sym(struct perf_event_attr *attr)
+{
+       if (attr->type == PERF_TYPE_SOFTWARE &&
+           (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
+            attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
+            attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
+               return true;
+
+       if (is_bts_event(attr))
+               return true;
+
+       return false;
+}
+
+void perf_event__preprocess_sample_addr(union perf_event *event,
+                                       struct perf_sample *sample,
+                                       struct machine *machine,
+                                       struct thread *thread,
+                                       struct addr_location *al)
+{
+       u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+
+       thread__find_addr_map(thread, machine, cpumode, MAP__FUNCTION,
+                             sample->addr, al);
+       if (!al->map)
+               thread__find_addr_map(thread, machine, cpumode, MAP__VARIABLE,
+                                     sample->addr, al);
+
+       al->cpu = sample->cpu;
+       al->sym = NULL;
+
+       if (al->map)
+               al->sym = map__find_symbol(al->map, al->addr, NULL);
+}
index d970232..94d6976 100644 (file)
@@ -7,6 +7,7 @@
 #include "../perf.h"
 #include "map.h"
 #include "build-id.h"
+#include "perf_regs.h"
 
 struct mmap_event {
        struct perf_event_header header;
@@ -27,6 +28,8 @@ struct mmap2_event {
        u32 min;
        u64 ino;
        u64 ino_generation;
+       u32 prot;
+       u32 flags;
        char filename[PATH_MAX];
 };
 
@@ -87,6 +90,10 @@ struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;
+
+       /* Cached values/mask filled by first register access. */
+       u64 cache_regs[PERF_REGS_MAX];
+       u64 cache_mask;
 };
 
 struct stack_dump {
@@ -281,6 +288,16 @@ int perf_event__preprocess_sample(const union perf_event *event,
                                  struct addr_location *al,
                                  struct perf_sample *sample);
 
+struct thread;
+
+bool is_bts_event(struct perf_event_attr *attr);
+bool sample_addr_correlates_sym(struct perf_event_attr *attr);
+void perf_event__preprocess_sample_addr(union perf_event *event,
+                                       struct perf_sample *sample,
+                                       struct machine *machine,
+                                       struct thread *thread,
+                                       struct addr_location *al);
+
 const char *perf_event__name(unsigned int id);
 
 size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
index 59ef280..814e954 100644 (file)
@@ -606,12 +606,17 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
        return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist,
-                              int idx, int prot, int mask, int fd)
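+/* mmap parameters shared by all per-cpu/per-thread ring-buffer maps */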
+struct mmap_params {
+       int prot;
+       int mask;
+};
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
+                              struct mmap_params *mp, int fd)
 {
        evlist->mmap[idx].prev = 0;
-       evlist->mmap[idx].mask = mask;
-       evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
+       evlist->mmap[idx].mask = mp->mask;
+       evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
                                      MAP_SHARED, fd, 0);
        if (evlist->mmap[idx].base == MAP_FAILED) {
                pr_debug2("failed to mmap perf event ring buffer, error %d\n",
@@ -625,8 +630,8 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
 }
 
 static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
-                                      int prot, int mask, int cpu, int thread,
-                                      int *output)
+                                      struct mmap_params *mp, int cpu,
+                                      int thread, int *output)
 {
        struct perf_evsel *evsel;
 
@@ -635,8 +640,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 
                if (*output == -1) {
                        *output = fd;
-                       if (__perf_evlist__mmap(evlist, idx, prot, mask,
-                                               *output) < 0)
+                       if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
                                return -1;
                } else {
                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
@@ -651,8 +655,8 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
        return 0;
 }
 
-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
-                                    int mask)
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
+                                    struct mmap_params *mp)
 {
        int cpu, thread;
        int nr_cpus = cpu_map__nr(evlist->cpus);
@@ -663,8 +667,8 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
                int output = -1;
 
                for (thread = 0; thread < nr_threads; thread++) {
-                       if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
-                                                       cpu, thread, &output))
+                       if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
+                                                       thread, &output))
                                goto out_unmap;
                }
        }
@@ -677,8 +681,8 @@ out_unmap:
        return -1;
 }
 
-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
-                                       int mask)
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
+                                       struct mmap_params *mp)
 {
        int thread;
        int nr_threads = thread_map__nr(evlist->threads);
@@ -687,8 +691,8 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;
 
-               if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
-                                               thread, &output))
+               if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
+                                               &output))
                        goto out_unmap;
        }
 
@@ -793,7 +797,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
        const struct thread_map *threads = evlist->threads;
-       int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
+       struct mmap_params mp = {
+               .prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
+       };
 
        if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
                return -ENOMEM;
@@ -804,7 +810,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
        evlist->overwrite = overwrite;
        evlist->mmap_len = perf_evlist__mmap_size(pages);
        pr_debug("mmap size %zuB\n", evlist->mmap_len);
-       mask = evlist->mmap_len - page_size - 1;
+       mp.mask = evlist->mmap_len - page_size - 1;
 
        evlist__for_each(evlist, evsel) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -814,9 +820,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
        }
 
        if (cpu_map__empty(cpus))
-               return perf_evlist__mmap_per_thread(evlist, prot, mask);
+               return perf_evlist__mmap_per_thread(evlist, &mp);
 
-       return perf_evlist__mmap_per_cpu(evlist, prot, mask);
+       return perf_evlist__mmap_per_cpu(evlist, &mp);
 }
 
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
@@ -1214,10 +1220,11 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
                                             "For your workloads it needs to be <= 1\nHint:\t");
                }
                printed += scnprintf(buf + printed, size - printed,
-                                    "For system wide tracing it needs to be set to -1");
+                                    "For system wide tracing it needs to be set to -1.\n");
 
                printed += scnprintf(buf + printed, size - printed,
-                                   ".\nHint:\tThe current value is %d.", value);
+                                   "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
+                                   "Hint:\tThe current value is %d.", value);
                break;
        default:
                scnprintf(buf, size, "%s", emsg);
index 5c28d82..21a373e 100644 (file)
@@ -29,6 +29,7 @@ static struct {
        bool sample_id_all;
        bool exclude_guest;
        bool mmap2;
+       bool cloexec;
 } perf_missing_features;
 
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
@@ -589,10 +590,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
        }
 
        /*
-        * We default some events to a 1 default interval. But keep
+        * We default some events to have a default interval. But keep
         * it a weak assumption overridable by the user.
         */
-       if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
+       if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
                        perf_evsel__set_sample_bit(evsel, PERIOD);
@@ -623,7 +624,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                attr->mmap_data = track;
        }
 
-       if (opts->call_graph_enabled)
+       if (opts->call_graph_enabled && !evsel->no_aux_samples)
                perf_evsel__config_callgraph(evsel, opts);
 
        if (target__has_cpu(&opts->target))
@@ -637,7 +638,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
             target__has_cpu(&opts->target) || per_cpu))
                perf_evsel__set_sample_bit(evsel, TIME);
 
-       if (opts->raw_samples) {
+       if (opts->raw_samples && !evsel->no_aux_samples) {
                perf_evsel__set_sample_bit(evsel, TIME);
                perf_evsel__set_sample_bit(evsel, RAW);
                perf_evsel__set_sample_bit(evsel, CPU);
@@ -650,7 +651,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                attr->watermark = 0;
                attr->wakeup_events = 1;
        }
-       if (opts->branch_stack) {
+       if (opts->branch_stack && !evsel->no_aux_samples) {
                perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
                attr->branch_sample_type = opts->branch_stack;
        }
@@ -659,6 +660,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                perf_evsel__set_sample_bit(evsel, WEIGHT);
 
        attr->mmap  = track;
+       attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;
 
        if (opts->sample_transaction)
@@ -680,6 +682,11 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
        if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
                !opts->initial_delay)
                attr->enable_on_exec = 1;
+
+       if (evsel->immediate) {
+               attr->disabled = 0;
+               attr->enable_on_exec = 0;
+       }
 }
 
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -959,6 +966,7 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
        ret += PRINT_ATTR2(exclude_user, exclude_kernel);
        ret += PRINT_ATTR2(exclude_hv, exclude_idle);
        ret += PRINT_ATTR2(mmap, comm);
+       ret += PRINT_ATTR2(mmap2, comm_exec);
        ret += PRINT_ATTR2(freq, inherit_stat);
        ret += PRINT_ATTR2(enable_on_exec, task);
        ret += PRINT_ATTR2(watermark, precise_ip);
@@ -966,7 +974,6 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
        ret += PRINT_ATTR2(exclude_host, exclude_guest);
        ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
                            "excl.callchain_user", exclude_callchain_user);
-       ret += PRINT_ATTR_U32(mmap2);
 
        ret += PRINT_ATTR_U32(wakeup_events);
        ret += PRINT_ATTR_U32(wakeup_watermark);
@@ -988,7 +995,7 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                              struct thread_map *threads)
 {
        int cpu, thread;
-       unsigned long flags = 0;
+       unsigned long flags = PERF_FLAG_FD_CLOEXEC;
        int pid = -1, err;
        enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
 
@@ -997,11 +1004,13 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                return -ENOMEM;
 
        if (evsel->cgrp) {
-               flags = PERF_FLAG_PID_CGROUP;
+               flags |= PERF_FLAG_PID_CGROUP;
                pid = evsel->cgrp->fd;
        }
 
 fallback_missing_features:
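+       /*
+        * Retried with progressively fewer optional features whenever
+        * sys_perf_event_open() rejects the attr on an older kernel.
+        */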
+       if (perf_missing_features.cloexec)
+               flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
        if (perf_missing_features.mmap2)
                evsel->attr.mmap2 = 0;
        if (perf_missing_features.exclude_guest)
@@ -1070,7 +1079,10 @@ try_fallback:
        if (err != -EINVAL || cpu > 0 || thread > 0)
                goto out_close;
 
-       if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
+       if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
+               perf_missing_features.cloexec = true;
+               goto fallback_missing_features;
+       } else if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
                perf_missing_features.mmap2 = true;
                goto fallback_missing_features;
        } else if (!perf_missing_features.exclude_guest &&
@@ -1939,6 +1951,7 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
                if_print(mmap);
                if_print(mmap2);
                if_print(comm);
+               if_print(comm_exec);
                if_print(freq);
                if_print(inherit_stat);
                if_print(enable_on_exec);
index a52e9a5..d7f93ce 100644 (file)
@@ -83,6 +83,8 @@ struct perf_evsel {
        int                     is_pos;
        bool                    supported;
        bool                    needs_swap;
+       bool                    no_aux_samples;
+       bool                    immediate;
        /* parse modifier helper */
        int                     exclude_GH;
        int                     nr_members;
index 893f8e2..158c787 100644 (file)
@@ -200,6 +200,47 @@ static int write_buildid(const char *name, size_t name_len, u8 *build_id,
        return write_padded(fd, name, name_len + 1, len);
 }
 
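+/*
+ * Mark every dso as hit so that a build-id table entry gets written
+ * for all of them, not only for those that were actually sampled.
+ */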
+static int __dsos__hit_all(struct list_head *head)
+{
+       struct dso *pos;
+
+       list_for_each_entry(pos, head, node)
+               pos->hit = true;
+
+       return 0;
+}
+
+static int machine__hit_all_dsos(struct machine *machine)
+{
+       int err;
+
+       err = __dsos__hit_all(&machine->kernel_dsos);
+       if (err)
+               return err;
+
+       return __dsos__hit_all(&machine->user_dsos);
+}
+
+int dsos__hit_all(struct perf_session *session)
+{
+       struct rb_node *nd;
+       int err;
+
+       err = machine__hit_all_dsos(&session->machines.host);
+       if (err)
+               return err;
+
+       for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
+               struct machine *pos = rb_entry(nd, struct machine, rb_node);
+
+               err = machine__hit_all_dsos(pos);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
+
 static int __dsos__write_buildid_table(struct list_head *head,
                                       struct machine *machine,
                                       pid_t pid, u16 misc, int fd)
@@ -215,9 +256,9 @@ static int __dsos__write_buildid_table(struct list_head *head,
                if (!pos->hit)
                        continue;
 
-               if (is_vdso_map(pos->short_name)) {
-                       name = (char *) VDSO__MAP_NAME;
-                       name_len = sizeof(VDSO__MAP_NAME) + 1;
+               if (dso__is_vdso(pos)) {
+                       name = pos->short_name;
+                       name_len = pos->short_name_len + 1;
                } else if (dso__is_kcore(pos)) {
                        machine__mmap_name(machine, nm, sizeof(nm));
                        name = nm;
@@ -298,7 +339,7 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
 
        len = scnprintf(filename, size, "%s%s%s",
                       debugdir, slash ? "/" : "",
-                      is_vdso ? VDSO__MAP_NAME : realname);
+                      is_vdso ? DSO__NAME_VDSO : realname);
        if (mkdir_p(filename, 0755))
                goto out_free;
 
@@ -386,7 +427,7 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine,
                               const char *debugdir)
 {
        bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';
-       bool is_vdso = is_vdso_map(dso->short_name);
+       bool is_vdso = dso__is_vdso(dso);
        const char *name = dso->long_name;
        char nm[PATH_MAX];
 
index d08cfe4..8f5cbae 100644 (file)
@@ -151,6 +151,8 @@ int perf_event__process_build_id(struct perf_tool *tool,
                                 struct perf_session *session);
 bool is_perf_magic(u64 magic);
 
+int dsos__hit_all(struct perf_session *session);
+
 /*
  * arch specific callback
  */
index 5a0a4b2..30df618 100644 (file)
@@ -128,6 +128,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
+                       hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
+                                          symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
@@ -439,9 +441,10 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
                        .map    = al->map,
                        .sym    = al->sym,
                },
-               .cpu    = al->cpu,
-               .ip     = al->addr,
-               .level  = al->level,
+               .cpu     = al->cpu,
+               .cpumode = al->cpumode,
+               .ip      = al->addr,
+               .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
index d2bf035..742f49a 100644 (file)
@@ -72,6 +72,7 @@ enum hist_column {
        HISTC_MEM_TLB,
        HISTC_MEM_LVL,
        HISTC_MEM_SNOOP,
+       HISTC_MEM_DCACHELINE,
        HISTC_TRANSACTION,
        HISTC_NR_COLS, /* Last entry */
 };
index 9844c31..09e8e7a 100644 (file)
@@ -94,27 +94,6 @@ static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
        return (i >= ssize) ? (ssize - 1) : i;
 }
 
-int eprintf(int level,
-           const char *fmt, ...) __attribute__((format(printf, 2, 3)));
-
-#ifndef pr_fmt
-#define pr_fmt(fmt) fmt
-#endif
-
-#define pr_err(fmt, ...) \
-       eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning(fmt, ...) \
-       eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_info(fmt, ...) \
-       eprintf(0, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debug(fmt, ...) \
-       eprintf(1, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debugN(n, fmt, ...) \
-       eprintf(n, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debug2(fmt, ...) pr_debugN(2, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debug3(fmt, ...) pr_debugN(3, pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_debug4(fmt, ...) pr_debugN(4, pr_fmt(fmt), ##__VA_ARGS__)
-
 /*
  * This looks more complex than it should be. But we need to
  * get the type for the ~ right in round_down (it needs to be
diff --git a/tools/perf/util/kvm-stat.h b/tools/perf/util/kvm-stat.h
new file mode 100644 (file)
index 0000000..0b5a8cd
--- /dev/null
@@ -0,0 +1,140 @@
+#ifndef __PERF_KVM_STAT_H
+#define __PERF_KVM_STAT_H
+
+#include "../perf.h"
+#include "evsel.h"
+#include "evlist.h"
+#include "session.h"
+#include "tool.h"
+#include "stat.h"
+
+struct event_key {
+       #define INVALID_KEY     (~0ULL)
+       u64 key;
+       int info;
+       struct exit_reasons_table *exit_reasons;
+};
+
+struct kvm_event_stats {
+       u64 time;
+       struct stats stats;
+};
+
+struct kvm_event {
+       struct list_head hash_entry;
+       struct rb_node rb;
+
+       struct event_key key;
+
+       struct kvm_event_stats total;
+
+       #define DEFAULT_VCPU_NUM 8
+       int max_vcpu;
+       struct kvm_event_stats *vcpu;
+};
+
+typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);
+
+struct kvm_event_key {
+       const char *name;
+       key_cmp_fun key;
+};
+
+struct perf_kvm_stat;
+
+struct child_event_ops {
+       void (*get_key)(struct perf_evsel *evsel,
+                       struct perf_sample *sample,
+                       struct event_key *key);
+       const char *name;
+};
+
+struct kvm_events_ops {
+       bool (*is_begin_event)(struct perf_evsel *evsel,
+                              struct perf_sample *sample,
+                              struct event_key *key);
+       bool (*is_end_event)(struct perf_evsel *evsel,
+                            struct perf_sample *sample, struct event_key *key);
+       struct child_event_ops *child_ops;
+       void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
+                          char *decode);
+       const char *name;
+};
+
+struct exit_reasons_table {
+       unsigned long exit_code;
+       const char *reason;
+};
+
+#define EVENTS_BITS            12
+#define EVENTS_CACHE_SIZE      (1UL << EVENTS_BITS)
+
+struct perf_kvm_stat {
+       struct perf_tool    tool;
+       struct record_opts  opts;
+       struct perf_evlist  *evlist;
+       struct perf_session *session;
+
+       const char *file_name;
+       const char *report_event;
+       const char *sort_key;
+       int trace_vcpu;
+
+       struct exit_reasons_table *exit_reasons;
+       const char *exit_reasons_isa;
+
+       struct kvm_events_ops *events_ops;
+       key_cmp_fun compare;
+       struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];
+
+       u64 total_time;
+       u64 total_count;
+       u64 lost_events;
+       u64 duration;
+
+       const char *pid_str;
+       struct intlist *pid_list;
+
+       struct rb_root result;
+
+       int timerfd;
+       unsigned int display_time;
+       bool live;
+};
+
+struct kvm_reg_events_ops {
+       const char *name;
+       struct kvm_events_ops *ops;
+};
+
+void exit_event_get_key(struct perf_evsel *evsel,
+                       struct perf_sample *sample,
+                       struct event_key *key);
+bool exit_event_begin(struct perf_evsel *evsel,
+                     struct perf_sample *sample,
+                     struct event_key *key);
+bool exit_event_end(struct perf_evsel *evsel,
+                   struct perf_sample *sample,
+                   struct event_key *key);
+void exit_event_decode_key(struct perf_kvm_stat *kvm,
+                          struct event_key *key,
+                          char *decode);
+
+bool kvm_exit_event(struct perf_evsel *evsel);
+bool kvm_entry_event(struct perf_evsel *evsel);
+
+#define define_exit_reasons_table(name, symbols)       \
+       static struct exit_reasons_table name[] = {     \
+               symbols, { -1, NULL }                   \
+       }
+
+/*
+ * arch specific callbacks and data structures
+ */
+int cpu_isa_init(struct perf_kvm_stat *kvm, const char *cpuid);
+
+extern const char * const kvm_events_tp[];
+extern struct kvm_reg_events_ops kvm_reg_events_ops[];
+extern const char * const kvm_skip_events[];
+
+#endif /* __PERF_KVM_STAT_H */
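
An architecture hooks into this by supplying the tables and ops
declared above. A hedged sketch of the table side, where
VMX_EXIT_REASONS stands in for the 'code, "name"' pair list an arch
would provide (e.g. from the kvm uapi headers):

	define_exit_reasons_table(vmx_exit_reasons, VMX_EXIT_REASONS);

which expands to a NULL-terminated
'static struct exit_reasons_table vmx_exit_reasons[]' array.
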
index 7409ac8..16bba9f 100644 (file)
@@ -8,6 +8,7 @@
 #include "sort.h"
 #include "strlist.h"
 #include "thread.h"
+#include "vdso.h"
 #include <stdbool.h>
 #include <symbol/kallsyms.h>
 #include "unwind.h"
@@ -23,6 +24,8 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
        INIT_LIST_HEAD(&machine->dead_threads);
        machine->last_match = NULL;
 
+       machine->vdso_info = NULL;
+
        machine->kmaps.machine = machine;
        machine->pid = pid;
 
@@ -34,7 +37,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
                return -ENOMEM;
 
        if (pid != HOST_KERNEL_ID) {
-               struct thread *thread = machine__findnew_thread(machine, 0,
+               struct thread *thread = machine__findnew_thread(machine, -1,
                                                                pid);
                char comm[64];
 
@@ -45,6 +48,8 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
                thread__set_comm(thread, comm, 0);
        }
 
+       machine->current_tid = NULL;
+
        return 0;
 }
 
@@ -103,7 +108,9 @@ void machine__exit(struct machine *machine)
        map_groups__exit(&machine->kmaps);
        dsos__delete(&machine->user_dsos);
        dsos__delete(&machine->kernel_dsos);
+       vdso__exit(machine);
        zfree(&machine->root_dir);
+       zfree(&machine->current_tid);
 }
 
 void machine__delete(struct machine *machine)
@@ -272,6 +279,52 @@ void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
        return;
 }
 
+static void machine__update_thread_pid(struct machine *machine,
+                                      struct thread *th, pid_t pid)
+{
+       struct thread *leader;
+
+       if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
+               return;
+
+       th->pid_ = pid;
+
+       if (th->pid_ == th->tid)
+               return;
+
+       leader = machine__findnew_thread(machine, th->pid_, th->pid_);
+       if (!leader)
+               goto out_err;
+
+       if (!leader->mg)
+               leader->mg = map_groups__new();
+
+       if (!leader->mg)
+               goto out_err;
+
+       if (th->mg == leader->mg)
+               return;
+
+       if (th->mg) {
+               /*
+                * Maps are created from MMAP events which provide the pid and
+                * tid.  Consequently there never should be any maps on a thread
+                * with an unknown pid.  Just print an error if there are.
+                */
+               if (!map_groups__empty(th->mg))
+                       pr_err("Discarding thread maps for %d:%d\n",
+                              th->pid_, th->tid);
+               map_groups__delete(th->mg);
+       }
+
+       th->mg = map_groups__get(leader->mg);
+
+       return;
+
+out_err:
+       pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
+}
+
 static struct thread *__machine__findnew_thread(struct machine *machine,
                                                pid_t pid, pid_t tid,
                                                bool create)
@@ -285,10 +338,10 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
         * so most of the time we don't have to look up
         * the full rbtree:
         */
-       if (machine->last_match && machine->last_match->tid == tid) {
-               if (pid && pid != machine->last_match->pid_)
-                       machine->last_match->pid_ = pid;
-               return machine->last_match;
+       th = machine->last_match;
+       if (th && th->tid == tid) {
+               machine__update_thread_pid(machine, th, pid);
+               return th;
        }
 
        while (*p != NULL) {
@@ -297,8 +350,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
 
                if (th->tid == tid) {
                        machine->last_match = th;
-                       if (pid && pid != th->pid_)
-                               th->pid_ = pid;
+                       machine__update_thread_pid(machine, th, pid);
                        return th;
                }
 
@@ -325,8 +377,10 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
                 * within thread__init_map_groups to find the thread
                 * leader and that would screw up the rb tree.
                 */
-               if (thread__init_map_groups(th, machine))
+               if (thread__init_map_groups(th, machine)) {
+                       thread__delete(th);
                        return NULL;
+               }
        }
 
        return th;
@@ -496,18 +550,6 @@ struct process_args {
        u64 start;
 };
 
-static int symbol__in_kernel(void *arg, const char *name,
-                            char type __maybe_unused, u64 start)
-{
-       struct process_args *args = arg;
-
-       if (strchr(name, '['))
-               return 0;
-
-       args->start = start;
-       return 1;
-}
-
 static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
                                           size_t bufsz)
 {
@@ -517,27 +559,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
                scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
 }
 
-/* Figure out the start address of kernel map from /proc/kallsyms */
-static u64 machine__get_kernel_start_addr(struct machine *machine)
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
+/*
+ * Figure out the start address of the kernel map from /proc/kallsyms.
+ * Returns the name of the start symbol in *symbol_name. Pass in NULL
+ * as symbol_name if the name is not needed.
+ */
+static u64 machine__get_kernel_start_addr(struct machine *machine,
+                                         const char **symbol_name)
 {
        char filename[PATH_MAX];
-       struct process_args args;
+       int i;
+       const char *name;
+       u64 addr = 0;
 
        machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
        if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                return 0;
 
-       if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
-               return 0;
+       for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+               addr = kallsyms__get_function_start(filename, name);
+               if (addr)
+                       break;
+       }
 
-       return args.start;
+       if (symbol_name)
+               *symbol_name = name;
+
+       return addr;
 }
 
 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 {
        enum map_type type;
-       u64 start = machine__get_kernel_start_addr(machine);
+       u64 start = machine__get_kernel_start_addr(machine, NULL);
 
        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;
@@ -852,23 +908,11 @@ static int machine__create_modules(struct machine *machine)
        return 0;
 }
 
-const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
-
 int machine__create_kernel_maps(struct machine *machine)
 {
        struct dso *kernel = machine__get_kernel(machine);
-       char filename[PATH_MAX];
        const char *name;
-       u64 addr = 0;
-       int i;
-
-       machine__get_kallsyms_filename(machine, filename, PATH_MAX);
-
-       for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
-               addr = kallsyms__get_function_start(filename, name);
-               if (addr)
-                       break;
-       }
+       u64 addr = machine__get_kernel_start_addr(machine, &name);
        if (!addr)
                return -1;
 
@@ -1055,12 +1099,14 @@ int machine__process_mmap2_event(struct machine *machine,
        else
                type = MAP__FUNCTION;
 
-       map = map__new(&machine->user_dsos, event->mmap2.start,
+       map = map__new(machine, event->mmap2.start,
                        event->mmap2.len, event->mmap2.pgoff,
                        event->mmap2.pid, event->mmap2.maj,
                        event->mmap2.min, event->mmap2.ino,
                        event->mmap2.ino_generation,
-                       event->mmap2.filename, type);
+                       event->mmap2.prot,
+                       event->mmap2.flags,
+                       event->mmap2.filename, type, thread);
 
        if (map == NULL)
                goto out_problem;
@@ -1103,11 +1149,11 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
        else
                type = MAP__FUNCTION;
 
-       map = map__new(&machine->user_dsos, event->mmap.start,
+       map = map__new(machine, event->mmap.start,
                        event->mmap.len, event->mmap.pgoff,
-                       event->mmap.pid, 0, 0, 0, 0,
+                       event->mmap.pid, 0, 0, 0, 0, 0, 0,
                        event->mmap.filename,
-                       type);
+                       type, thread);
 
        if (map == NULL)
                goto out_problem;
@@ -1289,7 +1335,9 @@ static int machine__resolve_callchain_sample(struct machine *machine,
        u8 cpumode = PERF_RECORD_MISC_USER;
        int chain_nr = min(max_stack, (int)chain->nr);
        int i;
+       int j;
        int err;
+       int skip_idx __maybe_unused;
 
        callchain_cursor_reset(&callchain_cursor);
 
@@ -1298,14 +1346,26 @@ static int machine__resolve_callchain_sample(struct machine *machine,
                return 0;
        }
 
+       /*
+        * Based on DWARF debug information, some architectures skip
+        * a callchain entry saved by the kernel.
+        */
+       skip_idx = arch_skip_callchain_idx(machine, thread, chain);
+
        for (i = 0; i < chain_nr; i++) {
                u64 ip;
                struct addr_location al;
 
                if (callchain_param.order == ORDER_CALLEE)
-                       ip = chain->ips[i];
+                       j = i;
                else
-                       ip = chain->ips[chain->nr - i - 1];
+                       j = chain->nr - i - 1;
+
+#ifdef HAVE_SKIP_CALLCHAIN_IDX
+               if (j == skip_idx)
+                       continue;
+#endif
+               ip = chain->ips[j];
 
                if (ip >= PERF_CONTEXT_MAX) {
                        switch (ip) {
@@ -1428,3 +1488,46 @@ int __machine__synthesize_threads(struct machine *machine, struct perf_tool *too
        /* command specified */
        return 0;
 }
+
+pid_t machine__get_current_tid(struct machine *machine, int cpu)
+{
+       if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
+               return -1;
+
+       return machine->current_tid[cpu];
+}
+
+int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
+                            pid_t tid)
+{
+       struct thread *thread;
+
+       if (cpu < 0)
+               return -EINVAL;
+
+       if (!machine->current_tid) {
+               int i;
+
+               machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
+               if (!machine->current_tid)
+                       return -ENOMEM;
+               for (i = 0; i < MAX_NR_CPUS; i++)
+                       machine->current_tid[i] = -1;
+       }
+
+       if (cpu >= MAX_NR_CPUS) {
+               pr_err("Requested CPU %d too large. ", cpu);
+               pr_err("Consider raising MAX_NR_CPUS\n");
+               return -EINVAL;
+       }
+
+       machine->current_tid[cpu] = tid;
+
+       thread = machine__findnew_thread(machine, pid, tid);
+       if (!thread)
+               return -ENOMEM;
+
+       thread->cpu = cpu;
+
+       return 0;
+}
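
A hedged sketch of the intended use by an instruction-trace decoder
(names are illustrative): record the tid when a context switch is
observed on a cpu, then resolve later cpu-only samples to a thread:

	/* on an observed context switch: */
	machine__set_current_tid(machine, cpu, pid, tid);

	/* later, for a sample that only carries a cpu: */
	pid_t cur = machine__get_current_tid(machine, sample->cpu);

	if (cur != -1)
		thread = machine__findnew_thread(machine, -1, cur);
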
index c8c74a1..b972824 100644 (file)
@@ -20,6 +20,8 @@ union perf_event;
 
 extern const char *ref_reloc_sym_names[];
 
+struct vdso_info;
+
 struct machine {
        struct rb_node    rb_node;
        pid_t             pid;
@@ -28,11 +30,13 @@ struct machine {
        struct rb_root    threads;
        struct list_head  dead_threads;
        struct thread     *last_match;
+       struct vdso_info  *vdso_info;
        struct list_head  user_dsos;
        struct list_head  kernel_dsos;
        struct map_groups kmaps;
        struct map        *vmlinux_maps[MAP__NR_TYPES];
        symbol_filter_t   symbol_filter;
+       pid_t             *current_tid;
 };
 
 static inline
@@ -191,4 +195,8 @@ int machine__synthesize_threads(struct machine *machine, struct target *target,
                                             perf_event__process, data_mmap);
 }
 
+pid_t machine__get_current_tid(struct machine *machine, int cpu);
+int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
+                            pid_t tid);
+
 #endif /* __PERF_MACHINE_H */
index 8ccbb32..31b8905 100644 (file)
@@ -12,6 +12,8 @@
 #include "vdso.h"
 #include "build-id.h"
 #include "util.h"
+#include "debug.h"
+#include "machine.h"
 #include <linux/string.h>
 
 const char *map_type__name[MAP__NR_TYPES] = {
@@ -136,10 +138,10 @@ void map__init(struct map *map, enum map_type type,
        map->erange_warned = false;
 }
 
-struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+struct map *map__new(struct machine *machine, u64 start, u64 len,
                     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
-                    u64 ino_gen, char *filename,
-                    enum map_type type)
+                    u64 ino_gen, u32 prot, u32 flags, char *filename,
+                    enum map_type type, struct thread *thread)
 {
        struct map *map = malloc(sizeof(*map));
 
@@ -157,6 +159,8 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                map->min = d_min;
                map->ino = ino;
                map->ino_generation = ino_gen;
+               map->prot = prot;
+               map->flags = flags;
 
                if ((anon || no_dso) && type == MAP__FUNCTION) {
                        snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
@@ -170,9 +174,9 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
 
                if (vdso) {
                        pgoff = 0;
-                       dso = vdso__dso_findnew(dsos__list);
+                       dso = vdso__dso_findnew(machine, thread);
                } else
-                       dso = __dsos__findnew(dsos__list, filename);
+                       dso = __dsos__findnew(&machine->user_dsos, filename);
 
                if (dso == NULL)
                        goto out_delete;
@@ -452,6 +456,20 @@ void map_groups__exit(struct map_groups *mg)
        }
 }
 
+bool map_groups__empty(struct map_groups *mg)
+{
+       int i;
+
+       for (i = 0; i < MAP__NR_TYPES; ++i) {
+               if (maps__first(&mg->maps[i]))
+                       return false;
+               if (!list_empty(&mg->removed_maps[i]))
+                       return false;
+       }
+
+       return true;
+}
+
 struct map_groups *map_groups__new(void)
 {
        struct map_groups *mg = malloc(sizeof(*mg));
@@ -552,8 +570,8 @@ int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
        return ams->sym ? 0 : -1;
 }
 
-size_t __map_groups__fprintf_maps(struct map_groups *mg,
-                                 enum map_type type, int verbose, FILE *fp)
+size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
+                                 FILE *fp)
 {
        size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
        struct rb_node *nd;
@@ -571,17 +589,16 @@ size_t __map_groups__fprintf_maps(struct map_groups *mg,
        return printed;
 }
 
-size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp)
+static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp)
 {
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
-               printed += __map_groups__fprintf_maps(mg, i, verbose, fp);
+               printed += __map_groups__fprintf_maps(mg, i, fp);
        return printed;
 }
 
 static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
-                                                enum map_type type,
-                                                int verbose, FILE *fp)
+                                                enum map_type type, FILE *fp)
 {
        struct map *pos;
        size_t printed = 0;
@@ -598,23 +615,23 @@ static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
 }
 
 static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
-                                              int verbose, FILE *fp)
+                                              FILE *fp)
 {
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
-               printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp);
+               printed += __map_groups__fprintf_removed_maps(mg, i, fp);
        return printed;
 }
 
-size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp)
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
 {
-       size_t printed = map_groups__fprintf_maps(mg, verbose, fp);
+       size_t printed = map_groups__fprintf_maps(mg, fp);
        printed += fprintf(fp, "Removed maps:\n");
-       return printed + map_groups__fprintf_removed_maps(mg, verbose, fp);
+       return printed + map_groups__fprintf_removed_maps(mg, fp);
 }
 
 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
-                                  int verbose, FILE *fp)
+                                  FILE *fp)
 {
        struct rb_root *root = &mg->maps[map->type];
        struct rb_node *next = rb_first(root);
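
Note: map__new() now receives the whole machine (and the mapping thread) instead of a
bare dso list, so the vdso lookup can be done per machine, and it stores the prot/flags
bits carried by PERF_RECORD_MMAP2. A sketch of a caller after the signature change
(the field names follow the mmap2 event layout and are assumptions here):

        map = map__new(machine, event->mmap2.start, event->mmap2.len,
                       event->mmap2.pgoff, event->mmap2.pid,
                       event->mmap2.maj, event->mmap2.min,
                       event->mmap2.ino, event->mmap2.ino_generation,
                       event->mmap2.prot, event->mmap2.flags,
                       event->mmap2.filename, MAP__FUNCTION, thread);
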
index ae2d451..2f83954 100644 (file)
@@ -35,6 +35,8 @@ struct map {
        bool                    referenced;
        bool                    erange_warned;
        u32                     priv;
+       u32                     prot;
+       u32                     flags;
        u64                     pgoff;
        u64                     reloc;
        u32                     maj, min; /* only valid for MMAP2 record */
@@ -64,6 +66,7 @@ struct map_groups {
 
 struct map_groups *map_groups__new(void);
 void map_groups__delete(struct map_groups *mg);
+bool map_groups__empty(struct map_groups *mg);
 
 static inline struct map_groups *map_groups__get(struct map_groups *mg)
 {
@@ -101,6 +104,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip);
 u64 map__objdump_2mem(struct map *map, u64 ip);
 
 struct symbol;
+struct thread;
 
 /* map__for_each_symbol - iterate over the symbols in the given map
  *
@@ -116,10 +120,10 @@ typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
 
 void map__init(struct map *map, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso);
-struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
+struct map *map__new(struct machine *machine, u64 start, u64 len,
                     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
-                    u64 ino_gen,
-                    char *filename, enum map_type type);
+                    u64 ino_gen, u32 prot, u32 flags,
+                    char *filename, enum map_type type, struct thread *thread);
 struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
 void map__delete(struct map *map);
 struct map *map__clone(struct map *map);
@@ -139,8 +143,8 @@ void map__fixup_end(struct map *map);
 
 void map__reloc_vmlinux(struct map *map);
 
-size_t __map_groups__fprintf_maps(struct map_groups *mg,
-                                 enum map_type type, int verbose, FILE *fp);
+size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
+                                 FILE *fp);
 void maps__insert(struct rb_root *maps, struct map *map);
 void maps__remove(struct rb_root *maps, struct map *map);
 struct map *maps__find(struct rb_root *maps, u64 addr);
@@ -150,8 +154,7 @@ void map_groups__init(struct map_groups *mg);
 void map_groups__exit(struct map_groups *mg);
 int map_groups__clone(struct map_groups *mg,
                      struct map_groups *parent, enum map_type type);
-size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp);
-size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp);
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp);
 
 int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name,
                                     u64 addr);
@@ -208,7 +211,7 @@ struct symbol *map_groups__find_function_by_name(struct map_groups *mg,
 }
 
 int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
-                                  int verbose, FILE *fp);
+                                  FILE *fp);
 
 struct map *map_groups__find_by_name(struct map_groups *mg,
                                     enum map_type type, const char *name);
index d8dac8a..b59ba85 100644 (file)
@@ -98,6 +98,7 @@ struct option {
        parse_opt_cb *callback;
        intptr_t defval;
        bool *set;
+       void *data;
 };
 
 #define check_vtype(v, type) ( BUILD_BUG_ON_ZERO(!__builtin_types_compatible_p(typeof(v), type)) + v )
@@ -131,6 +132,10 @@ struct option {
        { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l),\
        .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d,\
        .flags = PARSE_OPT_LASTARG_DEFAULT | PARSE_OPT_NOARG}
+#define OPT_CALLBACK_OPTARG(s, l, v, d, a, h, f) \
+       { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), \
+         .value = (v), (a), .help = (h), .callback = (f), \
+         .flags = PARSE_OPT_OPTARG, .data = (d) }
 
 /* parse_options() will filter out the processed options and leave the
  * non-option arguments in argv[].
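
Note: OPT_CALLBACK_OPTARG pairs PARSE_OPT_OPTARG with the new .data field, so a
callback can accept an optional argument and still reach auxiliary state without a
global. A hypothetical user (parse_foo, foo_params and the option names are
illustrations, not part of this patch):

        static int parse_foo(const struct option *opt, const char *arg, int unset)
        {
                struct foo_params *params = opt->data; /* the `d` argument */

                if (unset)
                        return 0;
                params->enabled = true;
                if (arg)                /* optional: may be NULL */
                        params->mode = atoi(arg);
                return 0;
        }

        OPT_CALLBACK_OPTARG('f', "foo", &foo_seen, &foo_params, "mode",
                            "enable foo, optionally selecting a mode", parse_foo),
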
index a3539ef..43168fb 100644 (file)
@@ -1,11 +1,15 @@
 #include <errno.h>
 #include "perf_regs.h"
+#include "event.h"
 
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
        int i, idx = 0;
        u64 mask = regs->mask;
 
+       if (regs->cache_mask & (1ULL << id))
+               goto out;
+
        if (!(mask & (1 << id)))
                return -EINVAL;
 
@@ -14,6 +18,10 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
                        idx++;
        }
 
-       *valp = regs->regs[idx];
+       regs->cache_mask |= (1ULL << id);
+       regs->cache_regs[id] = regs->regs[idx];
+
+out:
+       *valp = regs->cache_regs[id];
        return 0;
 }
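
Note: regs->regs[] is packed, holding values only for the registers whose bits are set
in regs->mask, so locating register `id` means counting the set mask bits below it,
which is what the loop in the context above does. The new cache_mask/cache_regs pair
memoizes that walk per sample. A standalone model of the index computation (not perf
code, just the arithmetic):

        /* position of register `id` in the packed dump */
        static int packed_reg_index(u64 mask, int id)
        {
                int i, idx = 0;

                for (i = 0; i < id; i++)
                        if (mask & (1ULL << i))
                                idx++;
                return idx;
        }
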
index 79c78f7..980dbf7 100644 (file)
@@ -2,7 +2,8 @@
 #define __PERF_REGS_H
 
 #include <linux/types.h>
-#include "event.h"
+
+struct regs_dump;
 
 #ifdef HAVE_PERF_REGS_SUPPORT
 #include <perf_regs.h>
@@ -11,6 +12,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
 
 #else
 #define PERF_REGS_MASK 0
+#define PERF_REGS_MAX  0
 
 static inline const char *perf_reg_name(int id __maybe_unused)
 {
index 0d1542f..9a0a183 100644 (file)
@@ -628,11 +628,11 @@ static int __show_line_range(struct line_range *lr, const char *module)
 
        ret = debuginfo__find_line_range(dinfo, lr);
        debuginfo__delete(dinfo);
-       if (ret == 0) {
+       if (ret == 0 || ret == -ENOENT) {
                pr_warning("Specified source line is not found.\n");
                return -ENOENT;
        } else if (ret < 0) {
-               pr_warning("Debuginfo analysis failed. (%d)\n", ret);
+               pr_warning("Debuginfo analysis failed.\n");
                return ret;
        }
 
@@ -641,7 +641,7 @@ static int __show_line_range(struct line_range *lr, const char *module)
        ret = get_real_path(tmp, lr->comp_dir, &lr->path);
        free(tmp);      /* Free old path */
        if (ret < 0) {
-               pr_warning("Failed to find source file. (%d)\n", ret);
+               pr_warning("Failed to find source file path.\n");
                return ret;
        }
 
@@ -721,9 +721,14 @@ static int show_available_vars_at(struct debuginfo *dinfo,
        ret = debuginfo__find_available_vars_at(dinfo, pev, &vls,
                                                max_vls, externs);
        if (ret <= 0) {
-               pr_err("Failed to find variables at %s (%d)\n", buf, ret);
+               if (ret == 0 || ret == -ENOENT) {
+                       pr_err("Failed to find the address of %s\n", buf);
+                       ret = -ENOENT;
+               } else
+                       pr_warning("Debuginfo analysis failed.\n");
                goto end;
        }
+
        /* Some variables are found */
        fprintf(stdout, "Available variables at %s\n", buf);
        for (i = 0; i < ret; i++) {
index 9d8eb26..dca9145 100644 (file)
@@ -26,7 +26,6 @@
 #include <errno.h>
 #include <stdio.h>
 #include <unistd.h>
-#include <getopt.h>
 #include <stdlib.h>
 #include <string.h>
 #include <stdarg.h>
@@ -573,14 +572,14 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
        if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
                /* Search again in global variables */
-               if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
+               if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) {
+                       pr_warning("Failed to find '%s' in this function.\n",
+                                  pf->pvar->var);
                        ret = -ENOENT;
+               }
        }
        if (ret >= 0)
                ret = convert_variable(&vr_die, pf);
 
-       if (ret < 0)
-               pr_warning("Failed to find '%s' in this function.\n",
-                          pf->pvar->var);
        return ret;
 }
 
@@ -1281,7 +1279,11 @@ out:
        return ret;
 }
 
-/* Find available variables at given probe point */
+/*
+ * Find available variables at the given probe point.
+ * Returns the number of found probe points, 0 if no probe point
+ * matched, or <0 if an error occurred.
+ */
 int debuginfo__find_available_vars_at(struct debuginfo *dbg,
                                      struct perf_probe_event *pev,
                                      struct variable_list **vls,
index daa17ae..a126e6c 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "util.h"
 #include "pstack.h"
+#include "debug.h"
 #include <linux/kernel.h>
 #include <stdlib.h>
 
index 122669c..12aa9b0 100644 (file)
  */
 int verbose;
 
-int eprintf(int level, const char *fmt, ...)
+int eprintf(int level, int var, const char *fmt, ...)
 {
        va_list args;
        int ret = 0;
 
-       if (verbose >= level) {
+       if (var >= level) {
                va_start(args, fmt);
                ret = vfprintf(stderr, fmt, args);
                va_end(args);
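
Note: eprintf() now receives the gating variable explicitly instead of always reading
the global `verbose`, so callers can gate output on their own knob. The debug.h
wrappers would then look roughly like this (a sketch; the exact wrapper set is not
part of this hunk, and ui_verbose is a made-up subsystem flag):

        #define pr_warning(fmt, ...) \
                eprintf(0, verbose, fmt, ##__VA_ARGS__)
        #define pr_debug(fmt, ...) \
                eprintf(1, verbose, fmt, ##__VA_ARGS__)
        /* a subsystem with its own verbosity flag: */
        #define ui_debug(fmt, ...) \
                eprintf(1, ui_verbose, fmt, ##__VA_ARGS__)
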
index 049e0a0..fe8079e 100644 (file)
@@ -4,6 +4,7 @@
 #include "parse-events.h"
 #include <api/fs/fs.h>
 #include "util.h"
+#include "cloexec.h"
 
 typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
 
@@ -11,6 +12,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
 {
        struct perf_evlist *evlist;
        struct perf_evsel *evsel;
+       unsigned long flags = perf_event_open_cloexec_flag();
        int err = -EAGAIN, fd;
 
        evlist = perf_evlist__new();
@@ -22,14 +24,14 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
 
        evsel = perf_evlist__first(evlist);
 
-       fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+       fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, flags);
        if (fd < 0)
                goto out_delete;
        close(fd);
 
        fn(evsel);
 
-       fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+       fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, flags);
        if (fd < 0) {
                if (errno == EINVAL)
                        err = -EINVAL;
@@ -69,15 +71,26 @@ static void perf_probe_sample_identifier(struct perf_evsel *evsel)
        evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
 }
 
+static void perf_probe_comm_exec(struct perf_evsel *evsel)
+{
+       evsel->attr.comm_exec = 1;
+}
+
 bool perf_can_sample_identifier(void)
 {
        return perf_probe_api(perf_probe_sample_identifier);
 }
 
+static bool perf_can_comm_exec(void)
+{
+       return perf_probe_api(perf_probe_comm_exec);
+}
+
 void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
 {
        struct perf_evsel *evsel;
        bool use_sample_identifier = false;
+       bool use_comm_exec;
 
        /*
         * Set the evsel leader links before we configure attributes,
@@ -89,8 +102,13 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
        if (evlist->cpus->map[0] < 0)
                opts->no_inherit = true;
 
-       evlist__for_each(evlist, evsel)
+       use_comm_exec = perf_can_comm_exec();
+
+       evlist__for_each(evlist, evsel) {
                perf_evsel__config(evsel, opts);
+               if (!evsel->idx && use_comm_exec)
+                       evsel->attr.comm_exec = 1;
+       }
 
        if (evlist->nr_entries > 1) {
                struct perf_evsel *first = perf_evlist__first(evlist);
@@ -203,7 +221,8 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
                cpu = evlist->cpus->map[0];
        }
 
-       fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1, 0);
+       fd = sys_perf_event_open(&evsel->attr, -1, cpu, -1,
+                                perf_event_open_cloexec_flag());
        if (fd >= 0) {
                close(fd);
                ret = true;
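
Note: perf_event_open_cloexec_flag() comes from the new cloexec.c, which is not part
of this diff; it is expected to probe once whether the running kernel accepts
PERF_FLAG_FD_CLOEXEC and return either that flag or 0, so the open calls above keep
working on older kernels. A sketch of such a probe, under those assumptions:

        static unsigned long flag = (unsigned long)-1;

        unsigned long perf_event_open_cloexec_flag(void)
        {
                if (flag == (unsigned long)-1) {
                        struct perf_event_attr attr = {
                                .type           = PERF_TYPE_SOFTWARE,
                                .config         = PERF_COUNT_SW_CPU_CLOCK,
                                .exclude_kernel = 1,
                        };
                        int fd = sys_perf_event_open(&attr, 0, -1, -1,
                                                     PERF_FLAG_FD_CLOEXEC);

                        flag = (fd >= 0) ? PERF_FLAG_FD_CLOEXEC : 0;
                        if (fd >= 0)
                                close(fd);
                }
                return flag;
        }
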
index e108207..b2dba9c 100644 (file)
@@ -34,6 +34,7 @@
 #include "../event.h"
 #include "../trace-event.h"
 #include "../evsel.h"
+#include "../debug.h"
 
 void boot_Perf__Trace__Context(pTHX_ CV *cv);
 void boot_DynaLoader(pTHX_ CV *cv);
@@ -215,6 +216,7 @@ static void define_event_symbols(struct event_format *event,
        case PRINT_BSTRING:
        case PRINT_DYNAMIC_ARRAY:
        case PRINT_STRING:
+       case PRINT_BITMASK:
                break;
        case PRINT_TYPE:
                define_event_symbols(event, ev_name, args->typecast.item);
index cd9774d..cbce254 100644 (file)
 #include <errno.h>
 
 #include "../../perf.h"
+#include "../debug.h"
 #include "../evsel.h"
 #include "../util.h"
 #include "../event.h"
 #include "../thread.h"
 #include "../trace-event.h"
+#include "../machine.h"
 
 PyMODINIT_FUNC initperf_trace_context(void);
 
@@ -50,10 +52,14 @@ static int zero_flag_atom;
 
 static PyObject *main_module, *main_dict;
 
+static void handler_call_die(const char *handler_name) NORETURN;
 static void handler_call_die(const char *handler_name)
 {
        PyErr_Print();
        Py_FatalError("problem in Python trace event handler");
+       // Py_FatalError does not return
+       // but we have to make the compiler happy
+       abort();
 }
 
 /*
@@ -97,6 +103,7 @@ static void define_value(enum print_arg_type field_type,
                retval = PyObject_CallObject(handler, t);
                if (retval == NULL)
                        handler_call_die(handler_name);
+               Py_DECREF(retval);
        }
 
        Py_DECREF(t);
@@ -143,6 +150,7 @@ static void define_field(enum print_arg_type field_type,
                retval = PyObject_CallObject(handler, t);
                if (retval == NULL)
                        handler_call_die(handler_name);
+               Py_DECREF(retval);
        }
 
        Py_DECREF(t);
@@ -197,6 +205,7 @@ static void define_event_symbols(struct event_format *event,
        case PRINT_BSTRING:
        case PRINT_DYNAMIC_ARRAY:
        case PRINT_FUNC:
+       case PRINT_BITMASK:
                /* we should warn... */
                return;
        }
@@ -230,15 +239,133 @@ static inline struct event_format *find_cache_event(struct perf_evsel *evsel)
        return event;
 }
 
+static PyObject *get_field_numeric_entry(struct event_format *event,
+               struct format_field *field, void *data)
+{
+       bool is_array = field->flags & FIELD_IS_ARRAY;
+       PyObject *obj, *list = NULL;
+       unsigned long long val;
+       unsigned int item_size, n_items, i;
+
+       if (is_array) {
+               list = PyList_New(field->arraylen);
+               item_size = field->size / field->arraylen;
+               n_items = field->arraylen;
+       } else {
+               item_size = field->size;
+               n_items = 1;
+       }
+
+       for (i = 0; i < n_items; i++) {
+               val = read_size(event, data + field->offset + i * item_size,
+                               item_size);
+               if (field->flags & FIELD_IS_SIGNED) {
+                       if ((long long)val >= LONG_MIN &&
+                                       (long long)val <= LONG_MAX)
+                               obj = PyInt_FromLong(val);
+                       else
+                               obj = PyLong_FromLongLong(val);
+               } else {
+                       if (val <= LONG_MAX)
+                               obj = PyInt_FromLong(val);
+                       else
+                               obj = PyLong_FromUnsignedLongLong(val);
+               }
+               if (is_array)
+                       PyList_SET_ITEM(list, i, obj);
+       }
+       if (is_array)
+               obj = list;
+       return obj;
+}
+
+static PyObject *python_process_callchain(struct perf_sample *sample,
+                                        struct perf_evsel *evsel,
+                                        struct addr_location *al)
+{
+       PyObject *pylist;
+
+       pylist = PyList_New(0);
+       if (!pylist)
+               Py_FatalError("couldn't create Python list");
+
+       if (!symbol_conf.use_callchain || !sample->callchain)
+               goto exit;
+
+       if (machine__resolve_callchain(al->machine, evsel, al->thread,
+                                          sample, NULL, NULL,
+                                          PERF_MAX_STACK_DEPTH) != 0) {
+               pr_err("Failed to resolve callchain. Skipping\n");
+               goto exit;
+       }
+       callchain_cursor_commit(&callchain_cursor);
+
+       while (1) {
+               PyObject *pyelem;
+               struct callchain_cursor_node *node;
+               node = callchain_cursor_current(&callchain_cursor);
+               if (!node)
+                       break;
+
+               pyelem = PyDict_New();
+               if (!pyelem)
+                       Py_FatalError("couldn't create Python dictionary");
+
+               pydict_set_item_string_decref(pyelem, "ip",
+                               PyLong_FromUnsignedLongLong(node->ip));
+
+               if (node->sym) {
+                       PyObject *pysym  = PyDict_New();
+                       if (!pysym)
+                               Py_FatalError("couldn't create Python dictionary");
+                       pydict_set_item_string_decref(pysym, "start",
+                                       PyLong_FromUnsignedLongLong(node->sym->start));
+                       pydict_set_item_string_decref(pysym, "end",
+                                       PyLong_FromUnsignedLongLong(node->sym->end));
+                       pydict_set_item_string_decref(pysym, "binding",
+                                       PyInt_FromLong(node->sym->binding));
+                       pydict_set_item_string_decref(pysym, "name",
+                                       PyString_FromStringAndSize(node->sym->name,
+                                                       node->sym->namelen));
+                       pydict_set_item_string_decref(pyelem, "sym", pysym);
+               }
+
+               if (node->map) {
+                       struct map *map = node->map;
+                       const char *dsoname = "[unknown]";
+                       if (map && map->dso && (map->dso->name || map->dso->long_name)) {
+                               if (symbol_conf.show_kernel_path && map->dso->long_name)
+                                       dsoname = map->dso->long_name;
+                               else if (map->dso->name)
+                                       dsoname = map->dso->name;
+                       }
+                       pydict_set_item_string_decref(pyelem, "dso",
+                                       PyString_FromString(dsoname));
+               }
+
+               callchain_cursor_advance(&callchain_cursor);
+               PyList_Append(pylist, pyelem);
+               Py_DECREF(pyelem);
+       }
+
+exit:
+       return pylist;
+}
+
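
Note: python_process_callchain() leans on pydict_set_item_string_decref(), a small
local helper used throughout this file: PyDict_SetItemString() and PyList_Append()
both take their own reference, so the caller must drop its reference afterwards to
avoid leaks. The helper's shape, for reference (as assumed here):

        static void pydict_set_item_string_decref(PyObject *dict,
                                                  const char *key, PyObject *val)
        {
                PyDict_SetItemString(dict, key, val);   /* dict takes its own ref */
                Py_DECREF(val);                         /* drop ours */
        }

The explicit Py_DECREF(pyelem) after PyList_Append() above follows the same rule.
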
 static void python_process_tracepoint(struct perf_sample *sample,
                                      struct perf_evsel *evsel,
                                      struct thread *thread,
                                      struct addr_location *al)
 {
-       PyObject *handler, *retval, *context, *t, *obj, *dict = NULL;
+       PyObject *handler, *retval, *context, *t, *obj, *callchain;
+       PyObject *dict = NULL;
        static char handler_name[256];
        struct format_field *field;
-       unsigned long long val;
        unsigned long s, ns;
        struct event_format *event;
        unsigned n = 0;
@@ -279,18 +406,23 @@ static void python_process_tracepoint(struct perf_sample *sample,
        PyTuple_SetItem(t, n++, PyString_FromString(handler_name));
        PyTuple_SetItem(t, n++, context);
 
+       /* ip unwinding */
+       callchain = python_process_callchain(sample, evsel, al);
+
        if (handler) {
                PyTuple_SetItem(t, n++, PyInt_FromLong(cpu));
                PyTuple_SetItem(t, n++, PyInt_FromLong(s));
                PyTuple_SetItem(t, n++, PyInt_FromLong(ns));
                PyTuple_SetItem(t, n++, PyInt_FromLong(pid));
                PyTuple_SetItem(t, n++, PyString_FromString(comm));
+               PyTuple_SetItem(t, n++, callchain);
        } else {
                pydict_set_item_string_decref(dict, "common_cpu", PyInt_FromLong(cpu));
                pydict_set_item_string_decref(dict, "common_s", PyInt_FromLong(s));
                pydict_set_item_string_decref(dict, "common_ns", PyInt_FromLong(ns));
                pydict_set_item_string_decref(dict, "common_pid", PyInt_FromLong(pid));
                pydict_set_item_string_decref(dict, "common_comm", PyString_FromString(comm));
+               pydict_set_item_string_decref(dict, "common_callchain", callchain);
        }
        for (field = event->format.fields; field; field = field->next) {
                if (field->flags & FIELD_IS_STRING) {
@@ -302,20 +434,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
                                offset = field->offset;
                        obj = PyString_FromString((char *)data + offset);
                } else { /* FIELD_IS_NUMERIC */
-                       val = read_size(event, data + field->offset,
-                                       field->size);
-                       if (field->flags & FIELD_IS_SIGNED) {
-                               if ((long long)val >= LONG_MIN &&
-                                   (long long)val <= LONG_MAX)
-                                       obj = PyInt_FromLong(val);
-                               else
-                                       obj = PyLong_FromLongLong(val);
-                       } else {
-                               if (val <= LONG_MAX)
-                                       obj = PyInt_FromLong(val);
-                               else
-                                       obj = PyLong_FromUnsignedLongLong(val);
-                       }
+                       obj = get_field_numeric_entry(event, field, data);
                }
                if (handler)
                        PyTuple_SetItem(t, n++, obj);
@@ -323,6 +442,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
                        pydict_set_item_string_decref(dict, field->name, obj);
 
        }
+
        if (!handler)
                PyTuple_SetItem(t, n++, dict);
 
@@ -333,6 +453,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
                retval = PyObject_CallObject(handler, t);
                if (retval == NULL)
                        handler_call_die(handler_name);
+               Py_DECREF(retval);
        } else {
                handler = PyDict_GetItemString(main_dict, "trace_unhandled");
                if (handler && PyCallable_Check(handler)) {
@@ -340,6 +461,7 @@ static void python_process_tracepoint(struct perf_sample *sample,
                        retval = PyObject_CallObject(handler, t);
                        if (retval == NULL)
                                handler_call_die("trace_unhandled");
+                       Py_DECREF(retval);
                }
                Py_DECREF(dict);
        }
@@ -352,7 +474,7 @@ static void python_process_general_event(struct perf_sample *sample,
                                         struct thread *thread,
                                         struct addr_location *al)
 {
-       PyObject *handler, *retval, *t, *dict;
+       PyObject *handler, *retval, *t, *dict, *callchain, *dict_sample;
        static char handler_name[64];
        unsigned n = 0;
 
@@ -368,6 +490,10 @@ static void python_process_general_event(struct perf_sample *sample,
        if (!dict)
                Py_FatalError("couldn't create Python dictionary");
 
+       dict_sample = PyDict_New();
+       if (!dict_sample)
+               Py_FatalError("couldn't create Python dictionary");
+
        snprintf(handler_name, sizeof(handler_name), "%s", "process_event");
 
        handler = PyDict_GetItemString(main_dict, handler_name);
@@ -377,8 +503,21 @@ static void python_process_general_event(struct perf_sample *sample,
        pydict_set_item_string_decref(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel)));
        pydict_set_item_string_decref(dict, "attr", PyString_FromStringAndSize(
                        (const char *)&evsel->attr, sizeof(evsel->attr)));
-       pydict_set_item_string_decref(dict, "sample", PyString_FromStringAndSize(
-                       (const char *)sample, sizeof(*sample)));
+
+       pydict_set_item_string_decref(dict_sample, "pid",
+                       PyInt_FromLong(sample->pid));
+       pydict_set_item_string_decref(dict_sample, "tid",
+                       PyInt_FromLong(sample->tid));
+       pydict_set_item_string_decref(dict_sample, "cpu",
+                       PyInt_FromLong(sample->cpu));
+       pydict_set_item_string_decref(dict_sample, "ip",
+                       PyLong_FromUnsignedLongLong(sample->ip));
+       pydict_set_item_string_decref(dict_sample, "time",
+                       PyLong_FromUnsignedLongLong(sample->time));
+       pydict_set_item_string_decref(dict_sample, "period",
+                       PyLong_FromUnsignedLongLong(sample->period));
+       pydict_set_item_string_decref(dict, "sample", dict_sample);
+
        pydict_set_item_string_decref(dict, "raw_buf", PyString_FromStringAndSize(
                        (const char *)sample->raw_data, sample->raw_size));
        pydict_set_item_string_decref(dict, "comm",
@@ -392,6 +531,10 @@ static void python_process_general_event(struct perf_sample *sample,
                        PyString_FromString(al->sym->name));
        }
 
+       /* ip unwinding */
+       callchain = python_process_callchain(sample, evsel, al);
+       pydict_set_item_string_decref(dict, "callchain", callchain);
+
        PyTuple_SetItem(t, n++, dict);
        if (_PyTuple_Resize(&t, n) == -1)
                Py_FatalError("error resizing Python tuple");
@@ -399,6 +542,7 @@ static void python_process_general_event(struct perf_sample *sample,
        retval = PyObject_CallObject(handler, t);
        if (retval == NULL)
                handler_call_die(handler_name);
+       Py_DECREF(retval);
 exit:
        Py_DECREF(dict);
        Py_DECREF(t);
@@ -520,8 +664,7 @@ static int python_stop_script(void)
        retval = PyObject_CallObject(handler, NULL);
        if (retval == NULL)
                handler_call_die("trace_end");
-       else
-               Py_DECREF(retval);
+       Py_DECREF(retval);
 out:
        Py_XDECREF(main_dict);
        Py_XDECREF(main_module);
@@ -588,6 +731,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                fprintf(ofp, "common_nsecs, ");
                fprintf(ofp, "common_pid, ");
                fprintf(ofp, "common_comm,\n\t");
+               fprintf(ofp, "common_callchain, ");
 
                not_first = 0;
                count = 0;
@@ -622,6 +766,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                        fprintf(ofp, "%s=", f->name);
                        if (f->flags & FIELD_IS_STRING ||
                            f->flags & FIELD_IS_FLAG ||
+                           f->flags & FIELD_IS_ARRAY ||
                            f->flags & FIELD_IS_SYMBOLIC)
                                fprintf(ofp, "%%s");
                        else if (f->flags & FIELD_IS_SIGNED)
@@ -630,7 +775,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                                fprintf(ofp, "%%u");
                }
 
-               fprintf(ofp, "\\n\" %% \\\n\t\t(");
+               fprintf(ofp, "\" %% \\\n\t\t(");
 
                not_first = 0;
                count = 0;
@@ -666,7 +811,15 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                                fprintf(ofp, "%s", f->name);
                }
 
-               fprintf(ofp, "),\n\n");
+               fprintf(ofp, ")\n\n");
+
+               fprintf(ofp, "\t\tfor node in common_callchain:");
+               fprintf(ofp, "\n\t\t\tif 'sym' in node:");
+               fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])");
+               fprintf(ofp, "\n\t\t\telse:");
+               fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x]\" %% (node['ip'])\n\n");
+               fprintf(ofp, "\t\tprint \"\\n\"\n\n");
+
        }
 
        fprintf(ofp, "def trace_unhandled(event_name, context, "
index 64a186e..88dfef7 100644 (file)
@@ -14,7 +14,6 @@
 #include "util.h"
 #include "cpumap.h"
 #include "perf_regs.h"
-#include "vdso.h"
 
 static int perf_session__open(struct perf_session *session)
 {
@@ -156,7 +155,6 @@ void perf_session__delete(struct perf_session *session)
        if (session->file)
                perf_data_file__close(session->file);
        free(session);
-       vdso__exit();
 }
 
 static int process_event_synth_tracing_data_stub(struct perf_tool *tool
@@ -511,6 +509,7 @@ static int flush_sample_queue(struct perf_session *s,
                os->last_flush = iter->timestamp;
                list_del(&iter->list);
                list_add(&iter->list, &os->sample_cache);
+               os->nr_samples--;
 
                if (show_progress)
                        ui_progress__update(&prog, 1);
@@ -523,8 +522,6 @@ static int flush_sample_queue(struct perf_session *s,
                        list_entry(head->prev, struct sample_queue, list);
        }
 
-       os->nr_samples = 0;
-
        return 0;
 }
 
@@ -994,8 +991,10 @@ static int perf_session_deliver_event(struct perf_session *session,
        }
 }
 
-static int perf_session__process_user_event(struct perf_session *session, union perf_event *event,
-                                           struct perf_tool *tool, u64 file_offset)
+static s64 perf_session__process_user_event(struct perf_session *session,
+                                           union perf_event *event,
+                                           struct perf_tool *tool,
+                                           u64 file_offset)
 {
        int fd = perf_data_file__fd(session->file);
        int err;
@@ -1037,7 +1036,7 @@ static void event_swap(union perf_event *event, bool sample_id_all)
                swap(event, sample_id_all);
 }
 
-static int perf_session__process_event(struct perf_session *session,
+static s64 perf_session__process_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset)
@@ -1083,13 +1082,14 @@ void perf_event_header__bswap(struct perf_event_header *hdr)
 
 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
 {
-       return machine__findnew_thread(&session->machines.host, 0, pid);
+       return machine__findnew_thread(&session->machines.host, -1, pid);
 }
 
 static struct thread *perf_session__register_idle_thread(struct perf_session *session)
 {
-       struct thread *thread = perf_session__findnew(session, 0);
+       struct thread *thread;
 
+       thread = machine__findnew_thread(&session->machines.host, 0, 0);
        if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
                pr_err("problem inserting idle task.\n");
                thread = NULL;
@@ -1147,7 +1147,7 @@ static int __perf_session__process_pipe_events(struct perf_session *session,
        union perf_event *event;
        uint32_t size, cur_size = 0;
        void *buf = NULL;
-       int skip = 0;
+       s64 skip = 0;
        u64 head;
        ssize_t err;
        void *p;
@@ -1276,13 +1276,13 @@ int __perf_session__process_events(struct perf_session *session,
                                   u64 file_size, struct perf_tool *tool)
 {
        int fd = perf_data_file__fd(session->file);
-       u64 head, page_offset, file_offset, file_pos;
+       u64 head, page_offset, file_offset, file_pos, size;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        size_t  mmap_size;
        char *buf, *mmaps[NUM_MMAPS];
        union perf_event *event;
-       uint32_t size;
        struct ui_progress prog;
+       s64 skip;
 
        perf_tool__fill_defaults(tool);
 
@@ -1296,8 +1296,10 @@ int __perf_session__process_events(struct perf_session *session,
        ui_progress__init(&prog, file_size, "Processing events...");
 
        mmap_size = MMAP_SIZE;
-       if (mmap_size > file_size)
+       if (mmap_size > file_size) {
                mmap_size = file_size;
+               session->one_mmap = true;
+       }
 
        memset(mmaps, 0, sizeof(mmaps));
 
@@ -1319,6 +1321,10 @@ remap:
        mmaps[map_idx] = buf;
        map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
        file_pos = file_offset + head;
+       if (session->one_mmap) {
+               session->one_mmap_addr = buf;
+               session->one_mmap_offset = file_offset;
+       }
 
 more:
        event = fetch_mmaped_event(session, head, mmap_size, buf);
@@ -1337,7 +1343,8 @@ more:
        size = event->header.size;
 
        if (size < sizeof(struct perf_event_header) ||
-           perf_session__process_event(session, event, tool, file_pos) < 0) {
+           (skip = perf_session__process_event(session, event, tool, file_pos))
+                                                                       < 0) {
                pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
                       file_offset + head, event->header.size,
                       event->header.type);
@@ -1345,6 +1352,9 @@ more:
                goto out_err;
        }
 
+       if (skip)
+               size += skip;
+
        head += size;
        file_pos += size;
 
@@ -1364,6 +1374,7 @@ out_err:
        ui_progress__finish();
        perf_session__warn_about_errors(session, tool);
        perf_session_free_sample_buffers(session);
+       session->one_mmap = false;
        return err;
 }
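
Note: switching the processing return type to s64 lets a handler report not just
success or failure but a positive number of extra bytes to skip, which the reader
adds to the event's own header size. The contract, condensed from the code above:

        s64 ret = perf_session__process_event(session, event, tool, file_pos);

        if (ret < 0)
                goto out_err;                     /* hard error */
        else if (ret > 0)
                size = event->header.size + ret;  /* event plus trailing payload */
        else
                size = event->header.size;        /* plain event */
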
 
index 3140f8a..0321013 100644 (file)
@@ -36,6 +36,9 @@ struct perf_session {
        struct trace_event      tevent;
        struct events_stats     stats;
        bool                    repipe;
+       bool                    one_mmap;
+       void                    *one_mmap_addr;
+       u64                     one_mmap_offset;
        struct ordered_samples  ordered_samples;
        struct perf_data_file   *file;
 };
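
Note: when the whole data file fits under a single mmap, one_mmap_addr and
one_mmap_offset record where that mapping starts, so later code can turn a file
offset into a pointer without re-reading. A hypothetical helper built on the new
fields (not part of this patch):

        static void *session__ptr_at(struct perf_session *s, u64 file_offset)
        {
                if (!s->one_mmap)
                        return NULL;    /* data is windowed; must go via read */
                return (char *)s->one_mmap_addr +
                       (file_offset - s->one_mmap_offset);
        }
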
index 45512ba..14e5a03 100644 (file)
@@ -1,3 +1,4 @@
+#include <sys/mman.h>
 #include "sort.h"
 #include "hist.h"
 #include "comm.h"
@@ -784,6 +785,104 @@ static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
        return repsep_snprintf(bf, size, "%-*s", width, out);
 }
 
+static inline u64 cl_address(u64 address)
+{
+       /* return the cacheline of the address */
+       return (address & ~(cacheline_size - 1));
+}
+
+static int64_t
+sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       u64 l, r;
+       struct map *l_map, *r_map;
+
+       if (!left->mem_info)  return -1;
+       if (!right->mem_info) return 1;
+
+       /* group event types together */
+       if (left->cpumode > right->cpumode) return -1;
+       if (left->cpumode < right->cpumode) return 1;
+
+       l_map = left->mem_info->daddr.map;
+       r_map = right->mem_info->daddr.map;
+
+       /* if both are NULL, jump to sort on al_addr instead */
+       if (!l_map && !r_map)
+               goto addr;
+
+       if (!l_map) return -1;
+       if (!r_map) return 1;
+
+       if (l_map->maj > r_map->maj) return -1;
+       if (l_map->maj < r_map->maj) return 1;
+
+       if (l_map->min > r_map->min) return -1;
+       if (l_map->min < r_map->min) return 1;
+
+       if (l_map->ino > r_map->ino) return -1;
+       if (l_map->ino < r_map->ino) return 1;
+
+       if (l_map->ino_generation > r_map->ino_generation) return -1;
+       if (l_map->ino_generation < r_map->ino_generation) return 1;
+
+       /*
+        * Addresses with no major/minor numbers are assumed to be
+        * anonymous in userspace.  Sort those on pid then address.
+        *
+        * The kernel and non-zero major/minor mapped areas are
+        * assumed to be unity mapped.  Sort those on address.
+        */
+
+       if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
+           (!(l_map->flags & MAP_SHARED)) &&
+           !l_map->maj && !l_map->min && !l_map->ino &&
+           !l_map->ino_generation) {
+               /* userspace anonymous */
+
+               if (left->thread->pid_ > right->thread->pid_) return -1;
+               if (left->thread->pid_ < right->thread->pid_) return 1;
+       }
+
+addr:
+       /* al_addr does all the right addr - start + offset calculations */
+       l = cl_address(left->mem_info->daddr.al_addr);
+       r = cl_address(right->mem_info->daddr.al_addr);
+
+       if (l > r) return -1;
+       if (l < r) return 1;
+
+       return 0;
+}
+
+static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
+                                         size_t size, unsigned int width)
+{
+
+       uint64_t addr = 0;
+       struct map *map = NULL;
+       struct symbol *sym = NULL;
+       char level = he->level;
+
+       if (he->mem_info) {
+               addr = cl_address(he->mem_info->daddr.al_addr);
+               map = he->mem_info->daddr.map;
+               sym = he->mem_info->daddr.sym;
+
+               /* print [s] for shared data mmaps */
+               if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
+                    map && (map->type == MAP__VARIABLE) &&
+                   (map->flags & MAP_SHARED) &&
+                   (map->maj || map->min || map->ino ||
+                    map->ino_generation))
+                       level = 's';
+               else if (!map)
+                       level = 'X';
+       }
+       return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
+                                        width);
+}
+
 struct sort_entry sort_mispredict = {
        .se_header      = "Branch Mispredicted",
        .se_cmp         = sort__mispredict_cmp,
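
Note: cl_address() in the hunk above rounds an address down to its cacheline by
clearing the low log2(cacheline_size) bits, which is what lets 'dcacheline' group
samples that hit the same line. A self-contained check of the arithmetic
(cacheline_size = 64 is an assumption here; perf detects the real value at runtime):

        #include <stdint.h>
        #include <assert.h>

        static uint64_t cacheline_size = 64;

        static inline uint64_t cl_address(uint64_t address)
        {
                return address & ~(cacheline_size - 1); /* clear offset bits */
        }

        int main(void)
        {
                assert(cl_address(0x1003f) == 0x10000); /* last byte of a line */
                assert(cl_address(0x10040) == 0x10040); /* start of next line */
                return 0;
        }
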
@@ -876,6 +975,13 @@ struct sort_entry sort_mem_snoop = {
        .se_width_idx   = HISTC_MEM_SNOOP,
 };
 
+struct sort_entry sort_mem_dcacheline = {
+       .se_header      = "Data Cacheline",
+       .se_cmp         = sort__dcacheline_cmp,
+       .se_snprintf    = hist_entry__dcacheline_snprintf,
+       .se_width_idx   = HISTC_MEM_DCACHELINE,
+};
+
 static int64_t
 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
 {
@@ -1043,6 +1149,7 @@ static struct sort_dimension memory_sort_dimensions[] = {
        DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
        DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
        DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
+       DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
 };
 
 #undef DIM
@@ -1108,7 +1215,7 @@ static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
        hse = container_of(fmt, struct hpp_sort_entry, hpp);
        len = hists__col_len(&evsel->hists, hse->se->se_width_idx);
 
-       return scnprintf(hpp->buf, hpp->size, "%*s", len, hse->se->se_header);
+       return scnprintf(hpp->buf, hpp->size, "%-*s", len, hse->se->se_header);
 }
 
 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
index 5bf0098..041f0c9 100644 (file)
@@ -89,6 +89,7 @@ struct hist_entry {
        u64                     ip;
        u64                     transaction;
        s32                     cpu;
+       u8                      cpumode;
 
        struct hist_entry_diff  diff;
 
@@ -185,6 +186,7 @@ enum sort_type {
        SORT_MEM_TLB,
        SORT_MEM_LVL,
        SORT_MEM_SNOOP,
+       SORT_MEM_DCACHELINE,
 };
 
 /*
index 6a0a13d..283d3e7 100644 (file)
@@ -30,6 +30,7 @@ static u64 turbo_frequency, max_freq;
 
 #define SLOT_MULT 30.0
 #define SLOT_HEIGHT 25.0
+#define SLOT_HALF (SLOT_HEIGHT / 2)
 
 int svg_page_width = 1000;
 u64 svg_highlight;
@@ -114,8 +115,14 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
        fprintf(svgfile, "      rect          { stroke-width: 1; }\n");
        fprintf(svgfile, "      rect.process  { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1;   stroke:rgb(  0,  0,  0); } \n");
        fprintf(svgfile, "      rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+       fprintf(svgfile, "      rect.process3 { fill:rgb(180,180,180); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
        fprintf(svgfile, "      rect.sample   { fill:rgb(  0,  0,255); fill-opacity:0.8; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
        fprintf(svgfile, "      rect.sample_hi{ fill:rgb(255,128,  0); fill-opacity:0.8; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+       fprintf(svgfile, "      rect.error    { fill:rgb(255,  0,  0); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+       fprintf(svgfile, "      rect.net      { fill:rgb(  0,128,  0); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+       fprintf(svgfile, "      rect.disk     { fill:rgb(  0,  0,255); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+       fprintf(svgfile, "      rect.sync     { fill:rgb(128,128,  0); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
+       fprintf(svgfile, "      rect.poll     { fill:rgb(  0,128,128); fill-opacity:0.2; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
        fprintf(svgfile, "      rect.blocked  { fill:rgb(255,  0,  0); fill-opacity:0.5; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
        fprintf(svgfile, "      rect.waiting  { fill:rgb(224,214,  0); fill-opacity:0.8; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
        fprintf(svgfile, "      rect.WAITING  { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0;   stroke:rgb(  0,  0,  0); } \n");
@@ -132,12 +139,81 @@ void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
        fprintf(svgfile, "    ]]>\n   </style>\n</defs>\n");
 }
 
+static double normalize_height(double height)
+{
+       if (height < 0.25)
+               return 0.25;
+       else if (height < 0.50)
+               return 0.50;
+       else if (height < 0.75)
+               return 0.75;
+       else
+               return 1.0;
+}
+
+void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
+{
+       double w = time2pixels(end) - time2pixels(start);
+       height = normalize_height(height);
+
+       if (!svgfile)
+               return;
+
+       fprintf(svgfile, "<g>\n");
+       fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
+       fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
+               time2pixels(start),
+               w,
+               Yslot * SLOT_MULT,
+               SLOT_HALF * height,
+               type);
+       fprintf(svgfile, "</g>\n");
+}
+
+void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
+{
+       double w = time2pixels(end) - time2pixels(start);
+       height = normalize_height(height);
+
+       if (!svgfile)
+               return;
+
+       fprintf(svgfile, "<g>\n");
+       fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
+       fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
+               time2pixels(start),
+               w,
+               Yslot * SLOT_MULT + SLOT_HEIGHT - SLOT_HALF * height,
+               SLOT_HALF * height,
+               type);
+       fprintf(svgfile, "</g>\n");
+}
+
+void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges)
+{
+       double w = time2pixels(end) - time2pixels(start);
+       height = normalize_height(height);
+
+       if (!svgfile)
+               return;
+
+       fprintf(svgfile, "<g>\n");
+       fprintf(svgfile, "<title>fd=%d error=%d merges=%d</title>\n", fd, err, merges);
+       fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
+               time2pixels(start),
+               w,
+               Yslot * SLOT_MULT + SLOT_HEIGHT - SLOT_HEIGHT * height,
+               SLOT_HEIGHT * height,
+               type);
+       fprintf(svgfile, "</g>\n");
+}
+
 void svg_box(int Yslot, u64 start, u64 end, const char *type)
 {
        if (!svgfile)
                return;
 
-       fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
+       fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
                time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type);
 }
 
@@ -174,7 +250,7 @@ void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
                cpu, time_to_string(end - start));
        if (backtrace)
                fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
-       fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
+       fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"%s\"/>\n",
                time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT,
                type);
 
@@ -186,7 +262,7 @@ void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
        text_size = round_text_size(text_size);
 
        if (text_size > MIN_TEXT_SIZE)
-               fprintf(svgfile, "<text x=\"%1.8f\" y=\"%1.8f\" font-size=\"%1.8fpt\">%i</text>\n",
+               fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"%.8fpt\">%i</text>\n",
                        time2pixels(start), Yslot *  SLOT_MULT + SLOT_HEIGHT - 1, text_size,  cpu + 1);
 
        fprintf(svgfile, "</g>\n");
@@ -202,10 +278,10 @@ static char *time_to_string(u64 duration)
                return text;
 
        if (duration < 1000 * 1000) { /* less than 1 msec */
-               sprintf(text, "%4.1f us", duration / 1000.0);
+               sprintf(text, "%.1f us", duration / 1000.0);
                return text;
        }
-       sprintf(text, "%4.1f ms", duration / 1000.0 / 1000);
+       sprintf(text, "%.1f ms", duration / 1000.0 / 1000);
 
        return text;
 }
@@ -233,14 +309,14 @@ void svg_waiting(int Yslot, int cpu, u64 start, u64 end, const char *backtrace)
 
        font_size = round_text_size(font_size);
 
-       fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
+       fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
        fprintf(svgfile, "<title>#%d waiting %s</title>\n", cpu, time_to_string(end - start));
        if (backtrace)
                fprintf(svgfile, "<desc>Waiting on:\n%s</desc>\n", backtrace);
-       fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+       fprintf(svgfile, "<rect x=\"0\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
                time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style);
        if (font_size > MIN_TEXT_SIZE)
-               fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%1.8fpt\"> %s</text>\n",
+               fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%.8fpt\"> %s</text>\n",
                        font_size, text);
        fprintf(svgfile, "</g>\n");
 }
@@ -289,16 +365,16 @@ void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
 
        fprintf(svgfile, "<g>\n");
 
-       fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"cpu\"/>\n",
+       fprintf(svgfile, "<rect x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\" class=\"cpu\"/>\n",
                time2pixels(first_time),
                time2pixels(last_time)-time2pixels(first_time),
                cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
 
        sprintf(cpu_string, "CPU %i", (int)cpu);
-       fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
+       fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\">%s</text>\n",
                10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
 
-       fprintf(svgfile, "<text transform=\"translate(%4.8f,%4.8f)\" font-size=\"1.25pt\">%s</text>\n",
+       fprintf(svgfile, "<text transform=\"translate(%.8f,%.8f)\" font-size=\"1.25pt\">%s</text>\n",
                10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
 
        fprintf(svgfile, "</g>\n");
@@ -319,11 +395,11 @@ void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const c
        else
                type = "sample";
 
-       fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), cpu2y(cpu));
+       fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\">\n", time2pixels(start), cpu2y(cpu));
        fprintf(svgfile, "<title>%d %s running %s</title>\n", pid, name, time_to_string(end - start));
        if (backtrace)
                fprintf(svgfile, "<desc>Switched because:\n%s</desc>\n", backtrace);
-       fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+       fprintf(svgfile, "<rect x=\"0\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
                time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type);
        width = time2pixels(end)-time2pixels(start);
        if (width > 6)
@@ -332,7 +408,7 @@ void svg_process(int cpu, u64 start, u64 end, int pid, const char *name, const c
        width = round_text_size(width);
 
        if (width > MIN_TEXT_SIZE)
-               fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%3.8fpt\">%s</text>\n",
+               fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%.8fpt\">%s</text>\n",
                        width, name);
 
        fprintf(svgfile, "</g>\n");
@@ -353,7 +429,7 @@ void svg_cstate(int cpu, u64 start, u64 end, int type)
                type = 6;
        sprintf(style, "c%i", type);
 
-       fprintf(svgfile, "<rect class=\"%s\" x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\"/>\n",
+       fprintf(svgfile, "<rect class=\"%s\" x=\"%.8f\" width=\"%.8f\" y=\"%.1f\" height=\"%.1f\"/>\n",
                style,
                time2pixels(start), time2pixels(end)-time2pixels(start),
                cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
@@ -365,7 +441,7 @@ void svg_cstate(int cpu, u64 start, u64 end, int type)
        width = round_text_size(width);
 
        if (width > MIN_TEXT_SIZE)
-               fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"%3.8fpt\">C%i</text>\n",
+               fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"%.8fpt\">C%i</text>\n",
                        time2pixels(start), cpu2y(cpu)+width, width, type);
 
        fprintf(svgfile, "</g>\n");
@@ -407,9 +483,9 @@ void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
        if (max_freq)
                height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT);
        height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
-       fprintf(svgfile, "<line x1=\"%4.8f\" x2=\"%4.8f\" y1=\"%4.1f\" y2=\"%4.1f\" class=\"pstate\"/>\n",
+       fprintf(svgfile, "<line x1=\"%.8f\" x2=\"%.8f\" y1=\"%.1f\" y2=\"%.1f\" class=\"pstate\"/>\n",
                time2pixels(start), time2pixels(end), height, height);
-       fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"0.25pt\">%s</text>\n",
+       fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\" font-size=\"0.25pt\">%s</text>\n",
                time2pixels(start), height+0.9, HzToHuman(freq));
 
        fprintf(svgfile, "</g>\n");
@@ -435,32 +511,32 @@ void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc
 
        if (row1 < row2) {
                if (row1) {
-                       fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+                       fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
                                time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT,  time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
                        if (desc2)
-                               fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
+                               fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
                                        time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_HEIGHT/48, desc2);
                }
                if (row2) {
-                       fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+                       fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
                                time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32,  time2pixels(start), row2 * SLOT_MULT);
                        if (desc1)
-                               fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
+                               fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
                                        time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, desc1);
                }
        } else {
                if (row2) {
-                       fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+                       fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
                                time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT,  time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
                        if (desc1)
-                               fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
+                               fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
                                        time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/48, desc1);
                }
                if (row1) {
-                       fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+                       fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
                                time2pixels(start), row1 * SLOT_MULT - SLOT_MULT/32,  time2pixels(start), row1 * SLOT_MULT);
                        if (desc2)
-                               fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
+                               fprintf(svgfile, "<g transform=\"translate(%.8f,%.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
                                        time2pixels(start), row1 * SLOT_MULT - SLOT_HEIGHT/32, desc2);
                }
        }
@@ -468,7 +544,7 @@ void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc
        if (row2 > row1)
                height += SLOT_HEIGHT;
        if (row1)
-               fprintf(svgfile, "<circle  cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\"  style=\"fill:rgb(32,255,32)\"/>\n",
+               fprintf(svgfile, "<circle  cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\"  style=\"fill:rgb(32,255,32)\"/>\n",
                        time2pixels(start), height);
 
        fprintf(svgfile, "</g>\n");
@@ -488,16 +564,16 @@ void svg_wakeline(u64 start, int row1, int row2, const char *backtrace)
                fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
 
        if (row1 < row2)
-               fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+               fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
                        time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT,  time2pixels(start), row2 * SLOT_MULT);
        else
-               fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
+               fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
                        time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT,  time2pixels(start), row1 * SLOT_MULT);
 
        height = row1 * SLOT_MULT;
        if (row2 > row1)
                height += SLOT_HEIGHT;
-       fprintf(svgfile, "<circle  cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\"  style=\"fill:rgb(32,255,32)\"/>\n",
+       fprintf(svgfile, "<circle  cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\"  style=\"fill:rgb(32,255,32)\"/>\n",
                        time2pixels(start), height);
 
        fprintf(svgfile, "</g>\n");
@@ -515,9 +591,9 @@ void svg_interrupt(u64 start, int row, const char *backtrace)
        if (backtrace)
                fprintf(svgfile, "<desc>%s</desc>\n", backtrace);
 
-       fprintf(svgfile, "<circle  cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\"  style=\"fill:rgb(255,128,128)\"/>\n",
+       fprintf(svgfile, "<circle  cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\"  style=\"fill:rgb(255,128,128)\"/>\n",
                        time2pixels(start), row * SLOT_MULT);
-       fprintf(svgfile, "<circle  cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\"  style=\"fill:rgb(255,128,128)\"/>\n",
+       fprintf(svgfile, "<circle  cx=\"%.8f\" cy=\"%.2f\" r = \"0.01\"  style=\"fill:rgb(255,128,128)\"/>\n",
                        time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT);
 
        fprintf(svgfile, "</g>\n");
@@ -528,7 +604,7 @@ void svg_text(int Yslot, u64 start, const char *text)
        if (!svgfile)
                return;
 
-       fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
+       fprintf(svgfile, "<text x=\"%.8f\" y=\"%.8f\">%s</text>\n",
                time2pixels(start), Yslot * SLOT_MULT+SLOT_HEIGHT/2, text);
 }
 
@@ -537,12 +613,26 @@ static void svg_legenda_box(int X, const char *text, const char *style)
        double boxsize;
        boxsize = SLOT_HEIGHT / 2;
 
-       fprintf(svgfile, "<rect x=\"%i\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
+       fprintf(svgfile, "<rect x=\"%i\" width=\"%.8f\" y=\"0\" height=\"%.1f\" class=\"%s\"/>\n",
                X, boxsize, boxsize, style);
-       fprintf(svgfile, "<text transform=\"translate(%4.8f, %4.8f)\" font-size=\"%4.8fpt\">%s</text>\n",
+       fprintf(svgfile, "<text transform=\"translate(%.8f, %.8f)\" font-size=\"%.8fpt\">%s</text>\n",
                X + boxsize + 5, boxsize, 0.8 * boxsize, text);
 }
 
+void svg_io_legenda(void)
+{
+       if (!svgfile)
+               return;
+
+       fprintf(svgfile, "<g>\n");
+       svg_legenda_box(0,      "Disk", "disk");
+       svg_legenda_box(100,    "Network", "net");
+       svg_legenda_box(200,    "Sync", "sync");
+       svg_legenda_box(300,    "Poll", "poll");
+       svg_legenda_box(400,    "Error", "error");
+       fprintf(svgfile, "</g>\n");
+}
+
 void svg_legenda(void)
 {
        if (!svgfile)
@@ -559,7 +649,7 @@ void svg_legenda(void)
        fprintf(svgfile, "</g>\n");
 }
 
-void svg_time_grid(void)
+void svg_time_grid(double min_thickness)
 {
        u64 i;
 
@@ -579,8 +669,10 @@ void svg_time_grid(void)
                        color = 128;
                }
 
-               fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
-                       time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
+               if (thickness >= min_thickness)
+                       fprintf(svgfile, "<line x1=\"%.8f\" y1=\"%.2f\" x2=\"%.8f\" y2=\"%" PRIu64 "\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%.3f\"/>\n",
+                               time2pixels(i), SLOT_MULT/2, time2pixels(i),
+                               total_height, color, color, color, thickness);
 
                i += 10000000;
        }
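
A note on the format-string cleanups in svg.c above: in a printf conversion such as "%4.8f", the 4 is a minimum field width while the 8 is the precision. Eight digits after the decimal point already produce a string longer than four characters, so the width never takes effect and "%.8f" emits byte-for-byte identical output. A minimal standalone check:

#include <stdio.h>

int main(void)
{
	/* The width (4) is a minimum; 8 digits of precision already
	 * yield 10+ characters, so "%4.8f" and "%.8f" print the same. */
	printf("[%4.8f]\n", 3.14159265358979); /* [3.14159265] */
	printf("[%.8f]\n",  3.14159265358979); /* [3.14159265] */
	return 0;
}
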
index e3aff53..9292a52 100644 (file)
@@ -4,6 +4,9 @@
 #include <linux/types.h>
 
 extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
+extern void svg_ubox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
+extern void svg_lbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
+extern void svg_fbox(int Yslot, u64 start, u64 end, double height, const char *type, int fd, int err, int merges);
 extern void svg_box(int Yslot, u64 start, u64 end, const char *type);
 extern void svg_blocked(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
 extern void svg_running(int Yslot, int cpu, u64 start, u64 end, const char *backtrace);
@@ -16,7 +19,8 @@ extern void svg_cstate(int cpu, u64 start, u64 end, int type);
 extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
 
 
-extern void svg_time_grid(void);
+extern void svg_time_grid(double min_thickness);
+extern void svg_io_legenda(void);
 extern void svg_legenda(void);
 extern void svg_wakeline(u64 start, int row1, int row2, const char *backtrace);
 extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2, const char *backtrace);
index 6864661..d753499 100644 (file)
@@ -49,7 +49,8 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
 
 static inline int elf_sym__is_function(const GElf_Sym *sym)
 {
-       return elf_sym__type(sym) == STT_FUNC &&
+       return (elf_sym__type(sym) == STT_FUNC ||
+               elf_sym__type(sym) == STT_GNU_IFUNC) &&
               sym->st_name != 0 &&
               sym->st_shndx != SHN_UNDEF;
 }
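
The elf_sym__is_function() change above makes perf treat GNU indirect functions (ifuncs, dispatched through a resolver by the dynamic linker) as ordinary functions when building symbol tables; their symbols carry STT_GNU_IFUNC rather than STT_FUNC in the low nibble of st_info. A small standalone illustration of where that type lives, using only <elf.h> constants:

#include <elf.h>
#include <stdio.h>

int main(void)
{
	/* st_info packs the binding in the high nibble and the type in
	 * the low nibble; ifuncs use STT_GNU_IFUNC (10), not STT_FUNC (2). */
	unsigned char st_info = ELF64_ST_INFO(STB_GLOBAL, STT_GNU_IFUNC);

	printf("bind=%d type=%d (STT_FUNC=%d, STT_GNU_IFUNC=%d)\n",
	       ELF64_ST_BIND(st_info), ELF64_ST_TYPE(st_info),
	       STT_FUNC, STT_GNU_IFUNC);
	return 0;
}
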
@@ -598,6 +599,8 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
                        goto out_elf_end;
        }
 
+       ss->is_64_bit = (gelf_getclass(elf) == ELFCLASS64);
+
        ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab",
                        NULL);
        if (ss->symshdr.sh_type != SHT_SYMTAB)
@@ -619,7 +622,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
                GElf_Shdr shdr;
                ss->adjust_symbols = (ehdr.e_type == ET_EXEC ||
                                ehdr.e_type == ET_REL ||
-                               is_vdso_map(dso->short_name) ||
+                               dso__is_vdso(dso) ||
                                elf_section_by_name(elf, &ehdr, &shdr,
                                                     ".gnu.prelink_undo",
                                                     NULL) != NULL);
@@ -698,6 +701,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
        bool remap_kernel = false, adjust_kernel_syms = false;
 
        dso->symtab_type = syms_ss->type;
+       dso->is_64_bit = syms_ss->is_64_bit;
        dso->rel = syms_ss->ehdr.e_type == ET_REL;
 
        /*
@@ -1024,6 +1028,39 @@ int file__read_maps(int fd, bool exe, mapfn_t mapfn, void *data,
        return err;
 }
 
+enum dso_type dso__type_fd(int fd)
+{
+       enum dso_type dso_type = DSO__TYPE_UNKNOWN;
+       GElf_Ehdr ehdr;
+       Elf_Kind ek;
+       Elf *elf;
+
+       elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+       if (elf == NULL)
+               goto out;
+
+       ek = elf_kind(elf);
+       if (ek != ELF_K_ELF)
+               goto out_end;
+
+       if (gelf_getclass(elf) == ELFCLASS64) {
+               dso_type = DSO__TYPE_64BIT;
+               goto out_end;
+       }
+
+       if (gelf_getehdr(elf, &ehdr) == NULL)
+               goto out_end;
+
+       if (ehdr.e_machine == EM_X86_64)
+               dso_type = DSO__TYPE_X32BIT;
+       else
+               dso_type = DSO__TYPE_32BIT;
+out_end:
+       elf_end(elf);
+out:
+       return dso_type;
+}
+
 static int copy_bytes(int from, off_t from_offs, int to, off_t to_offs, u64 len)
 {
        ssize_t r;
index bd15f49..c9541fe 100644 (file)
@@ -288,6 +288,44 @@ int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused,
        return 0;
 }
 
+static int fd__is_64_bit(int fd)
+{
+       u8 e_ident[EI_NIDENT];
+
+       if (lseek(fd, 0, SEEK_SET))
+               return -1;
+
+       if (readn(fd, e_ident, sizeof(e_ident)) != sizeof(e_ident))
+               return -1;
+
+       if (memcmp(e_ident, ELFMAG, SELFMAG) ||
+           e_ident[EI_VERSION] != EV_CURRENT)
+               return -1;
+
+       return e_ident[EI_CLASS] == ELFCLASS64;
+}
+
+enum dso_type dso__type_fd(int fd)
+{
+       Elf64_Ehdr ehdr;
+       int ret;
+
+       ret = fd__is_64_bit(fd);
+       if (ret < 0)
+               return DSO__TYPE_UNKNOWN;
+
+       if (ret)
+               return DSO__TYPE_64BIT;
+
+       if (readn(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr))
+               return DSO__TYPE_UNKNOWN;
+
+       if (ehdr.e_machine == EM_X86_64)
+               return DSO__TYPE_X32BIT;
+
+       return DSO__TYPE_32BIT;
+}
+
 int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
                  struct symsrc *ss,
                  struct symsrc *runtime_ss __maybe_unused,
@@ -295,6 +333,11 @@ int dso__load_sym(struct dso *dso, struct map *map __maybe_unused,
                  int kmodule __maybe_unused)
 {
        unsigned char *build_id[BUILD_ID_SIZE];
+       int ret;
+
+       ret = fd__is_64_bit(ss->fd);
+       if (ret >= 0)
+               dso->is_64_bit = ret;
 
        if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) {
                dso__set_build_id(dso, build_id);
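
Both dso__type_fd() variants above reduce to the same trick: the first EI_NIDENT bytes of an ELF file (the e_ident array) carry the magic, class and version, so one small read distinguishes 32-bit from 64-bit objects without parsing the full header; the full versions then read e_machine to spot x32 binaries (x86-64 instructions with 32-bit pointers). A standalone sketch of the libelf-free path -- it uses plain read() where perf's readn() would also cope with short reads:

#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int fd_is_64_bit(int fd)
{
	unsigned char e_ident[EI_NIDENT];

	if (lseek(fd, 0, SEEK_SET))
		return -1;
	if (read(fd, e_ident, sizeof(e_ident)) != (ssize_t)sizeof(e_ident))
		return -1;
	/* reject non-ELF files and unknown ELF versions */
	if (memcmp(e_ident, ELFMAG, SELFMAG) ||
	    e_ident[EI_VERSION] != EV_CURRENT)
		return -1;
	return e_ident[EI_CLASS] == ELFCLASS64;
}

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "/proc/self/exe", O_RDONLY);

	if (fd < 0)
		return 1;
	printf("64-bit: %d\n", fd_is_64_bit(fd));
	close(fd);
	return 0;
}
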
index 7b9096f..eb06746 100644 (file)
@@ -34,6 +34,7 @@ struct symbol_conf symbol_conf = {
        .annotate_src           = true,
        .demangle               = true,
        .cumulate_callchain     = true,
+       .show_hist_headers      = true,
        .symfs                  = "",
 };
 
@@ -341,6 +342,16 @@ static struct symbol *symbols__first(struct rb_root *symbols)
        return NULL;
 }
 
+static struct symbol *symbols__next(struct symbol *sym)
+{
+       struct rb_node *n = rb_next(&sym->rb_node);
+
+       if (n)
+               return rb_entry(n, struct symbol, rb_node);
+
+       return NULL;
+}
+
 struct symbol_name_rb_node {
        struct rb_node  rb_node;
        struct symbol   sym;
@@ -411,11 +422,16 @@ struct symbol *dso__find_symbol(struct dso *dso,
        return symbols__find(&dso->symbols[type], addr);
 }
 
-static struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
+struct symbol *dso__first_symbol(struct dso *dso, enum map_type type)
 {
        return symbols__first(&dso->symbols[type]);
 }
 
+struct symbol *dso__next_symbol(struct symbol *sym)
+{
+       return symbols__next(sym);
+}
+
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
                                        const char *name)
 {
@@ -1064,6 +1080,7 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                              &is_64_bit);
        if (err)
                goto out_err;
+       dso->is_64_bit = is_64_bit;
 
        if (list_empty(&md.maps)) {
                err = -EINVAL;
@@ -1662,6 +1679,7 @@ do_kallsyms:
        free(kallsyms_allocated_filename);
 
        if (err > 0 && !dso__is_kcore(dso)) {
+               dso->binary_type = DSO_BINARY_TYPE__KALLSYMS;
                dso__set_long_name(dso, "[kernel.kallsyms]", false);
                map__fixup_start(map);
                map__fixup_end(map);
@@ -1709,6 +1727,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map,
        if (err > 0)
                pr_debug("Using %s for symbols\n", kallsyms_filename);
        if (err > 0 && !dso__is_kcore(dso)) {
+               dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS;
                machine__mmap_name(machine, path, sizeof(path));
                dso__set_long_name(dso, strdup(path), true);
                map__fixup_start(map);
index 615c752..e7295e9 100644 (file)
@@ -118,7 +118,8 @@ struct symbol_conf {
                        annotate_src,
                        event_group,
                        demangle,
-                       filter_relative;
+                       filter_relative,
+                       show_hist_headers;
        const char      *vmlinux_name,
                        *kallsyms_name,
                        *source_prefix,
@@ -215,6 +216,7 @@ struct symsrc {
        GElf_Shdr dynshdr;
 
        bool adjust_symbols;
+       bool is_64_bit;
 #endif
 };
 
@@ -238,6 +240,11 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
 struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
                                        const char *name);
 
+struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
+struct symbol *dso__next_symbol(struct symbol *sym);
+
+enum dso_type dso__type_fd(int fd);
+
 int filename__read_build_id(const char *filename, void *bf, size_t size);
 int sysfs__read_build_id(const char *filename, void *bf, size_t size);
 int modules__parse(const char *filename, void *arg,
index 2fde0d5..12c7a25 100644 (file)
@@ -13,7 +13,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
        struct thread *leader;
        pid_t pid = thread->pid_;
 
-       if (pid == thread->tid) {
+       if (pid == thread->tid || pid == -1) {
                thread->mg = map_groups__new();
        } else {
                leader = machine__findnew_thread(machine, pid, pid);
@@ -34,6 +34,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
                thread->pid_ = pid;
                thread->tid = tid;
                thread->ppid = -1;
+               thread->cpu = -1;
                INIT_LIST_HEAD(&thread->comm_list);
 
                comm_str = malloc(32);
@@ -60,8 +61,10 @@ void thread__delete(struct thread *thread)
 {
        struct comm *comm, *tmp;
 
-       map_groups__put(thread->mg);
-       thread->mg = NULL;
+       if (thread->mg) {
+               map_groups__put(thread->mg);
+               thread->mg = NULL;
+       }
        list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) {
                list_del(&comm->list);
                comm__free(comm);
@@ -127,12 +130,12 @@ int thread__comm_len(struct thread *thread)
 size_t thread__fprintf(struct thread *thread, FILE *fp)
 {
        return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
-              map_groups__fprintf(thread->mg, verbose, fp);
+              map_groups__fprintf(thread->mg, fp);
 }
 
 void thread__insert_map(struct thread *thread, struct map *map)
 {
-       map_groups__fixup_overlappings(thread->mg, map, verbose, stderr);
+       map_groups__fixup_overlappings(thread->mg, map, stderr);
        map_groups__insert(thread->mg, map);
 }
 
index 3c0c272..716b772 100644 (file)
@@ -17,6 +17,7 @@ struct thread {
        pid_t                   pid_; /* Not all tools update this */
        pid_t                   tid;
        pid_t                   ppid;
+       int                     cpu;
        char                    shortname[3];
        bool                    comm_set;
        bool                    dead; /* if set thread has exited */
index 7e6fcfe..eb72716 100644 (file)
@@ -40,6 +40,7 @@
 #include "trace-event.h"
 #include <api/fs/debugfs.h>
 #include "evsel.h"
+#include "debug.h"
 
 #define VERSION "0.5"
 
@@ -191,12 +192,10 @@ static int copy_event_system(const char *sys, struct tracepoint_path *tps)
                    strcmp(dent->d_name, "..") == 0 ||
                    !name_in_tp_list(dent->d_name, tps))
                        continue;
-               format = malloc(strlen(sys) + strlen(dent->d_name) + 10);
-               if (!format) {
+               if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) {
                        err = -ENOMEM;
                        goto out;
                }
-               sprintf(format, "%s/%s/format", sys, dent->d_name);
                ret = stat(format, &st);
                free(format);
                if (ret < 0)
@@ -217,12 +216,10 @@ static int copy_event_system(const char *sys, struct tracepoint_path *tps)
                    strcmp(dent->d_name, "..") == 0 ||
                    !name_in_tp_list(dent->d_name, tps))
                        continue;
-               format = malloc(strlen(sys) + strlen(dent->d_name) + 10);
-               if (!format) {
+               if (asprintf(&format, "%s/%s/format", sys, dent->d_name) < 0) {
                        err = -ENOMEM;
                        goto out;
                }
-               sprintf(format, "%s/%s/format", sys, dent->d_name);
                ret = stat(format, &st);
 
                if (ret >= 0) {
@@ -317,12 +314,10 @@ static int record_event_files(struct tracepoint_path *tps)
                    strcmp(dent->d_name, "ftrace") == 0 ||
                    !system_in_tp_list(dent->d_name, tps))
                        continue;
-               sys = malloc(strlen(path) + strlen(dent->d_name) + 2);
-               if (!sys) {
+               if (asprintf(&sys, "%s/%s", path, dent->d_name) < 0) {
                        err = -ENOMEM;
                        goto out;
                }
-               sprintf(sys, "%s/%s", path, dent->d_name);
                ret = stat(sys, &st);
                if (ret >= 0) {
                        ssize_t size = strlen(dent->d_name) + 1;
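
The trace-event-info.c hunks above (and similar ones in util.c further down) replace the error-prone malloc(strlen(...) + N) plus sprintf() pairs with asprintf(), which computes the size and allocates in one step. A minimal sketch of the idiom; the "sched"/"sched_switch" names are placeholders, not taken from the patch:

#define _GNU_SOURCE	/* asprintf() is a GNU extension */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *format;

	if (asprintf(&format, "%s/%s/format", "sched", "sched_switch") < 0)
		return 1;	/* pointer is undefined on failure; don't free */

	puts(format);		/* sched/sched_switch/format */
	free(format);
	return 0;
}
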
index e113e18..54d9e9b 100644 (file)
@@ -22,7 +22,6 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <getopt.h>
 #include <stdarg.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -36,6 +35,7 @@
 #include "../perf.h"
 #include "util.h"
 #include "trace-event.h"
+#include "debug.h"
 
 static int input_fd;
 
diff --git a/tools/perf/util/tsc.c b/tools/perf/util/tsc.c
new file mode 100644 (file)
index 0000000..4d4210d
--- /dev/null
@@ -0,0 +1,30 @@
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+#include "tsc.h"
+
+u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc)
+{
+       u64 t, quot, rem;
+
+       t = ns - tc->time_zero;
+       quot = t / tc->time_mult;
+       rem  = t % tc->time_mult;
+       return (quot << tc->time_shift) +
+              (rem << tc->time_shift) / tc->time_mult;
+}
+
+u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
+{
+       u64 quot, rem;
+
+       quot = cyc >> tc->time_shift;
+       rem  = cyc & ((1 << tc->time_shift) - 1);
+       return tc->time_zero + quot * tc->time_mult +
+              ((rem * tc->time_mult) >> tc->time_shift);
+}
+
+u64 __weak rdtsc(void)
+{
+       return 0;
+}
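
The two helpers in the new tsc.c implement the usual mult/shift clock conversion, split into quotient and remainder so that (t << time_shift) cannot overflow 64 bits for large timestamps. A standalone round-trip demo follows; the time_mult/time_shift values are made-up stand-ins for what the kernel exports, not real hardware numbers:

#include <stdint.h>
#include <stdio.h>

struct perf_tsc_conversion {
	uint16_t time_shift;
	uint32_t time_mult;
	uint64_t time_zero;
};

static uint64_t perf_time_to_tsc(uint64_t ns, struct perf_tsc_conversion *tc)
{
	uint64_t t = ns - tc->time_zero;
	uint64_t quot = t / tc->time_mult;
	uint64_t rem  = t % tc->time_mult;

	/* same math as above: (t << shift) / mult without overflowing */
	return (quot << tc->time_shift) +
	       (rem << tc->time_shift) / tc->time_mult;
}

static uint64_t tsc_to_perf_time(uint64_t cyc, struct perf_tsc_conversion *tc)
{
	uint64_t quot = cyc >> tc->time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << tc->time_shift) - 1);

	return tc->time_zero + quot * tc->time_mult +
	       ((rem * tc->time_mult) >> tc->time_shift);
}

int main(void)
{
	/* hypothetical ~3 GHz TSC: ns = cyc * 341 >> 10 */
	struct perf_tsc_conversion tc = { .time_shift = 10,
					  .time_mult  = 341,
					  .time_zero  = 0 };
	uint64_t ns = 1000000000ULL;	/* one second */
	uint64_t cyc = perf_time_to_tsc(ns, &tc);

	printf("%llu ns -> %llu cyc -> %llu ns\n",
	       (unsigned long long)ns, (unsigned long long)cyc,
	       (unsigned long long)tsc_to_perf_time(cyc, &tc));
	return 0;
}
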
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
new file mode 100644 (file)
index 0000000..a8b78f1
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef __PERF_TSC_H
+#define __PERF_TSC_H
+
+#include <linux/types.h>
+
+#include "../arch/x86/util/tsc.h"
+
+u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
+u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
+u64 rdtsc(void);
+
+#endif
index 5ec80a5..7419768 100644 (file)
@@ -3,6 +3,7 @@
 #include <elfutils/libdwfl.h>
 #include <inttypes.h>
 #include <errno.h>
+#include "debug.h"
 #include "unwind.h"
 #include "unwind-libdw.h"
 #include "machine.h"
index bd5768d..92b56db 100644 (file)
@@ -30,6 +30,7 @@
 #include "unwind.h"
 #include "symbol.h"
 #include "util.h"
+#include "debug.h"
 
 extern int
 UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
@@ -250,7 +251,6 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
 
        /* Check the .eh_frame section for unwinding info */
        offset = elf_section_offset(fd, ".eh_frame_hdr");
-       close(fd);
 
        if (offset)
                ret = unwind_spec_ehframe(dso, machine, offset,
@@ -271,7 +271,6 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
 
        /* Check the .debug_frame section for unwinding info */
        *offset = elf_section_offset(fd, ".debug_frame");
-       close(fd);
 
        if (*offset)
                return 0;
index 7fff6be..e52e746 100644 (file)
@@ -1,5 +1,6 @@
 #include "../perf.h"
 #include "util.h"
+#include "debug.h"
 #include <api/fs/fs.h>
 #include <sys/mman.h>
 #ifdef HAVE_BACKTRACE_SUPPORT
@@ -17,6 +18,7 @@
  * XXX We need to find a better place for these things...
  */
 unsigned int page_size;
+int cacheline_size;
 
 bool test_attr__enabled;
 
@@ -332,12 +334,9 @@ const char *find_tracing_dir(void)
        if (!debugfs)
                return NULL;
 
-       tracing = malloc(strlen(debugfs) + 9);
-       if (!tracing)
+       if (asprintf(&tracing, "%s/tracing", debugfs) < 0)
                return NULL;
 
-       sprintf(tracing, "%s/tracing", debugfs);
-
        tracing_found = 1;
        return tracing;
 }
@@ -351,11 +350,9 @@ char *get_tracing_file(const char *name)
        if (!tracing)
                return NULL;
 
-       file = malloc(strlen(tracing) + strlen(name) + 2);
-       if (!file)
+       if (asprintf(&file, "%s/%s", tracing, name) < 0)
                return NULL;
 
-       sprintf(file, "%s/%s", tracing, name);
        return file;
 }
 
index b03da44..6686436 100644 (file)
@@ -304,6 +304,7 @@ char *rtrim(char *s);
 void dump_stack(void);
 
 extern unsigned int page_size;
+extern int cacheline_size;
 
 void get_term_dimensions(struct winsize *ws);
 
index 0ddb3b8..adca693 100644 (file)
 #include "vdso.h"
 #include "util.h"
 #include "symbol.h"
+#include "machine.h"
 #include "linux/string.h"
+#include "debug.h"
 
-static bool vdso_found;
-static char vdso_file[] = "/tmp/perf-vdso.so-XXXXXX";
+#define VDSO__TEMP_FILE_NAME "/tmp/perf-vdso.so-XXXXXX"
+
+struct vdso_file {
+       bool found;
+       bool error;
+       char temp_file_name[sizeof(VDSO__TEMP_FILE_NAME)];
+       const char *dso_name;
+};
+
+struct vdso_info {
+       struct vdso_file vdso;
+};
+
+static struct vdso_info *vdso_info__new(void)
+{
+       static const struct vdso_info vdso_info_init = {
+               .vdso    = {
+                       .temp_file_name = VDSO__TEMP_FILE_NAME,
+                       .dso_name = DSO__NAME_VDSO,
+               },
+       };
+
+       return memdup(&vdso_info_init, sizeof(vdso_info_init));
+}
 
 static int find_vdso_map(void **start, void **end)
 {
@@ -47,7 +71,7 @@ static int find_vdso_map(void **start, void **end)
        return !found;
 }
 
-static char *get_file(void)
+static char *get_file(struct vdso_file *vdso_file)
 {
        char *vdso = NULL;
        char *buf = NULL;
@@ -55,10 +79,10 @@ static char *get_file(void)
        size_t size;
        int fd;
 
-       if (vdso_found)
-               return vdso_file;
+       if (vdso_file->found)
+               return vdso_file->temp_file_name;
 
-       if (find_vdso_map(&start, &end))
+       if (vdso_file->error || find_vdso_map(&start, &end))
                return NULL;
 
        size = end - start;
@@ -67,45 +91,78 @@ static char *get_file(void)
        if (!buf)
                return NULL;
 
-       fd = mkstemp(vdso_file);
+       fd = mkstemp(vdso_file->temp_file_name);
        if (fd < 0)
                goto out;
 
        if (size == (size_t) write(fd, buf, size))
-               vdso = vdso_file;
+               vdso = vdso_file->temp_file_name;
 
        close(fd);
 
  out:
        free(buf);
 
-       vdso_found = (vdso != NULL);
+       vdso_file->found = (vdso != NULL);
+       vdso_file->error = !vdso_file->found;
        return vdso;
 }
 
-void vdso__exit(void)
+void vdso__exit(struct machine *machine)
 {
-       if (vdso_found)
-               unlink(vdso_file);
+       struct vdso_info *vdso_info = machine->vdso_info;
+
+       if (!vdso_info)
+               return;
+
+       if (vdso_info->vdso.found)
+               unlink(vdso_info->vdso.temp_file_name);
+
+       zfree(&machine->vdso_info);
 }
 
-struct dso *vdso__dso_findnew(struct list_head *head)
+static struct dso *vdso__new(struct machine *machine, const char *short_name,
+                            const char *long_name)
 {
-       struct dso *dso = dsos__find(head, VDSO__MAP_NAME, true);
+       struct dso *dso;
 
+       dso = dso__new(short_name);
+       if (dso != NULL) {
+               dsos__add(&machine->user_dsos, dso);
+               dso__set_long_name(dso, long_name, false);
+       }
+
+       return dso;
+}
+
+struct dso *vdso__dso_findnew(struct machine *machine,
+                             struct thread *thread __maybe_unused)
+{
+       struct vdso_info *vdso_info;
+       struct dso *dso;
+
+       if (!machine->vdso_info)
+               machine->vdso_info = vdso_info__new();
+
+       vdso_info = machine->vdso_info;
+       if (!vdso_info)
+               return NULL;
+
+       dso = dsos__find(&machine->user_dsos, DSO__NAME_VDSO, true);
        if (!dso) {
                char *file;
 
-               file = get_file();
+               file = get_file(&vdso_info->vdso);
                if (!file)
                        return NULL;
 
-               dso = dso__new(VDSO__MAP_NAME);
-               if (dso != NULL) {
-                       dsos__add(head, dso);
-                       dso__set_long_name(dso, file, false);
-               }
+               dso = vdso__new(machine, DSO__NAME_VDSO, file);
        }
 
        return dso;
 }
+
+bool dso__is_vdso(struct dso *dso)
+{
+       return !strcmp(dso->short_name, DSO__NAME_VDSO);
+}
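
The vdso.c rework above moves the vdso bookkeeping out of file-scope statics into a per-machine vdso_info object, allocated on first use by copying a constant template (via perf's memdup()); that is what lets vdso__exit() tear the state down per machine, including the mkstemp() template string that gets rewritten in place. A reduced sketch of the pattern with illustrative names -- this is not the perf API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct vdso_state {
	int found;
	char temp_file_name[sizeof("/tmp/perf-vdso.so-XXXXXX")];
};

struct machine_like {
	struct vdso_state *vdso_info;	/* NULL until first use */
};

static struct vdso_state *vdso_state_get(struct machine_like *m)
{
	static const struct vdso_state init = {
		.temp_file_name = "/tmp/perf-vdso.so-XXXXXX",
	};

	if (!m->vdso_info) {
		/* memdup()-style copy: fresh mutable state per machine */
		m->vdso_info = malloc(sizeof(init));
		if (m->vdso_info)
			memcpy(m->vdso_info, &init, sizeof(init));
	}
	return m->vdso_info;
}

int main(void)
{
	struct machine_like m = { NULL };
	struct vdso_state *v = vdso_state_get(&m);

	if (v)
		printf("template: %s\n", v->temp_file_name);
	free(m.vdso_info);
	return 0;
}
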
index 0f76e7c..af9d692 100644 (file)
@@ -7,12 +7,21 @@
 
 #define VDSO__MAP_NAME "[vdso]"
 
+#define DSO__NAME_VDSO "[vdso]"
+
 static inline bool is_vdso_map(const char *filename)
 {
        return !strcmp(filename, VDSO__MAP_NAME);
 }
 
-struct dso *vdso__dso_findnew(struct list_head *head);
-void vdso__exit(void);
+struct dso;
+
+bool dso__is_vdso(struct dso *dso);
+
+struct machine;
+struct thread;
+
+struct dso *vdso__dso_findnew(struct machine *machine, struct thread *thread);
+void vdso__exit(struct machine *machine);
 
 #endif /* __PERF_VDSO__ */
index 4063156..55ab700 100755 (executable)
@@ -72,7 +72,7 @@ my %default = (
     "IGNORE_UNUSED"            => 0,
 );
 
-my $ktest_config;
+my $ktest_config = "ktest.conf";
 my $version;
 my $have_version = 0;
 my $machine;
@@ -149,7 +149,6 @@ my $bisect_ret_abort;
 my $bisect_ret_default;
 my $in_patchcheck = 0;
 my $run_test;
-my $redirect;
 my $buildlog;
 my $testlog;
 my $dmesg;
@@ -522,7 +521,7 @@ sub read_ync {
     return read_prompt 1, $prompt;
 }
 
-sub get_ktest_config {
+sub get_mandatory_config {
     my ($config) = @_;
     my $ans;
 
@@ -553,29 +552,29 @@ sub get_ktest_config {
     }
 }
 
-sub get_ktest_configs {
-    get_ktest_config("MACHINE");
-    get_ktest_config("BUILD_DIR");
-    get_ktest_config("OUTPUT_DIR");
+sub get_mandatory_configs {
+    get_mandatory_config("MACHINE");
+    get_mandatory_config("BUILD_DIR");
+    get_mandatory_config("OUTPUT_DIR");
 
     if ($newconfig) {
-       get_ktest_config("BUILD_OPTIONS");
+       get_mandatory_config("BUILD_OPTIONS");
     }
 
     # options required for other than just building a kernel
     if (!$buildonly) {
-       get_ktest_config("POWER_CYCLE");
-       get_ktest_config("CONSOLE");
+       get_mandatory_config("POWER_CYCLE");
+       get_mandatory_config("CONSOLE");
     }
 
     # options required for install and more
     if ($buildonly != 1) {
-       get_ktest_config("SSH_USER");
-       get_ktest_config("BUILD_TARGET");
-       get_ktest_config("TARGET_IMAGE");
+       get_mandatory_config("SSH_USER");
+       get_mandatory_config("BUILD_TARGET");
+       get_mandatory_config("TARGET_IMAGE");
     }
 
-    get_ktest_config("LOCALVERSION");
+    get_mandatory_config("LOCALVERSION");
 
     return if ($buildonly);
 
@@ -583,7 +582,7 @@ sub get_ktest_configs {
 
     if (!defined($rtype)) {
        if (!defined($opt{"GRUB_MENU"})) {
-           get_ktest_config("REBOOT_TYPE");
+           get_mandatory_config("REBOOT_TYPE");
            $rtype = $entered_configs{"REBOOT_TYPE"};
        } else {
            $rtype = "grub";
@@ -591,16 +590,16 @@ sub get_ktest_configs {
     }
 
     if ($rtype eq "grub") {
-       get_ktest_config("GRUB_MENU");
+       get_mandatory_config("GRUB_MENU");
     }
 
     if ($rtype eq "grub2") {
-       get_ktest_config("GRUB_MENU");
-       get_ktest_config("GRUB_FILE");
+       get_mandatory_config("GRUB_MENU");
+       get_mandatory_config("GRUB_FILE");
     }
 
     if ($rtype eq "syslinux") {
-       get_ktest_config("SYSLINUX_LABEL");
+       get_mandatory_config("SYSLINUX_LABEL");
     }
 }
 
@@ -1090,7 +1089,7 @@ sub read_config {
     $test_case = __read_config $config, \$test_num;
 
     # make sure we have all mandatory configs
-    get_ktest_configs;
+    get_mandatory_configs;
 
     # was a test specified?
     if (!$test_case) {
@@ -1529,7 +1528,7 @@ sub fail {
 }
 
 sub run_command {
-    my ($command) = @_;
+    my ($command, $redirect) = @_;
     my $dolog = 0;
     my $dord = 0;
     my $pid;
@@ -2265,9 +2264,7 @@ sub build {
     # Run old config regardless, to enforce min configurations
     make_oldconfig;
 
-    $redirect = "$buildlog";
-    my $build_ret = run_command "$make $build_options";
-    undef $redirect;
+    my $build_ret = run_command "$make $build_options", $buildlog;
 
     if (defined($post_build)) {
        # Because a post build may change the kernel version
@@ -2360,9 +2357,7 @@ sub child_run_test {
     $poweroff_on_error = 0;
     $die_on_failure = 1;
 
-    $redirect = "$testlog";
-    run_command $run_test or $failed = 1;
-    undef $redirect;
+    run_command $run_test, $testlog or $failed = 1;
 
     exit $failed;
 }
@@ -2789,12 +2784,17 @@ my %dependency;
 sub assign_configs {
     my ($hash, $config) = @_;
 
+    doprint "Reading configs from $config\n";
+
     open (IN, $config)
        or dodie "Failed to read $config";
 
     while (<IN>) {
+       chomp;
        if (/^((CONFIG\S*)=.*)/) {
            ${$hash}{$2} = $1;
+       } elsif (/^(# (CONFIG\S*) is not set)/) {
+           ${$hash}{$2} = $1;
        }
     }
 
@@ -2807,27 +2807,6 @@ sub process_config_ignore {
     assign_configs \%config_ignore, $config;
 }
 
-sub read_current_config {
-    my ($config_ref) = @_;
-
-    %{$config_ref} = ();
-    undef %{$config_ref};
-
-    my @key = keys %{$config_ref};
-    if ($#key >= 0) {
-       print "did not delete!\n";
-       exit;
-    }
-    open (IN, "$output_config");
-
-    while (<IN>) {
-       if (/^(CONFIG\S+)=(.*)/) {
-           ${$config_ref}{$1} = $2;
-       }
-    }
-    close(IN);
-}
-
 sub get_dependencies {
     my ($config) = @_;
 
@@ -2846,53 +2825,97 @@ sub get_dependencies {
     return @deps;
 }
 
+sub save_config {
+    my ($pc, $file) = @_;
+
+    my %configs = %{$pc};
+
+    doprint "Saving configs into $file\n";
+
+    open(OUT, ">$file") or dodie "Can not write to $file";
+
+    foreach my $config (keys %configs) {
+       print OUT "$configs{$config}\n";
+    }
+    close(OUT);
+}
+
 sub create_config {
-    my @configs = @_;
+    my ($name, $pc) = @_;
 
-    open(OUT, ">$output_config") or dodie "Can not write to $output_config";
+    doprint "Creating old config from $name configs\n";
 
-    foreach my $config (@configs) {
-       print OUT "$config_set{$config}\n";
-       my @deps = get_dependencies $config;
-       foreach my $dep (@deps) {
-           print OUT "$config_set{$dep}\n";
+    save_config $pc, $output_config;
+
+    make_oldconfig;
+}
+
+# Compare two config hashes and return the configs whose values differ.
+# The returned values are B's; look the keys up in A to see what A had.
+sub diff_config_vals {
+    my ($pa, $pb) = @_;
+
+    # crappy Perl way to pass in hashes.
+    my %a = %{$pa};
+    my %b = %{$pb};
+
+    my %ret;
+
+    foreach my $item (keys %a) {
+       if (defined($b{$item}) && $b{$item} ne $a{$item}) {
+           $ret{$item} = $b{$item};
        }
     }
 
-    # turn off configs to keep off
-    foreach my $config (keys %config_off) {
-       print OUT "# $config is not set\n";
-    }
+    return %ret;
+}
 
-    # turn off configs that should be off for now
-    foreach my $config (@config_off_tmp) {
-       print OUT "# $config is not set\n";
-    }
+# compare two config hashes and return the configs in B but not A
+sub diff_configs {
+    my ($pa, $pb) = @_;
+
+    my %ret;
+
+    # crappy Perl way to pass in hashes.
+    my %a = %{$pa};
+    my %b = %{$pb};
 
-    foreach my $config (keys %config_ignore) {
-       print OUT "$config_ignore{$config}\n";
+    foreach my $item (keys %b) {
+       if (!defined($a{$item})) {
+           $ret{$item} = $b{$item};
+       }
     }
-    close(OUT);
 
-    make_oldconfig;
+    return %ret;
 }
 
+# Compare two config hashes:
+#   0 if they are equal,
+#  +1 if b has an item a does not, or a and b disagree on a value,
+#  -1 if a has an item b does not.
 sub compare_configs {
-    my (%a, %b) = @_;
+    my ($pa, $pb) = @_;
 
-    foreach my $item (keys %a) {
-       if (!defined($b{$item})) {
-           print "diff $item\n";
+    my %ret;
+
+    # crappy Perl way to pass in hashes.
+    my %a = %{$pa};
+    my %b = %{$pb};
+
+    foreach my $item (keys %b) {
+       if (!defined($a{$item})) {
+           return 1;
+       }
+       if ($a{$item} ne $b{$item}) {
            return 1;
        }
-       delete $b{$item};
     }
 
-    my @keys = keys %b;
-    if ($#keys) {
-       print "diff2 $keys[0]\n";
+    foreach my $item (keys %a) {
+       if (!defined($b{$item})) {
+           return -1;
+       }
     }
-    return -1 if ($#keys >= 0);
 
     return 0;
 }
@@ -2900,24 +2923,13 @@ sub compare_configs {
 sub run_config_bisect_test {
     my ($type) = @_;
 
-    return run_bisect_test $type, "oldconfig";
-}
+    my $ret = run_bisect_test $type, "oldconfig";
 
-sub process_passed {
-    my (%configs) = @_;
-
-    doprint "These configs had no failure: (Enabling them for further compiles)\n";
-    # Passed! All these configs are part of a good compile.
-    # Add them to the min options.
-    foreach my $config (keys %configs) {
-       if (defined($config_list{$config})) {
-           doprint " removing $config\n";
-           $config_ignore{$config} = $config_list{$config};
-           delete $config_list{$config};
-       }
+    if ($bisect_manual) {
+       $ret = answer_bisect;
     }
-    doprint "config copied to $outputdir/config_good\n";
-    run_command "cp -f $output_config $outputdir/config_good";
+
+    return $ret;
 }
 
 sub process_failed {
@@ -2928,253 +2940,225 @@ sub process_failed {
     doprint "***************************************\n\n";
 }
 
-sub run_config_bisect {
+# used for config bisecting
+my $good_config;
+my $bad_config;
 
-    my @start_list = keys %config_list;
+sub process_new_config {
+    my ($tc, $nc, $gc, $bc) = @_;
 
-    if ($#start_list < 0) {
-       doprint "No more configs to test!!!\n";
-       return -1;
+    my %tmp_config = %{$tc};
+    my %good_configs = %{$gc};
+    my %bad_configs = %{$bc};
+
+    my %new_configs;
+
+    my $runtest = 1;
+    my $ret;
+
+    create_config "tmp_configs", \%tmp_config;
+    assign_configs \%new_configs, $output_config;
+
+    $ret = compare_configs \%new_configs, \%bad_configs;
+    if (!$ret) {
+       doprint "New config equals bad config, try next test\n";
+       $runtest = 0;
+    }
+
+    if ($runtest) {
+       $ret = compare_configs \%new_configs, \%good_configs;
+       if (!$ret) {
+           doprint "New config equals good config, try next test\n";
+           $runtest = 0;
+       }
     }
 
-    doprint "***** RUN TEST ***\n";
+    %{$nc} = %new_configs;
+
+    return $runtest;
+}
+
+sub run_config_bisect {
+    my ($pgood, $pbad) = @_;
+
     my $type = $config_bisect_type;
+
+    my %good_configs = %{$pgood};
+    my %bad_configs = %{$pbad};
+
+    my %diff_configs = diff_config_vals \%good_configs, \%bad_configs;
+    my %b_configs = diff_configs \%good_configs, \%bad_configs;
+    my %g_configs = diff_configs \%bad_configs, \%good_configs;
+
+    my @diff_arr = keys %diff_configs;
+    my $len_diff = $#diff_arr + 1;
+
+    my @b_arr = keys %b_configs;
+    my $len_b = $#b_arr + 1;
+
+    my @g_arr = keys %g_configs;
+    my $len_g = $#g_arr + 1;
+
+    my $runtest = 1;
+    my %new_configs;
     my $ret;
-    my %current_config;
 
-    my $count = $#start_list + 1;
-    doprint "  $count configs to test\n";
+    # First, let's get it down to a single subset.
+    # Is the problem with a difference in values?
+    # Is the problem with a missing config?
+    # Is the problem with a config that breaks things?
 
-    my $half = int($#start_list / 2);
+    # Enable all of one set and see if we get a new bad
+    # or good config.
 
-    do {
-       my @tophalf = @start_list[0 .. $half];
+    # first set the good config to the bad values.
 
-       # keep the bottom half off
-       if ($half < $#start_list) {
-           @config_off_tmp = @start_list[$half + 1 .. $#start_list];
-       } else {
-           @config_off_tmp = ();
-       }
+    doprint "d=$len_diff g=$len_g b=$len_b\n";
 
-       create_config @tophalf;
-       read_current_config \%current_config;
-
-       $count = $#tophalf + 1;
-       doprint "Testing $count configs\n";
-       my $found = 0;
-       # make sure we test something
-       foreach my $config (@tophalf) {
-           if (defined($current_config{$config})) {
-               logit " $config\n";
-               $found = 1;
-           }
-       }
-       if (!$found) {
-           # try the other half
-           doprint "Top half produced no set configs, trying bottom half\n";
-
-           # keep the top half off
-           @config_off_tmp = @tophalf;
-           @tophalf = @start_list[$half + 1 .. $#start_list];
-
-           create_config @tophalf;
-           read_current_config \%current_config;
-           foreach my $config (@tophalf) {
-               if (defined($current_config{$config})) {
-                   logit " $config\n";
-                   $found = 1;
-               }
-           }
-           if (!$found) {
-               doprint "Failed: Can't make new config with current configs\n";
-               foreach my $config (@start_list) {
-                   doprint "  CONFIG: $config\n";
-               }
-               return -1;
+    # first, let's enable things in the bad config that are enabled in the good config
+
+    if ($len_diff > 0) {
+       if ($len_b > 0 || $len_g > 0) {
+           my %tmp_config = %bad_configs;
+
+           doprint "Set tmp config to be bad config with good config values\n";
+           foreach my $item (@diff_arr) {
+               $tmp_config{$item} = $good_configs{$item};
            }
-           $count = $#tophalf + 1;
-           doprint "Testing $count configs\n";
-       }
 
-       $ret = run_config_bisect_test $type;
-       if ($bisect_manual) {
-           $ret = answer_bisect;
-       }
-       if ($ret) {
-           process_passed %current_config;
-           return 0;
+           $runtest = process_new_config \%tmp_config, \%new_configs,
+                           \%good_configs, \%bad_configs;
        }
+    }
 
-       doprint "This config had a failure.\n";
-       doprint "Removing these configs that were not set in this config:\n";
-       doprint "config copied to $outputdir/config_bad\n";
-       run_command "cp -f $output_config $outputdir/config_bad";
+    if (!$runtest && $len_diff > 0) {
 
-       # A config exists in this group that was bad.
-       foreach my $config (keys %config_list) {
-           if (!defined($current_config{$config})) {
-               doprint " removing $config\n";
-               delete $config_list{$config};
-           }
+       if ($len_diff == 1) {
+           process_failed $diff_arr[0];
+           return 1;
        }
+       my %tmp_config = %bad_configs;
 
-       @start_list = @tophalf;
+       my $half = int($#diff_arr / 2);
+       my @tophalf = @diff_arr[0 .. $half];
 
-       if ($#start_list == 0) {
-           process_failed $start_list[0];
-           return 1;
+       doprint "Settings bisect with top half:\n";
+       doprint "Set tmp config to be bad config with some good config values\n";
+       foreach my $item (@tophalf) {
+           $tmp_config{$item} = $good_configs{$item};
        }
 
-       # remove half the configs we are looking at and see if
-       # they are good.
-       $half = int($#start_list / 2);
-    } while ($#start_list > 0);
+       $runtest = process_new_config \%tmp_config, \%new_configs,
+                           \%good_configs, \%bad_configs;
 
-    # we found a single config, try it again unless we are running manually
+       if (!$runtest) {
+           my %tmp_config = %bad_configs;
 
-    if ($bisect_manual) {
-       process_failed $start_list[0];
-       return 1;
-    }
+           doprint "Try bottom half\n";
 
-    my @tophalf = @start_list[0 .. 0];
+           my @bottomhalf = @diff_arr[$half+1 .. $#diff_arr];
 
-    $ret = run_config_bisect_test $type;
-    if ($ret) {
-       process_passed %current_config;
+           foreach my $item (@bottomhalf) {
+               $tmp_config{$item} = $good_configs{$item};
+           }
+
+           $runtest = process_new_config \%tmp_config, \%new_configs,
+                           \%good_configs, \%bad_configs;
+       }
+    }
+
+    if ($runtest) {
+       $ret = run_config_bisect_test $type;
+       if ($ret) {
+           doprint "NEW GOOD CONFIG\n";
+           %good_configs = %new_configs;
+           run_command "mv $good_config ${good_config}.last";
+           save_config \%good_configs, $good_config;
+           %{$pgood} = %good_configs;
+       } else {
+           doprint "NEW BAD CONFIG\n";
+           %bad_configs = %new_configs;
+           run_command "mv $bad_config ${bad_config}.last";
+           save_config \%bad_configs, $bad_config;
+           %{$pbad} = %bad_configs;
+       }
        return 0;
     }
 
-    process_failed $start_list[0];
-    return 1;
+    fail "Hmm, need to do a mix match?\n";
+    return -1;
 }
 
 sub config_bisect {
     my ($i) = @_;
 
-    my $start_config = $config_bisect;
+    my $type = $config_bisect_type;
+    my $ret;
 
-    my $tmpconfig = "$tmpdir/use_config";
+    $bad_config = $config_bisect;
 
     if (defined($config_bisect_good)) {
-       process_config_ignore $config_bisect_good;
-    }
-
-    # Make the file with the bad config and the min config
-    if (defined($minconfig)) {
-       # read the min config for things to ignore
-       run_command "cp $minconfig $tmpconfig" or
-           dodie "failed to copy $minconfig to $tmpconfig";
+       $good_config = $config_bisect_good;
+    } elsif (defined($minconfig)) {
+       $good_config = $minconfig;
     } else {
-       unlink $tmpconfig;
-    }
-
-    if (-f $tmpconfig) {
-       load_force_config($tmpconfig);
-       process_config_ignore $tmpconfig;
-    }
-
-    # now process the start config
-    run_command "cp $start_config $output_config" or
-       dodie "failed to copy $start_config to $output_config";
-
-    # read directly what we want to check
-    my %config_check;
-    open (IN, $output_config)
-       or dodie "failed to open $output_config";
-
-    while (<IN>) {
-       if (/^((CONFIG\S*)=.*)/) {
-           $config_check{$2} = $1;
+       doprint "No config specified, checking if defconfig works";
+       $ret = run_bisect_test $type, "defconfig";
+       if (!$ret) {
+           fail "Have no good config to compare with, please set CONFIG_BISECT_GOOD";
+           return 1;
        }
+       $good_config = $output_config;
     }
-    close(IN);
 
-    # Now run oldconfig with the minconfig
-    make_oldconfig;
+    # we don't want min configs to cause issues here.
+    doprint "Disabling 'MIN_CONFIG' for this test\n";
+    undef $minconfig;
 
-    # check to see what we lost (or gained)
-    open (IN, $output_config)
-       or dodie "Failed to read $start_config";
+    my %good_configs;
+    my %bad_configs;
+    my %tmp_configs;
 
-    my %removed_configs;
-    my %added_configs;
+    doprint "Run good configs through make oldconfig\n";
+    assign_configs \%tmp_configs, $good_config;
+    create_config "$good_config", \%tmp_configs;
+    assign_configs \%good_configs, $output_config;
 
-    while (<IN>) {
-       if (/^((CONFIG\S*)=.*)/) {
-           # save off all options
-           $config_set{$2} = $1;
-           if (defined($config_check{$2})) {
-               if (defined($config_ignore{$2})) {
-                   $removed_configs{$2} = $1;
-               } else {
-                   $config_list{$2} = $1;
-               }
-           } elsif (!defined($config_ignore{$2})) {
-               $added_configs{$2} = $1;
-               $config_list{$2} = $1;
-           }
-       } elsif (/^# ((CONFIG\S*).*)/) {
-           # Keep these configs disabled
-           $config_set{$2} = $1;
-           $config_off{$2} = $1;
-       }
-    }
-    close(IN);
+    doprint "Run bad configs through make oldconfig\n";
+    assign_configs \%tmp_configs, $bad_config;
+    create_config "$bad_config", \%tmp_configs;
+    assign_configs \%bad_configs, $output_config;
 
-    my @confs = keys %removed_configs;
-    if ($#confs >= 0) {
-       doprint "Configs overridden by default configs and removed from check:\n";
-       foreach my $config (@confs) {
-           doprint " $config\n";
-       }
-    }
-    @confs = keys %added_configs;
-    if ($#confs >= 0) {
-       doprint "Configs appearing in make oldconfig and added:\n";
-       foreach my $config (@confs) {
-           doprint " $config\n";
-       }
-    }
+    $good_config = "$tmpdir/good_config";
+    $bad_config = "$tmpdir/bad_config";
+
+    save_config \%good_configs, $good_config;
+    save_config \%bad_configs, $bad_config;
 
-    my %config_test;
-    my $once = 0;
 
-    @config_off_tmp = ();
+    if (defined($config_bisect_check) && $config_bisect_check ne "0") {
+       if ($config_bisect_check ne "good") {
+           doprint "Testing bad config\n";
 
-    # Sometimes kconfig does weird things. We must make sure
-    # that the config we autocreate has everything we need
-    # to test, otherwise we may miss testing configs, or
-    # may not be able to create a new config.
-    # Here we create a config with everything set.
-    create_config (keys %config_list);
-    read_current_config \%config_test;
-    foreach my $config (keys %config_list) {
-       if (!defined($config_test{$config})) {
-           if (!$once) {
-               $once = 1;
-               doprint "Configs not produced by kconfig (will not be checked):\n";
+           $ret = run_bisect_test $type, "useconfig:$bad_config";
+           if ($ret) {
+               fail "Bad config succeeded when expected to fail!";
+               return 0;
            }
-           doprint "  $config\n";
-           delete $config_list{$config};
        }
-    }
-    my $ret;
+       if ($config_bisect_check ne "bad") {
+           doprint "Testing good config\n";
 
-    if (defined($config_bisect_check) && $config_bisect_check) {
-       doprint " Checking to make sure bad config with min config fails\n";
-       create_config keys %config_list;
-       $ret = run_config_bisect_test $config_bisect_type;
-       if ($ret) {
-           doprint " FAILED! Bad config with min config boots fine\n";
-           return -1;
+           $ret = run_bisect_test $type, "useconfig:$good_config";
+           if (!$ret) {
+               fail "Good config failed when expected to succeed!";
+               return 0;
+           }
        }
-       doprint " Bad config with min config fails as expected\n";
     }
 
     do {
-       $ret = run_config_bisect;
+       $ret = run_config_bisect \%good_configs, \%bad_configs;
     } while (!$ret);
 
     return $ret if ($ret < 0);
@@ -3455,29 +3439,6 @@ sub read_depends {
     read_kconfig($kconfig);
 }
 
-sub read_config_list {
-    my ($config) = @_;
-
-    open (IN, $config)
-       or dodie "Failed to read $config";
-
-    while (<IN>) {
-       if (/^((CONFIG\S*)=.*)/) {
-           if (!defined($config_ignore{$2})) {
-               $config_list{$2} = $1;
-           }
-       }
-    }
-
-    close(IN);
-}
-
-sub read_output_config {
-    my ($config) = @_;
-
-    assign_configs \%config_ignore, $config;
-}
-
 sub make_new_config {
     my @configs = @_;
 
@@ -3863,7 +3824,7 @@ sub make_warnings_file {
     success $i;
 }
 
-$#ARGV < 1 or die "ktest.pl version: $VERSION\n   usage: ktest.pl config-file\n";
+$#ARGV < 1 or die "ktest.pl version: $VERSION\n   usage: ktest.pl [config-file]\n";
 
 if ($#ARGV == 0) {
     $ktest_config = $ARGV[0];
@@ -3873,8 +3834,6 @@ if ($#ARGV == 0) {
            exit 0;
        }
     }
-} else {
-    $ktest_config = "ktest.conf";
 }
 
 if (! -f $ktest_config) {
index 172eec4..911e45a 100644 (file)
 #
 #  The way it works is this:
 #
-#   First it finds a config to work with. Since a different version, or
-#   MIN_CONFIG may cause different dependecies, it must run through this
-#   preparation.
+#   You can specify a good config with CONFIG_BISECT_GOOD, otherwise it
+#   will use the MIN_CONFIG, and if that's not specified, it will use
+#   the config that comes with "make defconfig".
 #
-#   Overwrites any config set in the bad config with a config set in
-#   either the MIN_CONFIG or ADD_CONFIG. Thus, make sure these configs
-#   are minimal and do not disable configs you want to test:
-#   (ie.  # CONFIG_FOO is not set).
+#   It runs both the good and bad configs through a make oldconfig to
+#   make sure that they are set up for the kernel that is checked out.
 #
-#   An oldconfig is run on the bad config and any new config that
-#   appears will be added to the configs to test.
+#   It then reads the configs that are set, as well as the ones that are
+#   not set for both the good and bad configs, and then compares them.
+#   It will set half of the good configs within the bad config (note,
+#   "set" means to make the bad config match the good config; a config
+#   that is off in the good config will be turned off in the bad
+#   config. That is considered a "set").
 #
-#   Finally, it generates a config with the above result and runs it
-#   again through make oldconfig to produce a config that should be
-#   satisfied by kconfig.
+#   It tests this new config and if it works, it becomes the new good
+#   config, otherwise it becomes the new bad config. It continues this
+#   process until there's only one config left and it will report that
+#   config.
 #
-#   Then it starts the bisect.
+#   The "bad config" can also be a config that is needed to boot but was
+#   disabled because it depended on something that wasn't set.
 #
-#   The configs to test are cut in half. If all the configs in this
-#   half depend on a config in the other half, then the other half
-#   is tested instead. If no configs are enabled by either half, then
-#   this means a circular dependency exists and the test fails.
+#   During this process, it saves the current good and bad configs in
+#   ${TMP_DIR}/good_config and ${TMP_DIR}/bad_config respectively.
+#   If you stop the test, you can copy them to a new location to
+#   reuse them again.
 #
-#   A config is created with the test half, and the bisect test is run.
-#
-#   If the bisect succeeds, then all configs in the generated config
-#   are removed from the configs to test and added to the configs that
-#   will be enabled for all builds (they will be enabled, but not be part
-#   of the configs to examine).
-#
-#   If the bisect fails, then all test configs that were not enabled by
-#   the config file are removed from the test. These configs will not
-#   be enabled in future tests. Since current config failed, we consider
-#   this to be a subset of the config that we started with.
-#
-#   When we are down to one config, it is considered the bad config.
-#
-#   Note, the config chosen may not be the true bad config. Due to
-#   dependencies and selections of the kbuild system, mulitple
-#   configs may be needed to cause a failure. If you disable the
-#   config that was found and restart the test, if the test fails
-#   again, it is recommended to rerun the config_bisect with a new
-#   bad config without the found config enabled.
+#   Although the MIN_CONFIG may supply the starting good config, the
+#   MIN_CONFIG itself is ignored while the bisect runs.
 #
 #  The option BUILD_TYPE will be ignored.
 #
 # CONFIG_BISECT_GOOD (optional)
 #  If you have a good config to start with, then you
 #  can specify it with CONFIG_BISECT_GOOD. Otherwise
-#  the MIN_CONFIG is the base.
+#  the MIN_CONFIG is the base; if MIN_CONFIG is not set,
+#  it will build a config with "make defconfig".
 #
 # CONFIG_BISECT_CHECK (optional)
 #  Set this to 1 if you want to confirm that the config ktest
 #  generates (the bad config with the min config) is still bad.
 #  It may be that the min config fixes what broke the bad config
 #  and the test will not return a result.
+#  Set it to "good" to test only the good config and set it
+#  to "bad" to only test the bad config.
 #
 # Example:
 #   TEST_START
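
The halving procedure above is easier to see in a compact model. Below is a
minimal C sketch, not ktest's actual Perl implementation: diffs[], is_bad()
and the culprit index are hypothetical stand-ins for the real build-and-boot
cycle, and the interval form shown matches the good/bad swapping described
above only in the single-culprit case.

/* Minimal model of the config halving described above; hypothetical
 * stand-ins: diffs[] is the list of settings that differ between the
 * good and bad configs, and is_bad() plays the role of building and
 * booting a kernel with diffs[good_lo..good_hi) forced to their good
 * values inside the bad config. */
#include <stdio.h>

#define NDIFFS 8
static const char *diffs[NDIFFS] = {
        "CONFIG_A", "CONFIG_B", "CONFIG_C", "CONFIG_D",
        "CONFIG_E", "CONFIG_F", "CONFIG_G", "CONFIG_H",
};

static int is_bad(int good_lo, int good_hi)
{
        int culprit = 5;        /* pretend CONFIG_F causes the failure */

        /* Still bad unless the culprit was flipped to its good value. */
        return !(culprit >= good_lo && culprit < good_hi);
}

int main(void)
{
        int lo = 0, hi = NDIFFS;        /* settings still under suspicion */

        while (hi - lo > 1) {
                int mid = lo + (hi - lo) / 2;

                if (is_bad(lo, mid))
                        lo = mid;       /* culprit is in the other half */
                else
                        hi = mid;       /* flipping [lo, mid) fixed it  */
        }
        printf("config responsible for the failure: %s\n", diffs[lo]);
        return 0;
}

The real bisect additionally re-runs every candidate through make oldconfig,
as described above, so that each tested config is one kconfig can actually
satisfy.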
index ae5faf9..790c23a 100644 (file)
@@ -1,6 +1,6 @@
 all:
 
 run_tests:
-       @/bin/sh ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
+       @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
 
 clean:
index aa290c0..552f081 100644 (file)
@@ -193,6 +193,11 @@ int main(int argc, char **argv)
        int msg, pid, err;
        struct msgque_data msgque;
 
+       if (getuid() != 0) {
+               printf("Please run the test as root - Exiting.\n");
+               exit(1);
+       }
+
        msgque.key = ftok(argv[0], 822155650);
        if (msgque.key == -1) {
                printf("Can't make key\n");
index 350bfed..058c76f 100644 (file)
@@ -1,6 +1,6 @@
 all:
 
 run_tests:
-       @/bin/sh ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
+       @/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
 
 clean:
index 51267f4..2cede23 100644 (file)
@@ -2,7 +2,7 @@ PROGS := tm-resched-dscr
 
 all: $(PROGS)
 
-$(PROGS):
+$(PROGS): ../harness.c
 
 run_tests: all
        @-for PROG in $(PROGS); do \
index ee98e38..42d4c8c 100644 (file)
@@ -28,6 +28,8 @@
 #include <assert.h>
 #include <asm/tm.h>
 
+#include "utils.h"
+
 #define TBEGIN          ".long 0x7C00051D ;"
 #define TEND            ".long 0x7C00055D ;"
 #define TCHECK          ".long 0x7C00059C ;"
@@ -36,7 +38,8 @@
 #define SPRN_TEXASR     0x82
 #define SPRN_DSCR       0x03
 
-int main(void) {
+int test_body(void)
+{
        uint64_t rv, dscr1 = 1, dscr2, texasr;
 
        printf("Check DSCR TM context switch: ");
@@ -81,10 +84,15 @@ int main(void) {
                }
                if (dscr2 != dscr1) {
                        printf(" FAIL\n");
-                       exit(EXIT_FAILURE);
+                       return 1;
                } else {
                        printf(" OK\n");
-                       exit(EXIT_SUCCESS);
+                       return 0;
                }
        }
 }
+
+int main(void)
+{
+       return test_harness(test_body, "tm_resched_dscr");
+}
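
The pattern above, converting main() into a test_body() that returns pass or
fail and letting a shared harness report the result, is the usual shape of the
powerpc selftests. A self-contained sketch follows; the real test_harness()
comes from the selftests' harness.c and utils.h, so the stand-in below only
mimics its contract (run the body, print a banner, propagate the return code):

#include <stdio.h>

/* Stand-in for the selftests' test_harness(); the real one runs the
 * body in a child process and prints the pass/fail banner. */
static int test_harness(int (*test_function)(void), const char *name)
{
        int rc = test_function();

        printf("%s: %s\n", name, rc ? "FAIL" : "PASS");
        return rc;
}

static int test_body(void)
{
        /* ... exercise the feature; return 0 on success ... */
        return 0;
}

int main(void)
{
        return test_harness(test_body, "example_test");
}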
index ee1f6ca..3f6c9b7 100755 (executable)
@@ -54,10 +54,16 @@ do
                        if test -f "$i/qemu-cmd"
                        then
                                print_bug qemu failed
+                               echo "   $i"
+                       elif test -f "$i/buildonly"
+                       then
+                               echo Build-only run, no boot/test
+                               configcheck.sh $i/.config $i/ConfigFragment
+                               parse-build.sh $i/Make.out $configfile
                        else
                                print_bug Build failed
+                               echo "   $i"
                        fi
-                       echo "   $i"
                fi
        done
 done
index 27e544e..0f69dcb 100755 (executable)
@@ -42,6 +42,7 @@ grace=120
 
 T=/tmp/kvm-test-1-run.sh.$$
 trap 'rm -rf $T' 0
+touch $T
 
 . $KVM/bin/functions.sh
 . $KVPATH/ver_functions.sh
@@ -131,7 +132,10 @@ boot_args=$6
 
 cd $KVM
 kstarttime=`awk 'BEGIN { print systime() }' < /dev/null`
-echo ' ---' `date`: Starting kernel
+if test -z "$TORTURE_BUILDONLY"
+then
+       echo ' ---' `date`: Starting kernel
+fi
 
 # Generate -smp qemu argument.
 qemu_args="-nographic $qemu_args"
@@ -157,12 +161,13 @@ boot_args="`configfrag_boot_params "$boot_args" "$config_template"`"
 # Generate kernel-version-specific boot parameters
 boot_args="`per_version_boot_params "$boot_args" $builddir/.config $seconds`"
 
-echo $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 if test -n "$TORTURE_BUILDONLY"
 then
        echo Build-only run specified, boot/test omitted.
+       touch $resdir/buildonly
        exit 0
 fi
+echo $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append \"$qemu_append $boot_args\" > $resdir/qemu-cmd
 ( $QEMU $qemu_args -m 512 -kernel $builddir/$BOOT_IMAGE -append "$qemu_append $boot_args"; echo $? > $resdir/qemu-retval ) &
 qemu_pid=$!
 commandcompleted=0
index 40285c5..589e9c3 100644 (file)
@@ -340,12 +340,18 @@ function dump(first, pastlast)
        for (j = 1; j < jn; j++) {
                builddir=KVM "/b" j
                print "rm -f " builddir ".ready"
-               print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`";
-               print "echo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
+               print "if test -z \"$TORTURE_BUILDONLY\""
+               print "then"
+               print "\techo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date`";
+               print "\techo ----", cfr[j], cpusr[j] ovf ": Starting kernel. `date` >> " rd "/log";
+               print "fi"
        }
        print "wait"
-       print "echo ---- All kernel runs complete. `date`";
-       print "echo ---- All kernel runs complete. `date` >> " rd "/log";
+       print "if test -z \"$TORTURE_BUILDONLY\""
+       print "then"
+       print "\techo ---- All kernel runs complete. `date`";
+       print "\techo ---- All kernel runs complete. `date` >> " rd "/log";
+       print "fi"
        for (j = 1; j < jn; j++) {
                builddir=KVM "/b" j
                print "echo ----", cfr[j], cpusr[j] ovf ": Build/run results:";
@@ -385,10 +391,7 @@ echo
 echo
 echo " --- `date` Test summary:"
 echo Results directory: $resdir/$ds
-if test -z "$TORTURE_BUILDONLY"
-then
-       kvm-recheck.sh $resdir/$ds
-fi
+kvm-recheck.sh $resdir/$ds
 ___EOF___
 
 if test "$dryrun" = script
@@ -403,7 +406,7 @@ then
                sed -e 's/:.*$//' -e 's/^echo //'
        exit 0
 else
-       # Not a dryru, so run the script.
+       # Not a dryrun, so run the script.
        sh $T/script
 fi
 
index 9c827ec..063b707 100644 (file)
@@ -15,7 +15,6 @@ CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ZERO=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
index 1a777b5..ea119ba 100644 (file)
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=y
 CONFIG_RCU_BOOST=n
index 61c8d9c..19cf948 100644 (file)
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=y
 CONFIG_RCU_BOOST=n
index c1f111c..f4567fb 100644 (file)
@@ -14,7 +14,6 @@ CONFIG_RCU_FANOUT_LEAF=4
 CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=y
index 7dbd27c..0a262fb 100644 (file)
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=y
 CONFIG_RCU_CPU_STALL_VERBOSE=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
index d0f32e5..3a06b97 100644 (file)
@@ -18,7 +18,6 @@ CONFIG_RCU_NOCB_CPU_NONE=y
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_PROVE_RCU=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
index 2e477df..8f084cc 100644 (file)
@@ -19,7 +19,6 @@ CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_PROVE_RCU=y
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
index 042f86e..ab62255 100644 (file)
@@ -17,7 +17,6 @@ CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=y
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
index 3438cee..69a2e25 100644 (file)
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
index bf4523d..a0f32fb 100644 (file)
@@ -18,7 +18,6 @@ CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
index 81e4f7c..b7a62a5 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_PROVE_RCU_DELAY=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_CPU_STALL_VERBOSE=n
 CONFIG_RCU_BOOST=n
index ef624ce..a55c008 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
 #CHECK#CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RT_MUTEXES=y
index ef624ce..a55c008 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
 #CHECK#CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RT_MUTEXES=y
index ef624ce..a55c008 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
 #CHECK#CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RT_MUTEXES=y
index ef624ce..a55c008 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
 #CHECK#CONFIG_TREE_PREEMPT_RCU=y
 CONFIG_DEBUG_KERNEL=y
-CONFIG_PROVE_RCU_DELAY=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
 CONFIG_RT_MUTEXES=y
index adbb76c..3e588db 100644 (file)
@@ -14,7 +14,6 @@ CONFIG_NO_HZ_FULL_SYSIDLE -- Do one.
 CONFIG_PREEMPT -- Do half.  (First three and #8.)
 CONFIG_PROVE_LOCKING -- Do all but two, covering CONFIG_PROVE_RCU and not.
 CONFIG_PROVE_RCU -- Do all but one under CONFIG_PROVE_LOCKING.
-CONFIG_PROVE_RCU_DELAY -- Do one.
 CONFIG_RCU_BOOST -- one of TREE_PREEMPT_RCU.
 CONFIG_RCU_BOOST_PRIO -- set to 2 for _BOOST testing.
 CONFIG_RCU_CPU_STALL_INFO -- do one with and without _VERBOSE.
index 4473211..e775adc 100644 (file)
@@ -21,7 +21,7 @@ OBJS = tmon.o tui.o sysfs.o pid.o
 OBJS +=
 
 tmon: $(OBJS) Makefile tmon.h
-       $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS)  -o $(TARGET) -lm -lpanel -lncursesw  -lpthread
+       $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS)  -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread
 
 valgrind: tmon
         sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET)  1> /dev/null
index b30f531..09b7c32 100644 (file)
@@ -142,6 +142,7 @@ static void start_syslog(void)
 static void prepare_logging(void)
 {
        int i;
+       struct stat logstat;
 
        if (!logging)
                return;
@@ -152,6 +153,29 @@ static void prepare_logging(void)
                return;
        }
 
+       if (lstat(TMON_LOG_FILE, &logstat) < 0) {
+               syslog(LOG_ERR, "Unable to stat log file %s\n", TMON_LOG_FILE);
+               fclose(tmon_log);
+               tmon_log = NULL;
+               return;
+       }
+
+       /* The log file must be a regular file owned by us */
+       if (S_ISLNK(logstat.st_mode)) {
+               syslog(LOG_ERR, "Log file is a symlink.  Will not log\n");
+               fclose(tmon_log);
+               tmon_log = NULL;
+               return;
+       }
+
+       if (logstat.st_uid != getuid()) {
+               syslog(LOG_ERR, "We don't own the log file.  Not logging\n");
+               fclose(tmon_log);
+               tmon_log = NULL;
+               return;
+       }
+
+
        fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n");
        for (i = 0; i < ptdata.nr_tz_sensor; i++) {
                char binding_str[33]; /* size of long + 1 */
@@ -331,7 +355,7 @@ static void start_daemon_mode()
        disable_tui();
 
        /* change the file mode mask */
-       umask(0);
+       umask(S_IWGRP | S_IWOTH);
 
        /* new SID for the daemon process */
        sid = setsid();
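
The checks added above still lstat() the path after fopen() has opened it,
leaving a small window between the check and the use. A tighter variant (a
sketch only, not what tmon does; the path is hypothetical) refuses symlinks at
open time with O_NOFOLLOW and then validates the already-open descriptor with
fstat(), so the two cannot be raced:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Open a log file, refusing symlinks, then verify on the descriptor
 * itself that it is a regular file owned by us. */
static FILE *open_log_securely(const char *path)
{
        struct stat st;
        int fd = open(path, O_WRONLY | O_CREAT | O_APPEND | O_NOFOLLOW, 0600);

        if (fd < 0)
                return NULL;
        if (fstat(fd, &st) < 0 || !S_ISREG(st.st_mode) ||
            st.st_uid != getuid()) {
                close(fd);
                return NULL;
        }
        return fdopen(fd, "a");
}

int main(void)
{
        FILE *log = open_log_securely("/tmp/example.log");

        if (log)
                fclose(log);
        return log ? 0 : 1;
}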
index fe1e66b..a87e99f 100644 (file)
@@ -116,8 +116,8 @@ static const struct {
        .header = {
                .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
                .length = cpu_to_le32(sizeof descriptors),
-               .fs_count = 3,
-               .hs_count = 3,
+               .fs_count = cpu_to_le32(3),
+               .hs_count = cpu_to_le32(3),
        },
        .fs_descs = {
                .intf = {
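
The fs_count/hs_count fix matters because the FunctionFS descriptors header is
little-endian on the wire, so every multi-byte field needs conversion, just as
.magic and .length already get. A small self-contained sketch using htole32(),
the userspace analogue of cpu_to_le32(), shows why the missing conversion was
invisible on little-endian hosts:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t raw = 3;
        uint32_t wire = htole32(raw);   /* no-op on little-endian hosts */

        printf("raw=%#x wire(le)=%#x %s\n", (unsigned)raw, (unsigned)wire,
               raw == wire ? "(little-endian host: identical)"
                           : "(big-endian host: byte-swapped)");
        return 0;
}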
index 56ff9be..476d3bf 100644 (file)
@@ -1526,17 +1526,33 @@ int kvm_vgic_hyp_init(void)
                goto out_unmap;
        }
 
-       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-                vctrl_res.start, vgic_maint_irq);
-       on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
        if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
                kvm_err("Cannot obtain VCPU resource\n");
                ret = -ENXIO;
                goto out_unmap;
        }
+
+       if (!PAGE_ALIGNED(vcpu_res.start)) {
+               kvm_err("GICV physical address 0x%llx not page aligned\n",
+                       (unsigned long long)vcpu_res.start);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
+       if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+               kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+                       (unsigned long long)resource_size(&vcpu_res),
+                       PAGE_SIZE);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
        vgic_vcpu_base = vcpu_res.start;
 
+       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+                vctrl_res.start, vgic_maint_irq);
+       on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
        goto out;
 
 out_unmap:
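
The two new checks reject a GICV base or size that is not page aligned, since
KVM maps the region into guests at page granularity. In the kernel,
PAGE_ALIGNED(x) boils down to the IS_ALIGNED() form used in this self-contained
sketch, where the base and size values are hypothetical:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

int main(void)
{
        uint64_t base = 0x2c006000;     /* hypothetical GICV base */
        uint64_t size = 0x2000;         /* hypothetical GICV size */

        printf("base %s, size %s\n",
               IS_ALIGNED(base, PAGE_SIZE) ? "aligned" : "NOT aligned",
               IS_ALIGNED(size, PAGE_SIZE) ? "aligned" : "NOT aligned");
        return 0;
}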
index 2458a1d..e8ce34c 100644 (file)
@@ -254,10 +254,9 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
        spin_lock(&ioapic->lock);
        for (index = 0; index < IOAPIC_NUM_PINS; index++) {
                e = &ioapic->redirtbl[index];
-               if (!e->fields.mask &&
-                       (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
-                        kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC,
-                                index) || index == RTC_GSI)) {
+               if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
+                   kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
+                   index == RTC_GSI) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
                                e->fields.dest_id, e->fields.dest_mode)) {
                                __set_bit(e->fields.vector,
index ced4a54..a228ee8 100644 (file)
@@ -323,13 +323,13 @@ out:
 
 #define IOAPIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,  \
-         .u.irqchip.irqchip = KVM_IRQCHIP_IOAPIC, .u.irqchip.pin = (irq) }
+         .u.irqchip = { .irqchip = KVM_IRQCHIP_IOAPIC, .pin = (irq) } }
 #define ROUTING_ENTRY1(irq) IOAPIC_ROUTING_ENTRY(irq)
 
 #ifdef CONFIG_X86
 #  define PIC_ROUTING_ENTRY(irq) \
        { .gsi = irq, .type = KVM_IRQ_ROUTING_IRQCHIP,  \
-         .u.irqchip.irqchip = SELECT_PIC(irq), .u.irqchip.pin = (irq) % 8 }
+         .u.irqchip = { .irqchip = SELECT_PIC(irq), .pin = (irq) % 8 } }
 #  define ROUTING_ENTRY2(irq) \
        IOAPIC_ROUTING_ENTRY(irq), PIC_ROUTING_ENTRY(irq)
 #else